repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
theoryno3/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
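# Editor's sketch (not part of the original test module; addresses the XXX note
# at the top of this file): assuming a SciPy version that ships it, the public
# equivalent of _hungarian is scipy.optimize.linear_sum_assignment, which
# returns row and column index arrays instead of (row, col) pairs.
def _example_linear_sum_assignment():
    from scipy.optimize import linear_sum_assignment
    cost = np.array([[4, 1, 3],
                     [2, 0, 5],
                     [3, 2, 2]])
    row_ind, col_ind = linear_sum_assignment(cost)
    # The optimal assignment picks costs 1 + 2 + 2, so the total is 5.
    return cost[row_ind, col_ind].sum()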
| bsd-3-clause |
cchaoss/paparazzi | sw/misc/attitude_reference/test_att_ref.py | 49 | 3485 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Antoine Drouin
#
# This file is part of paparazzi.
#
# paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
import math
import numpy as np
import scipy.signal
import matplotlib.pyplot as plt
import pat.utils as pu
import pat.algebra as pa
import control as ctl
def random_setpoint(time, dt_step=2):
tf = time[0]
sp = np.zeros((len(time), 3))
sp_i = [0, 0, 0]
for i in range(0, len(time)):
if time[i] >= tf:
ui = np.random.rand(3) - [0.5, 0.5, 0.5];
ai = np.random.rand(1)
n = np.linalg.norm(ui)
if n > 0:
ui /= n
sp_i = pa.euler_of_quat(pa.quat_of_axis_angle(ui, ai))
tf += dt_step
sp[i] = sp_i
return sp
def test_ref(r, time, setpoint):
ref = np.zeros((len(time), 9))
for i in range(1, time.size):
sp_quat = pa.quat_of_euler(setpoint[i])
r.update_quat(sp_quat, time[i] - time[i - 1])
euler = pa.euler_of_quat(r.quat)
ref[i] = np.concatenate((euler, r.vel, r.accel))
return ref
def plot_ref(time, xref=None, sp=None, figure=None):
margins = (0.05, 0.05, 0.98, 0.96, 0.20, 0.34)
figure = pu.prepare_fig(figure, window_title='Reference', figsize=(20.48, 10.24), margins=margins)
plots = [("$\phi$", "deg"), ("$\\theta$", "deg"), ("$\\psi$", "deg"),
("$p$", "deg/s"), ("$q$", "deg/s"), ("$r$", "deg/s"),
("$\dot{p}$", "deg/s2"), ("$\dot{q}$", "deg/s2"), ("$\dot{r}$", "deg/s2")]
for i, (title, ylab) in enumerate(plots):
ax = plt.subplot(3, 3, i + 1)
if xref is not None: plt.plot(time, pu.deg_of_rad(xref[:, i]))
pu.decorate(ax, title=title, ylab=ylab)
if sp is not None and i < 3:
plt.plot(time, pu.deg_of_rad(sp[:, i]))
return figure
dt = 1. / 512.
time = np.arange(0., 4, dt)
sp = np.zeros((len(time), 3))
sp[:, 0] = pu.rad_of_deg(45.) * scipy.signal.square(math.pi / 2 * time + math.pi)
# sp[:, 1] = pu.rad_of_deg(5.)*scipy.signal.square(math.pi/2*time)
# sp[:, 2] = pu.rad_of_deg(45.)
# sp = random_setpoint(time)
# rs = [ctl.att_ref_analytic_disc(axis=0), ctl.att_ref_analytic_cont(axis=0), ctl.att_ref_default()]
args = {'omega': 10., 'xi': 0.7, 'sat_vel': pu.rad_of_deg(150.), 'sat_accel': pu.rad_of_deg(1800),
'sat_jerk': pu.rad_of_deg(27000)}
rs = [ctl.att_ref_sat_naive(**args), ctl.att_ref_sat_nested(**args), ctl.att_ref_sat_nested2(**args)]
# rs.append(ctl.AttRefIntNative(**args))
rs.append(ctl.AttRefFloatNative(**args))
xrs = [test_ref(r, time, sp) for r in rs]
figure = None
for xr in xrs:
figure = plot_ref(time, xr, None, figure)
figure = plot_ref(time, None, sp, figure)
legends = [r.name for r in rs] + ['Setpoint']
plt.subplot(3, 3, 3)
plt.legend(legends)
plt.show()
| gpl-2.0 |
msultan/osprey | osprey/tests/test_cli_worker_and_dump.py | 1 | 4751 | from __future__ import print_function, absolute_import, division
import os
import os.path
import sys
import json
import shutil
import subprocess
import tempfile
from distutils.spawn import find_executable
from numpy.testing.decorators import skipif
try:
__import__('msmbuilder')
HAVE_MSMBUILDER = True
except:
HAVE_MSMBUILDER = False
OSPREY_BIN = find_executable('osprey')
@skipif(not HAVE_MSMBUILDER, 'this test requires MSMBuilder')
def test_msmbuilder_skeleton():
from msmbuilder.example_datasets import FsPeptide
assert OSPREY_BIN is not None
cwd = os.path.abspath(os.curdir)
dirname = tempfile.mkdtemp()
FsPeptide(dirname).get()
try:
os.chdir(dirname)
subprocess.check_call([OSPREY_BIN, 'skeleton', '-t', 'msmbuilder',
'-f', 'config.yaml'])
subprocess.check_call([OSPREY_BIN, 'worker', 'config.yaml', '-n', '1'])
assert os.path.exists('osprey-trials.db')
yield _test_dump_1
yield _test_plot_1
finally:
os.chdir(cwd)
shutil.rmtree(dirname)
@skipif(not HAVE_MSMBUILDER, 'this test requires MSMBuilder')
def test_msmb_feat_select_skeleton():
from msmbuilder.example_datasets import FsPeptide
assert OSPREY_BIN is not None
cwd = os.path.abspath(os.curdir)
dirname = tempfile.mkdtemp()
FsPeptide(dirname).get()
try:
os.chdir(dirname)
subprocess.check_call([OSPREY_BIN, 'skeleton', '-t', 'msmb_feat_select',
'-f', 'config.yaml'])
subprocess.check_call([OSPREY_BIN, 'worker', 'config.yaml', '-n', '1'])
assert os.path.exists('osprey-trials.db')
yield _test_dump_1
yield _test_plot_1
finally:
os.chdir(cwd)
shutil.rmtree(dirname)
def test_sklearn_skeleton():
assert OSPREY_BIN is not None
cwd = os.path.abspath(os.curdir)
dirname = tempfile.mkdtemp()
try:
os.chdir(dirname)
subprocess.check_call([OSPREY_BIN, 'skeleton', '-t', 'sklearn',
'-f', 'config.yaml'])
subprocess.check_call([OSPREY_BIN, 'worker', 'config.yaml', '-n', '1'])
assert os.path.exists('osprey-trials.db')
subprocess.check_call([OSPREY_BIN, 'current_best', 'config.yaml'])
yield _test_dump_1
yield _test_plot_1
finally:
os.chdir(cwd)
shutil.rmtree(dirname)
def test_random_example():
assert OSPREY_BIN is not None
cwd = os.path.abspath(os.curdir)
dirname = tempfile.mkdtemp()
try:
os.chdir(dirname)
subprocess.check_call([OSPREY_BIN, 'skeleton', '-t', 'random_example',
'-f', 'config.yaml'])
subprocess.check_call([OSPREY_BIN, 'worker', 'config.yaml', '-n', '2',
'-s', '23', '-j', '2'])
assert os.path.exists('osprey-trials.db')
subprocess.check_call([OSPREY_BIN, 'current_best', 'config.yaml'])
yield _test_dump_1
yield _test_plot_1
finally:
os.chdir(cwd)
shutil.rmtree(dirname)
def test_gp_example():
assert OSPREY_BIN is not None
cwd = os.path.abspath(os.curdir)
dirname = tempfile.mkdtemp()
try:
os.chdir(dirname)
subprocess.check_call([OSPREY_BIN, 'skeleton', '-t', 'gp_example',
'-f', 'config.yaml'])
subprocess.check_call([OSPREY_BIN, 'worker', 'config.yaml', '-n', '1'])
assert os.path.exists('osprey-trials.db')
subprocess.check_call([OSPREY_BIN, 'current_best', 'config.yaml'])
yield _test_dump_1
yield _test_plot_1
finally:
os.chdir(cwd)
shutil.rmtree(dirname)
def test_grid_example():
assert OSPREY_BIN is not None
cwd = os.path.abspath(os.curdir)
dirname = tempfile.mkdtemp()
try:
os.chdir(dirname)
subprocess.check_call([OSPREY_BIN, 'skeleton', '-t', 'grid_example',
'-f', 'config.yaml'])
subprocess.check_call([OSPREY_BIN, 'worker', 'config.yaml', '-n', '1'])
assert os.path.exists('osprey-trials.db')
subprocess.check_call([OSPREY_BIN, 'current_best', 'config.yaml'])
yield _test_dump_1
yield _test_plot_1
finally:
os.chdir(cwd)
shutil.rmtree(dirname)
def _test_dump_1():
out = subprocess.check_output(
[OSPREY_BIN, 'dump', 'config.yaml', '-o', 'json'])
if sys.version_info >= (3, 0):
out = out.decode()
json.loads(out)
def _test_plot_1():
_ = subprocess.check_output(
[OSPREY_BIN, 'plot', 'config.yaml', '--no-browser'])
if not os.path.isfile('./plot.html'):
raise ValueError('Plot not created')
| apache-2.0 |
jwiggins/scikit-image | skimage/transform/tests/test_radon_transform.py | 13 | 14551 | from __future__ import print_function, division
import numpy as np
from numpy.testing import assert_raises
import itertools
import os.path
from skimage.transform import radon, iradon, iradon_sart, rescale
from skimage.io import imread
from skimage import data_dir
from skimage._shared.testing import test_parallel
from skimage._shared._warnings import expected_warnings
PHANTOM = imread(os.path.join(data_dir, "phantom.png"),
as_grey=True)[::2, ::2]
PHANTOM = rescale(PHANTOM, 0.5, order=1)
def _debug_plot(original, result, sinogram=None):
from matplotlib import pyplot as plt
imkwargs = dict(cmap='gray', interpolation='nearest')
if sinogram is None:
plt.figure(figsize=(15, 6))
sp = 130
else:
plt.figure(figsize=(11, 11))
sp = 221
plt.subplot(sp + 0)
plt.imshow(sinogram, aspect='auto', **imkwargs)
plt.subplot(sp + 1)
plt.imshow(original, **imkwargs)
plt.subplot(sp + 2)
plt.imshow(result, vmin=original.min(), vmax=original.max(), **imkwargs)
plt.subplot(sp + 3)
plt.imshow(result - original, **imkwargs)
plt.colorbar()
plt.show()
def _rescale_intensity(x):
x = x.astype(float)
x -= x.min()
x /= x.max()
return x
def check_radon_center(shape, circle):
# Create a test image with only a single non-zero pixel at the origin
image = np.zeros(shape, dtype=np.float)
image[(shape[0] // 2, shape[1] // 2)] = 1.
# Calculate the sinogram
theta = np.linspace(0., 180., max(shape), endpoint=False)
sinogram = radon(image, theta=theta, circle=circle)
# The sinogram should be a straight, horizontal line
sinogram_max = np.argmax(sinogram, axis=0)
print(sinogram_max)
assert np.std(sinogram_max) < 1e-6
def test_radon_center():
shapes = [(16, 16), (17, 17)]
circles = [False, True]
for shape, circle in itertools.product(shapes, circles):
yield check_radon_center, shape, circle
rectangular_shapes = [(32, 16), (33, 17)]
for shape in rectangular_shapes:
yield check_radon_center, shape, False
def check_iradon_center(size, theta, circle):
debug = False
# Create a test sinogram corresponding to a single projection
# with a single non-zero pixel at the rotation center
if circle:
sinogram = np.zeros((size, 1), dtype=np.float)
sinogram[size // 2, 0] = 1.
else:
diagonal = int(np.ceil(np.sqrt(2) * size))
sinogram = np.zeros((diagonal, 1), dtype=np.float)
sinogram[sinogram.shape[0] // 2, 0] = 1.
maxpoint = np.unravel_index(np.argmax(sinogram), sinogram.shape)
print('shape of generated sinogram', sinogram.shape)
print('maximum in generated sinogram', maxpoint)
# Compare reconstructions for theta=angle and theta=angle + 180;
# these should be exactly equal
reconstruction = iradon(sinogram, theta=[theta], circle=circle)
reconstruction_opposite = iradon(sinogram, theta=[theta + 180],
circle=circle)
print('rms deviance:',
np.sqrt(np.mean((reconstruction_opposite - reconstruction)**2)))
if debug:
import matplotlib.pyplot as plt
imkwargs = dict(cmap='gray', interpolation='nearest')
plt.figure()
plt.subplot(221)
plt.imshow(sinogram, **imkwargs)
plt.subplot(222)
plt.imshow(reconstruction_opposite - reconstruction, **imkwargs)
plt.subplot(223)
plt.imshow(reconstruction, **imkwargs)
plt.subplot(224)
plt.imshow(reconstruction_opposite, **imkwargs)
plt.show()
assert np.allclose(reconstruction, reconstruction_opposite)
def test_iradon_center():
sizes = [16, 17]
thetas = [0, 90]
circles = [False, True]
for size, theta, circle in itertools.product(sizes, thetas, circles):
yield check_iradon_center, size, theta, circle
def check_radon_iradon(interpolation_type, filter_type):
debug = False
image = PHANTOM
reconstructed = iradon(radon(image), filter=filter_type,
interpolation=interpolation_type)
delta = np.mean(np.abs(image - reconstructed))
print('\n\tmean error:', delta)
if debug:
_debug_plot(image, reconstructed)
if filter_type in ('ramp', 'shepp-logan'):
if interpolation_type == 'nearest':
allowed_delta = 0.03
else:
allowed_delta = 0.025
else:
allowed_delta = 0.05
assert delta < allowed_delta
def test_radon_iradon():
filter_types = ["ramp", "shepp-logan", "cosine", "hamming", "hann"]
interpolation_types = ['linear', 'nearest']
for interpolation_type, filter_type in \
itertools.product(interpolation_types, filter_types):
yield check_radon_iradon, interpolation_type, filter_type
# cubic interpolation is slow; only run one test for it
yield check_radon_iradon, 'cubic', 'shepp-logan'
def test_iradon_angles():
"""
Test with different number of projections
"""
size = 100
# Synthetic data
image = np.tri(size) + np.tri(size)[::-1]
# Large number of projections: a good quality is expected
nb_angles = 200
radon_image_200 = radon(image, theta=np.linspace(0, 180, nb_angles,
endpoint=False))
reconstructed = iradon(radon_image_200)
delta_200 = np.mean(abs(_rescale_intensity(image) - _rescale_intensity(reconstructed)))
assert delta_200 < 0.03
# Lower number of projections
nb_angles = 80
radon_image_80 = radon(image, theta=np.linspace(0, 180, nb_angles,
endpoint=False))
# Test whether the sum of all projections is approximately the same
s = radon_image_80.sum(axis=0)
assert np.allclose(s, s[0], rtol=0.01)
reconstructed = iradon(radon_image_80)
delta_80 = np.mean(abs(image / np.max(image) -
reconstructed / np.max(reconstructed)))
# Loss of quality when the number of projections is reduced
assert delta_80 > delta_200
def check_radon_iradon_minimal(shape, slices):
debug = False
theta = np.arange(180)
image = np.zeros(shape, dtype=np.float)
image[slices] = 1.
sinogram = radon(image, theta)
reconstructed = iradon(sinogram, theta)
print('\n\tMaximum deviation:', np.max(np.abs(image - reconstructed)))
if debug:
_debug_plot(image, reconstructed, sinogram)
if image.sum() == 1:
assert (np.unravel_index(np.argmax(reconstructed), image.shape)
== np.unravel_index(np.argmax(image), image.shape))
def test_radon_iradon_minimal():
shapes = [(3, 3), (4, 4), (5, 5)]
for shape in shapes:
c0, c1 = shape[0] // 2, shape[1] // 2
coordinates = itertools.product((c0 - 1, c0, c0 + 1),
(c1 - 1, c1, c1 + 1))
for coordinate in coordinates:
yield check_radon_iradon_minimal, shape, coordinate
def test_reconstruct_with_wrong_angles():
a = np.zeros((3, 3))
p = radon(a, theta=[0, 1, 2])
iradon(p, theta=[0, 1, 2])
assert_raises(ValueError, iradon, p, theta=[0, 1, 2, 3])
def _random_circle(shape):
# Synthetic random data, zero outside reconstruction circle
np.random.seed(98312871)
image = np.random.rand(*shape)
c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]
r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)
radius = min(shape) // 2
image[r > radius] = 0.
return image
def test_radon_circle():
a = np.ones((10, 10))
with expected_warnings(['reconstruction circle']):
radon(a, circle=True)
# Synthetic data, circular symmetry
shape = (61, 79)
c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]
r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)
radius = min(shape) // 2
image = np.clip(radius - r, 0, np.inf)
image = _rescale_intensity(image)
angles = np.linspace(0, 180, min(shape), endpoint=False)
sinogram = radon(image, theta=angles, circle=True)
assert np.all(sinogram.std(axis=1) < 1e-2)
# Synthetic data, random
image = _random_circle(shape)
sinogram = radon(image, theta=angles, circle=True)
mass = sinogram.sum(axis=0)
average_mass = mass.mean()
relative_error = np.abs(mass - average_mass) / average_mass
print(relative_error.max(), relative_error.mean())
assert np.all(relative_error < 3.2e-3)
def check_sinogram_circle_to_square(size):
from skimage.transform.radon_transform import _sinogram_circle_to_square
image = _random_circle((size, size))
theta = np.linspace(0., 180., size, False)
sinogram_circle = radon(image, theta, circle=True)
argmax_shape = lambda a: np.unravel_index(np.argmax(a), a.shape)
print('\n\targmax of circle:', argmax_shape(sinogram_circle))
sinogram_square = radon(image, theta, circle=False)
print('\targmax of square:', argmax_shape(sinogram_square))
sinogram_circle_to_square = _sinogram_circle_to_square(sinogram_circle)
print('\targmax of circle to square:',
argmax_shape(sinogram_circle_to_square))
error = abs(sinogram_square - sinogram_circle_to_square)
print(np.mean(error), np.max(error))
assert (argmax_shape(sinogram_square)
== argmax_shape(sinogram_circle_to_square))
def test_sinogram_circle_to_square():
for size in (50, 51):
yield check_sinogram_circle_to_square, size
def check_radon_iradon_circle(interpolation, shape, output_size):
# Forward and inverse radon on synthetic data
image = _random_circle(shape)
radius = min(shape) // 2
sinogram_rectangle = radon(image, circle=False)
reconstruction_rectangle = iradon(sinogram_rectangle,
output_size=output_size,
interpolation=interpolation,
circle=False)
sinogram_circle = radon(image, circle=True)
reconstruction_circle = iradon(sinogram_circle,
output_size=output_size,
interpolation=interpolation,
circle=True)
# Crop rectangular reconstruction to match circle=True reconstruction
width = reconstruction_circle.shape[0]
excess = int(np.ceil((reconstruction_rectangle.shape[0] - width) / 2))
s = np.s_[excess:width + excess, excess:width + excess]
reconstruction_rectangle = reconstruction_rectangle[s]
# Find the reconstruction circle, set reconstruction to zero outside
c0, c1 = np.ogrid[0:width, 0:width]
r = np.sqrt((c0 - width // 2)**2 + (c1 - width // 2)**2)
reconstruction_rectangle[r > radius] = 0.
print(reconstruction_circle.shape)
print(reconstruction_rectangle.shape)
np.allclose(reconstruction_rectangle, reconstruction_circle)
def test_radon_iradon_circle():
shape = (61, 79)
interpolations = ('nearest', 'linear')
output_sizes = (None, min(shape), max(shape), 97)
for interpolation, output_size in itertools.product(interpolations,
output_sizes):
yield check_radon_iradon_circle, interpolation, shape, output_size
def test_order_angles_golden_ratio():
from skimage.transform.radon_transform import order_angles_golden_ratio
np.random.seed(1231)
lengths = [1, 4, 10, 180]
for l in lengths:
theta_ordered = np.linspace(0, 180, l, endpoint=False)
theta_random = np.random.uniform(0, 180, l)
for theta in (theta_random, theta_ordered):
indices = [x for x in order_angles_golden_ratio(theta)]
# no duplicate indices allowed
assert len(indices) == len(set(indices))
@test_parallel()
def test_iradon_sart():
debug = False
image = rescale(PHANTOM, 0.8)
theta_ordered = np.linspace(0., 180., image.shape[0], endpoint=False)
theta_missing_wedge = np.linspace(0., 150., image.shape[0], endpoint=True)
for theta, error_factor in ((theta_ordered, 1.),
(theta_missing_wedge, 2.)):
sinogram = radon(image, theta, circle=True)
reconstructed = iradon_sart(sinogram, theta)
if debug:
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(221)
plt.imshow(image, interpolation='nearest')
plt.subplot(222)
plt.imshow(sinogram, interpolation='nearest')
plt.subplot(223)
plt.imshow(reconstructed, interpolation='nearest')
plt.subplot(224)
plt.imshow(reconstructed - image, interpolation='nearest')
plt.show()
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration) =', delta)
assert delta < 0.02 * error_factor
reconstructed = iradon_sart(sinogram, theta, reconstructed)
delta = np.mean(np.abs(reconstructed - image))
print('delta (2 iterations) =', delta)
assert delta < 0.014 * error_factor
reconstructed = iradon_sart(sinogram, theta, clip=(0, 1))
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration, clip) =', delta)
assert delta < 0.018 * error_factor
np.random.seed(1239867)
shifts = np.random.uniform(-3, 3, sinogram.shape[1])
x = np.arange(sinogram.shape[0])
sinogram_shifted = np.vstack(np.interp(x + shifts[i], x,
sinogram[:, i])
for i in range(sinogram.shape[1])).T
reconstructed = iradon_sart(sinogram_shifted, theta,
projection_shifts=shifts)
if debug:
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(221)
plt.imshow(image, interpolation='nearest')
plt.subplot(222)
plt.imshow(sinogram_shifted, interpolation='nearest')
plt.subplot(223)
plt.imshow(reconstructed, interpolation='nearest')
plt.subplot(224)
plt.imshow(reconstructed - image, interpolation='nearest')
plt.show()
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration, shifted sinogram) =', delta)
assert delta < 0.022 * error_factor
if __name__ == "__main__":
from numpy.testing import run_module_suite
run_module_suite()
| bsd-3-clause |
zappala/python-plotting-tcp | plot-queue.py | 1 | 2264 | import optparse
import sys
import matplotlib
from pylab import *
# Class that parses a file of queue events and plots a graph over time
class Plotter:
def __init__(self,file):
""" Initialize plotter with a file name. """
self.file = file
self.data = []
self.min_time = None
self.max_time = None
def parse(self):
""" Parse the data file """
first = None
f = open(self.file)
for line in f.readlines():
if line.startswith("#"):
continue
try:
t,size = line.split()
except:
continue
t = float(t)
try:
size = int(size)
except:
pass
self.data.append((t,size))
if not self.min_time or t < self.min_time:
self.min_time = t
if not self.max_time or t > self.max_time:
self.max_time = t
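    # Editor's note (assumed example, inferred from the parse() and plot()
    # methods; not part of the original script): the input file is expected to
    # hold whitespace-separated "<time> <size>" pairs, with '#' comment lines
    # ignored and an 'x' in the size column marking a dropped packet, e.g.:
    #
    #   # time  queue-size
    #   0.000   0
    #   0.012   3
    #   0.013   x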
def plot(self):
""" Create a line graph of the queue size over time. """
clf()
x = []
y = []
dropX = []
dropY = []
i = 0
max_queue = 20
max = None
for (t,size) in self.data:
if size == 'x':
dropX.append(t)
dropY.append(max_queue+1)
else:
x.append(t)
y.append(size)
plot(x,y)
scatter(dropX,dropY,marker='x',color='black')
xlabel('Time (seconds)')
ylabel('Queue Size (packets)')
xlim([self.min_time,self.max_time])
ylim([0,max_queue+2])
savefig('queue.png')
def parse_options():
# parse options
parser = optparse.OptionParser(usage = "%prog [options]",
version = "%prog 0.1")
parser.add_option("-f","--file",type="string",dest="file",
default=None,
help="file")
(options,args) = parser.parse_args()
return (options,args)
if __name__ == '__main__':
(options,args) = parse_options()
if options.file == None:
print("plot.py -f file")
sys.exit()
p = Plotter(options.file)
p.parse()
p.plot()
| gpl-2.0 |
mtpain/metacorps | projects/viomet/analysis.py | 1 | 16688 | '''
Author: Matthew Turner <maturner01@gmail.com>
Date: April 01, 2017
'''
import numpy as np
import pandas as pd
from collections import Counter
from datetime import datetime
from functools import reduce
from rpy2.robjects.packages import importr
from rpy2 import robjects as ro
from rpy2.robjects import pandas2ri
from app.models import IatvCorpus
from projects.common import (
daily_frequency, daily_metaphor_counts, get_project_data_frame
)
pandas2ri.activate()
R = ro.r
# glmer = importr('lme4').glmer
lme = importr('lme4').lmer
lm = R.lm
extractAIC = importr('stats').extractAIC
coef = importr('stats').coef
def get_pvalue(model):
return importr('base').summary(model).rx2('coefficients')[-1]
DEFAULT_FIRST_DATES = [
datetime(2016, 9, d) for d in range(20, 31)
] + [
datetime(2016, 10, d) for d in range(1, 15)
]
DEFAULT_SECOND_DATES = [
datetime(2016, 10, d) for d in range(15, 32)
] + [
datetime(2016, 11, d) for d in range(1, 30)
]
def partition_AICs(df,
candidate_excited_date_pairs=[()],
model_formula='count ~ phase + network + facet + (1|date)',
verbose=False,
poisson=False
):
'''
Given a dataframe with columns "date", "network", "facet", and "count",
generates a dataframe with the AIC of each partition date.
'''
d = {
'first_date': [],
'last_date': [],
'AIC': [],
'coef': [],
'model': []
}
for first_date, last_date in candidate_excited_date_pairs:
phase_df = add_phases(df, first_date, last_date)
# If there are not two states (ground and excited), don't model.
# This happens when neither first or last is in the df.date column
# or if the excited state takes up all available dates, e.g. 9-1 to
# 11-29 and there is no data for 11-30.
if (len(phase_df.state.unique()) < 2
or np.sum(phase_df.state == 'excited') < 10):
continue
d['first_date'].append(first_date)
d['last_date'].append(last_date)
if verbose:
print(
'Calculating for d1={} & d2={}'.format(first_date, last_date)
)
if poisson:
# Hacky, but need to transform to integer for poisson and
# there are at most two shows on a day, so the fraction part
# of frequency is 1/2 or 0.
phase_df.freq *= 2
model = lm(
model_formula,
family='poisson',
data=phase_df
)
d['coef'].append(list(coef(model)))
else:
model = lm(
model_formula,
data=phase_df
)
d['coef'].append(list(coef(model)))
d['AIC'].append(extractAIC(model)[1])
d['model'].append(model)
return pd.DataFrame(d)
def add_phases(df, date1=datetime(2016, 9, 26),
date2=datetime(2016, 10, 20)):
'''
Create a dataframe with a new 'state' column
'''
phase = []
ret = df.copy()
# XXX super confusing with all the "date"s floating around.
for i, d in enumerate([d for d in df.date]):
if date1.date() > d.date():
phase.append('ground')
elif date1.date() <= d.date() and d.date() <= date2.date():
phase.append('excited')
else:
phase.append('ground')
ret['state'] = phase
return ret
def relative_likelihood(aic_min, aic_other):
return np.exp((aic_min - aic_other)/2.0)
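# Editor's note (illustrative sketch, not part of the original module): a model
# sitting 4.6 AIC units above the minimum is roughly 10% as probable as the
# best model to minimize information loss, e.g.:
#
#     relative_likelihood(100.0, 104.6)  # exp(-2.3) ~= 0.10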
class PartitionInfo:
def __init__(self,
partition_date_1,
partition_date_2,
f_ground,
f_excited):
self.partition_date_1 = partition_date_1
self.partition_date_2 = partition_date_2
self.f_ground = f_ground
self.f_excited = f_excited
@classmethod
def from_fit(cls, fit):
partition_date_1 = fit.first_date
partition_date_2 = fit.last_date
# R model returns the excited state freq as intercept b/c alphabetical
f_excited = fit.coef[0]
# The slope is the second coefficient; it will be negative if
# hypothesis is correct.
f_ground = f_excited + fit.coef[1]
return cls(partition_date_1, partition_date_2, f_ground, f_excited)
def partition_info_table(viomet_df,
date_range,
partition_infos):
'''
TODO
'''
index_keys = [('MSNBC', 'MSNBCW'),
('CNN', 'CNNW'),
('Fox News', 'FOXNEWSW')]
columns = ['$t_0^{(2)}$', '$t^{(2)}_{N^{(2)}}$', '$f^{(1)}$',
'$f^{(2)}$', 'reactivity', 'total uses']
counts_df = daily_metaphor_counts(
viomet_df, date_range, by=['network']
)
data = []
for ik in index_keys:
key = ik[1]
pi = partition_infos[key]
data.append([
pi.partition_date_1,
pi.partition_date_2,
pi.f_ground,
pi.f_excited,
((pi.f_excited - pi.f_ground) / pi.f_ground),
counts_df[key].sum()
])
index = [ik[0] for ik in index_keys]
return pd.DataFrame(data=data, index=index, columns=columns)
def by_network_word_table(viomet_df,
date_range,
partition_infos,
words=['hit', 'beat', 'attack']
):
'''
Second table in paper
'''
networks = ['MSNBC', 'CNN', 'Fox News']
columns = ['fg', 'fe', 'reactivity', 'total uses']
# index_tuples = [(net, word) for net in networks for word in words]
index_tuples = [(word, net) for word in words for net in networks]
index = pd.MultiIndex.from_tuples(
index_tuples, names=['Violent Word', 'Network']
)
df = pd.DataFrame(index=index, columns=columns, data=0.0)
counts_df = daily_metaphor_counts(
viomet_df, date_range, by=['network', 'facet_word']
)
for idx, netid in enumerate(['MSNBCW', 'CNNW', 'FOXNEWSW']):
sum_g, n_g = _get_ground(
counts_df, netid, partition_infos, words=words
)
sum_e, n_e = _get_excited(
counts_df, netid, partition_infos, words=words
)
freq_g = sum_g / n_g
freq_e = sum_e / n_e
reactivity = ((freq_e - freq_g) / freq_g)
totals = sum_g + sum_e
network = networks[idx]
for word in words:
df.loc[word, network] = [
freq_g[word], freq_e[word], reactivity[word], totals[word]
]
fancy_columns = ['$f^{(1)}$', '$f^{(2)}$', 'reactivity', 'total uses']
df.columns = fancy_columns
return df
def model_fits_table(viomet_df, date_range, network_fits, top_n=10):
'''
Relative likelihoods of the null model vs the best dynamic model fit, and
of greater-AIC dynamic model fits vs the best dynamic model fit.
Arguments:
network_fits (dict): keyed by network, values are lists where the
last element in the list is the dataframe of alternate
start and end dates with AIC values of the associated
fitted model. TODO: improve network_fits setup; it's opaque,
each value of that dict is a list with three elements I think.
top_n (int): number of top-performing models by relative likelihood
to include in table.
'''
networks = ['MSNBCW', 'CNNW', 'FOXNEWSW']
ret = {}
for network in networks:
network_df = network_fits[network][-1]
# Need to extract minimum AIC as the best; likelihood relative to it.
low = network_df.AIC.min()
network_df.loc[:, 'rl'] = relative_likelihood(low, network_df.AIC)
# The least AIC min has a relative likelihood of 1.0; remove it.
# network_df = network_df[network_df.rl != 1.0]
network_df.sort_values('rl', ascending=False, inplace=True)
# If there are exact duplicates of relative likelihood it's due to
# there not being data between one of the candidate partition dates.
network_df.drop_duplicates(subset='rl', inplace=True)
network_df = network_df.iloc[:top_n]
network_df['pvalue'] = [
get_pvalue(model) for model in list(network_df.model)
]
# Multiply by -1.0 b/c excited
# treated as "less" than ground due to alpha ordering in R.
# c1 is the ``excited'' region frequency, which is really just the
# second region; c2 is ground frequency - excited frequency. Thus
# c1 + c2 = ground frequency.
network_df['reactivity'] = [
-1.0 * (c2 / (c1 + c2)) for c1, c2 in network_df['coef']
]
ret_df = network_df[
['rl', 'first_date', 'last_date', 'reactivity', 'pvalue']
]
ret_df.columns = [
'rel. lik.', '$t_0^{(2)}$', '$t^{(2)}_{N^{(2)}}$',
'reactivity', '$P(<|t|)$'
]
ret.update({network: ret_df})
return ret
def by_network_subj_obj_table(viomet_df,
date_range,
partition_infos,
subjects=['Barack Obama', 'Mitt Romney'],
objects=['Barack Obama', 'Mitt Romney']):
'''
TODO
'''
networks = ['MSNBC', 'CNN', 'Fox News']
columns = ['fg', 'fe', 'reactivity', 'total uses']
# index_tuples = [(net, word) for net in networks for word in words]
subj_objs = ["Subject=" + subj for subj in subjects] \
+ ["Object=" + obj for obj in objects]
index_tuples = [(so, net) for so in subj_objs for net in networks]
index = pd.MultiIndex.from_tuples(
index_tuples, names=['Subject/Object', 'Network']
)
df = pd.DataFrame(index=index, columns=columns, data=0.0)
# Next two blocks support more than two subjects or objects.
subject_rows = reduce(
lambda x, y: (viomet_df.subjects == x) | (viomet_df.subjects == y),
subjects
)
object_rows = reduce(
lambda x, y: (viomet_df.objects == x) | (viomet_df.objects == y),
objects
)
subject_df = viomet_df[subject_rows]
object_df = viomet_df[object_rows]
subject_counts_df = daily_metaphor_counts(
subject_df, date_range, by=['network', 'subjects'],
)
object_counts_df = daily_metaphor_counts(
object_df, date_range, by=['network', 'objects']
)
for idx, network_id in enumerate(['MSNBCW', 'CNNW', 'FOXNEWSW']):
# Ground state data.
sum_subj_g, n_subj_g = _get_ground(
subject_counts_df, network_id, partition_infos
)
sum_obj_g, n_obj_g = _get_ground(
object_counts_df, network_id, partition_infos
)
# Excited state data.
sum_subj_e, n_subj_e = _get_excited(
subject_counts_df, network_id, partition_infos
)
sum_obj_e, n_obj_e = _get_excited(
object_counts_df, network_id, partition_infos
)
freq_subj_g = sum_subj_g / n_subj_g
freq_obj_g = sum_obj_g / n_obj_g
freq_subj_e = sum_subj_e / n_subj_e
freq_obj_e = sum_obj_e / n_obj_e
reactivity_diff_subj = ((freq_subj_e - freq_subj_g) / 2.0)
reactivity_diff_obj = ((freq_obj_e - freq_obj_g) / 2.0)
totals_subj = sum_subj_g + sum_subj_e
totals_obj = sum_obj_g + sum_obj_e
network = networks[idx]
for subject in subjects:
df.loc["Subject=" + subject, network] = [
freq_subj_g[subject],
freq_subj_e[subject],
reactivity_diff_subj[subject],
totals_subj[subject]
]
for object_ in objects:
df.loc["Object=" + object_, network] = [
freq_obj_g[object_],
freq_obj_e[object_],
reactivity_diff_obj[object_],
totals_obj[object_]
]
fancy_columns = ['$f^{(1)}$', '$f^{(2)}$', 'reactivity', 'total uses']
df.columns = fancy_columns
return df
def _get_ground(counts_df, network_id, partition_infos,
words=None, subj_objs=None):
cdf = counts_df
net_pi = partition_infos[network_id]
ground_dates = ((cdf.index < net_pi.partition_date_1.date()) |
(cdf.index > net_pi.partition_date_2.date()))
ret = cdf[ground_dates][network_id].sum()
n_ground = Counter(ground_dates)[True]
if words is not None:
# Only take the indices of interest; these are 1D.
return ret.loc[words], n_ground
else:
return ret, n_ground
def _get_excited(counts_df, network_id, partition_infos,
words=None, subj_objs=None):
cdf = counts_df
net_pi = partition_infos[network_id]
excited_dates = ((cdf.index >= net_pi.partition_date_1.date()) &
(cdf.index <= net_pi.partition_date_2.date()))
ret = cdf[excited_dates][network_id].sum()
n_excited = Counter(excited_dates)[True]
if words is not None:
# Only take the indices of interest; these are 1D.
return ret.loc[words], n_excited
else:
return ret, n_excited
def viomet_analysis_setup(year=2012):
'''
Returns:
viomet_df, date_range, and partition_infos
'''
if year == 2012:
iatv_corpus_name = 'Viomet Sep-Nov 2012'
metaphors_url = 'http://metacorps.io/static/data/' + \
'viomet-2012-snapshot-project-df.csv'
date_range = pd.date_range('2012-9-1', '2012-11-30', freq='D')
if year == 2016:
iatv_corpus_name = 'Viomet Sep-Nov 2016'
metaphors_url = 'http://metacorps.io/static/data/' + \
'viomet-2016-snapshot-project-df.csv'
date_range = pd.date_range('2016-9-1', '2016-11-30', freq='D')
viomet_df = get_project_data_frame(metaphors_url)
fits = fit_all_networks(viomet_df, date_range, iatv_corpus_name)
networks = ['MSNBCW', 'CNNW', 'FOXNEWSW']
partition_infos = {network: fits[network][0]
for network in networks}
return viomet_df, date_range, partition_infos
def fit_all_networks(df, date_range, iatv_corpus_name,
by_network=True, poisson=False, verbose=False):
ic = IatvCorpus.objects(name=iatv_corpus_name)[0]
# The first date of date_range can't be the last excited state date.
last_excited_date_candidates = date_range[1:]
candidate_excited_date_pairs = [
(fd, ld)
for ld in last_excited_date_candidates
for fd in date_range[date_range < ld]
]
if by_network:
if iatv_corpus_name is None:
raise RuntimeError(
'If by_network=True, must provide iatv_corpus_name'
)
network_freq = daily_frequency(df, date_range, ic, by=['network'])
results = {}
for network in ['MSNBCW', 'CNNW', 'FOXNEWSW']:
single_network = \
network_freq[network].to_frame().reset_index().dropna()
# this is ugly but required to match partition_AICs at this time
single_network.columns = ['date', 'freq']
all_fits = partition_AICs(single_network,
candidate_excited_date_pairs,
model_formula='freq ~ state',
poisson=poisson,
verbose=verbose)
# The first date of the second level state cannot be the first
# date in the dataset.
all_fits = all_fits[all_fits.first_date != datetime(2012, 9, 1)]
# The best fit is the one with the minimum AIC.
best_fit = all_fits.iloc[all_fits['AIC'].idxmin()]
# PartitionInfo provides a data structure wrapper around data row.
pinfo = PartitionInfo.from_fit(best_fit)
if poisson:
pinfo.f_ground /= 2.0
pinfo.f_excited /= 2.0
results.update({network: (pinfo, best_fit, all_fits)})
return results
else:
all_freq = daily_frequency(df, date_range, ic).reset_index().dropna()
all_freq.columns = ['date', 'freq']
all_fits = partition_AICs(all_freq,
candidate_excited_date_pairs,
model_formula='freq ~ state')
best_fit = all_fits.iloc[all_fits['AIC'].idxmin()]
return best_fit
| bsd-3-clause |
avalentino/hashsum | tests/benchmarks.py | 1 | 4133 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import timeit
import collections
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir))
DATAFILE = 'hashsum_test_data.dat'
DATASIZE = 1024**3 # 1GB
CHUNKSIZE = 1024**2 # 1MB
BASEBLOCKSIZE = 8192 # bytes (8KB)
NRUNS = 3
ALGO = 'md5' # 'sha512'
RESULTFILE = 'benchmarks.dat'
RESULTPLOT = 'benchmarks.svg'
def generate_dataset(filename, size, chunksize=CHUNKSIZE):
import numpy as np
print('Generating dataset at', filename, '...')
data = np.random.randint(0, 255, size=chunksize, dtype=np.uint8).tobytes()
nblocks, spare = divmod(size, chunksize)
with open(filename, 'wb') as fd:
for i in range(nblocks):
fd.write(data)
else:
fd.write(data[:spare])
print('Dataset generation completed')
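# Editor's note (illustrative arithmetic only, not part of the original
# script): with the defaults above, generate_dataset(DATAFILE, DATASIZE)
# writes 1024 chunks of 1 MB and a zero-length spare, i.e. exactly 1 GB of
# pseudo-random bytes.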
def save_data(data, filename=RESULTFILE):
import pickle
with open(filename, 'wb') as fd:
pickle.dump(data, fd)
def load_data(filename=RESULTFILE):
import pickle
with open(RESULTFILE, 'rb') as fd:
data = pickle.load(fd)
return data
def plot_data(testdata):
import numpy as np
from matplotlib import pyplot as plt
plt.figure(figsize=(14, 6))
plt.subplot(1, 2, 1)
label = 'sequential'
data = testdata['_compute_file_checksum_sequential']
x_seq = np.asarray(sorted(data.keys()), dtype='float')
y_seq = np.asarray([data[key] for key in x_seq])
x_seq *= BASEBLOCKSIZE
plt.semilogx(x_seq, y_seq, 'o-', label=label)
plt.grid(True)
label = 'threading'
data = testdata['_compute_file_checksum_threading']
x_thr = np.asarray(sorted(data.keys()), dtype='float')
y_thr = np.asarray([data[key] for key in x_thr])
x_thr *= BASEBLOCKSIZE
plt.semilogx(x_thr, y_thr, 'o-', label=label)
plt.xlabel('Block size')
plt.ylabel('Time [s]')
plt.title('Checksum computation benchmark')
plt.legend(loc='best')
plt.subplot(1, 2, 2)
plt.grid(True)
plt.semilogx(
x_seq, (y_seq / y_thr - 1) * 100., 'o-', label='Speed up (thr)')
plt.semilogx(x_seq, (np.min(y_seq) / y_thr - 1) * 100., 'o-',
label='Speed up (thr)\nvs max seq speed')
plt.axvline(x_seq[np.argmin(y_seq)], color='k')
plt.axvline(x_thr[np.argmin(y_thr)], color='k')
plt.xlabel('Block size')
plt.ylabel('Speed up [%]')
plt.title('Speed up')
plt.legend(loc='best')
plt.savefig(RESULTPLOT)
plt.show()
def main():
if not os.path.isfile(DATAFILE):
generate_dataset(DATAFILE, DATASIZE)
elif os.stat(DATAFILE).st_size != DATASIZE:
os.remove(DATAFILE)
generate_dataset(DATAFILE, DATASIZE)
print('DATAFILE:', DATAFILE)
print('DATASIZE: {:.3f} MB'.format(DATASIZE / 1024**2))
print('Test {} hash computation'.format(ALGO))
functions = (
'_compute_file_checksum_threading',
'_compute_file_checksum_sequential',
)
multipliers = (
8*1024, 4*1024, 2*1024, 1024,
512, 256, 128, 64, 32, 16, 8, 4, 2)
data = collections.defaultdict(dict)
for multiplier in multipliers:
blocksize = BASEBLOCKSIZE * multiplier
for function in functions:
if 'sequential' in function:
expr = 'hashsum.main("-a=%s", "%s")' % (ALGO, DATAFILE)
else:
expr = 'hashsum.main("-a=%s", "-m", "%s")' % (ALGO, DATAFILE)
print('function:', function)
print('blocksize: {:.1f} KB ({} * {})'.format(
blocksize/1024, BASEBLOCKSIZE, multiplier))
print('timeit:', expr)
t = timeit.timeit(
expr,
'import hashsum\n'
'hashsum._QUEUE_LEN = 10\n'
'hashsum.BLOCKSIZE = %s\n' % blocksize,
number=NRUNS) / NRUNS
print('Mean execution time: %f sec' % t)
data[function][multiplier] = t
save_data(data, RESULTFILE)
plot_data(data)
if __name__ == '__main__':
if True:
main()
else:
plot_data(load_data(RESULTFILE))
| bsd-3-clause |
eyantrainternship/eYSIP_2015_Depth_Mapping_Kinect | Resources/Examples/PycharmProjects/Python freenect examples/demo_mp_async.py | 4 | 1034 | #!/usr/bin/env python
import freenect
import matplotlib.pyplot as mp
import signal
import frame_convert
mp.ion()
image_rgb = None
image_depth = None
keep_running = True
def display_depth(dev, data, timestamp):
global image_depth
data = frame_convert.pretty_depth(data)
mp.gray()
mp.figure(1)
if image_depth:
image_depth.set_data(data)
else:
image_depth = mp.imshow(data, interpolation='nearest', animated=True)
mp.draw()
def display_rgb(dev, data, timestamp):
global image_rgb
mp.figure(2)
if image_rgb:
image_rgb.set_data(data)
else:
image_rgb = mp.imshow(data, interpolation='nearest', animated=True)
mp.draw()
def body(*args):
if not keep_running:
raise freenect.Kill
def handler(signum, frame):
global keep_running
keep_running = False
print('Press Ctrl-C in terminal to stop')
signal.signal(signal.SIGINT, handler)
freenect.runloop(depth=display_depth,
video=display_rgb,
body=body)
| cc0-1.0 |
nutils/nutils | examples/laplace.py | 1 | 7319 | #! /usr/bin/env python3
#
# In this script we solve the Laplace equation :math:`u_{,kk} = 0` on a unit
# square domain :math:`Ω` with boundary :math:`Γ`, subject to boundary
# conditions:
#
# .. math:: u &= 0 && Γ_{\rm left}
#
# ∂_n u &= 0 && Γ_{\rm bottom}
#
# ∂_n u &= \cos(1) \cosh(x_1) && Γ_{\rm right}
#
# u &= \cosh(1) \sin(x_0) && Γ_{\rm top}
#
# This case is constructed to contain all combinations of homogenous and
# heterogeneous, Dirichlet and Neumann type boundary conditions, as well as to
# have a known exact solution:
#
# .. math:: u_{\rm exact} = \sin(x_0) \cosh(x_1).
#
# We start by importing the necessary modules.
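# Editor's aside (a hedged verification sketch, not part of the original
# script): that this exact solution indeed satisfies the Laplace equation can
# be checked symbolically, for instance with sympy:
#
#     import sympy as sp
#     x0, x1 = sp.symbols('x0 x1')
#     u = sp.sin(x0) * sp.cosh(x1)
#     assert sp.simplify(sp.diff(u, x0, 2) + sp.diff(u, x1, 2)) == 0
#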
import nutils, numpy
# The main function defines the parameter space for the script. Configurable
# parameters are the mesh density (in number of elements along an edge),
# element type (square, triangle, or mixed), type of basis function (std or
# spline, with availability depending on element type), and polynomial degree.
def main(nelems: 'number of elements along edge' = 10,
etype: 'type of elements (square/triangle/mixed)' = 'square',
btype: 'type of basis function (std/spline)' = 'std',
degree: 'polynomial degree' = 1):
# A unit square domain is created by calling the
# :func:`nutils.mesh.unitsquare` mesh generator, with the number of elements
# along an edge as the first argument, and the type of elements ("square",
# "triangle", or "mixed") as the second. The result is a topology object
# ``domain`` and a vector-valued geometry function ``geom``.
domain, geom = nutils.mesh.unitsquare(nelems, etype)
# To be able to write index based tensor contractions, we need to bundle all
# relevant functions together in a namespace. Here we add the geometry ``x``,
# a scalar ``basis``, and the solution ``u``. The latter is formed by
# contracting the basis with a to-be-determined solution vector ``?lhs``.
ns = nutils.function.Namespace()
ns.x = geom
ns.basis = domain.basis(btype, degree=degree)
ns.u = 'basis_n ?lhs_n'
# We are now ready to implement the Laplace equation. In weak form, the
# solution is a scalar field :math:`u` for which:
#
# .. math:: ∀ v: ∫_Ω v_{,k} u_{,k} - ∫_{Γ_n} v f = 0.
#
# By linearity the test function :math:`v` can be replaced by the basis that
# spans its space. The result is an integral ``res`` that evaluates to a
# vector matching the size of the function space.
res = domain.integral('basis_n,i u_,i d:x' @ ns, degree=degree*2)
res -= domain.boundary['right'].integral('basis_n cos(1) cosh(x_1) d:x' @ ns, degree=degree*2)
# The Dirichlet constraints are set by finding the coefficients that minimize
# the error:
#
# .. math:: \min_u ∫_{\Gamma_d} (u - u_d)^2
#
# The resulting ``cons`` array holds numerical values for all the entries of
# ``?lhs`` that contribute (up to ``droptol``) to the minimization problem.
# All remaining entries are set to ``NaN``, signifying that these degrees of
# freedom are unconstrained.
sqr = domain.boundary['left'].integral('u^2 d:x' @ ns, degree=degree*2)
sqr += domain.boundary['top'].integral('(u - cosh(1) sin(x_0))^2 d:x' @ ns, degree=degree*2)
cons = nutils.solver.optimize('lhs', sqr, droptol=1e-15)
# The unconstrained entries of ``?lhs`` are to be determined such that the
# residual vector evaluates to zero in the corresponding entries. This step
# involves a linearization of ``res``, resulting in a jacobian matrix and
# right hand side vector that are subsequently assembled and solved. The
# resulting ``lhs`` array matches ``cons`` in the constrained entries.
lhs = nutils.solver.solve_linear('lhs', res, constrain=cons)
# Once all entries of ``?lhs`` are established, the corresponding solution can
# be visualised by sampling values of ``ns.u`` along with physical
# coordinates ``ns.x``, with the solution vector provided via the
# ``arguments`` dictionary. The sample members ``tri`` and ``hull`` provide
# additional inter-point information required for drawing the mesh and
# element outlines.
bezier = domain.sample('bezier', 9)
x, u = bezier.eval(['x_i', 'u'] @ ns, lhs=lhs)
nutils.export.triplot('solution.png', x, u, tri=bezier.tri, hull=bezier.hull)
# To confirm that our computation is correct, we use our knowledge of the
# analytical solution to evaluate the L2-error of the discrete result.
err = domain.integral('(u - sin(x_0) cosh(x_1))^2 d:x' @ ns, degree=degree*2).eval(lhs=lhs)**.5
nutils.log.user('L2 error: {:.2e}'.format(err))
return cons, lhs, err
# If the script is executed (as opposed to imported), :func:`nutils.cli.run`
# calls the main function with arguments provided from the command line. For
# example, to keep with the default arguments simply run :sh:`python3
# laplace.py`. To select mixed elements and quadratic basis functions add
# :sh:`python3 laplace.py etype=mixed degree=2`.
if __name__ == '__main__':
nutils.cli.run(main)
# Once a simulation is developed and tested, it is good practice to save a few
# strategic return values for regression testing. The :mod:`nutils.testing`
# module, which builds on the standard :mod:`unittest` framework, facilitates
# this by providing :func:`nutils.testing.TestCase.assertAlmostEqual64` for the
# embedding of desired results as compressed base64 data.
class test(nutils.testing.TestCase):
@nutils.testing.requires('matplotlib')
def test_default(self):
cons, lhs, err = main(nelems=4, etype='square', btype='std', degree=1)
with self.subTest('constraints'): self.assertAlmostEqual64(cons, '''
eNrbKPv1QZ3ip9sL1BgaILDYFMbaZwZj5ZnDWNfNAeWPESU=''')
with self.subTest('left-hand side'): self.assertAlmostEqual64(lhs, '''
eNoBMgDN/7Ed9eB+IfLboCaXNKc01DQaNXM14jXyNR82ZTa+NpI2oTbPNhU3bjf7Ngo3ODd+N9c3SNEU
1g==''')
with self.subTest('L2-error'):
self.assertAlmostEqual(err, 1.63e-3, places=5)
@nutils.testing.requires('matplotlib')
def test_spline(self):
cons, lhs, err = main(nelems=4, etype='square', btype='spline', degree=2)
with self.subTest('constraints'): self.assertAlmostEqual64(cons, '''
eNqrkmN+sEfhzF0xleRbDA0wKGeCYFuaIdjK5gj2aiT2VXMAJB0VAQ==''')
with self.subTest('left-hand side'): self.assertAlmostEqual64(lhs, '''
eNqrkmN+sEfhzF0xleRbrsauxsnGc43fGMuZJJgmmNaZ7jBlN7M08wLCDLNFZh/NlM0vmV0y+2CmZV5p
vtr8j9kfMynzEPPF5lfNAcuhGvs=''')
with self.subTest('L2-error'):
self.assertAlmostEqual(err, 8.04e-5, places=7)
@nutils.testing.requires('matplotlib')
def test_mixed(self):
cons, lhs, err = main(nelems=4, etype='mixed', btype='std', degree=2)
with self.subTest('constraints'): self.assertAlmostEqual64(cons, '''
eNorfLZF2ucJQwMC3pR7+QDG9lCquAtj71Rlu8XQIGfC0FBoiqweE1qaMTTsNsOvRtmcoSHbHL+a1UD5
q+YAxhcu1g==''')
with self.subTest('left-hand side'): self.assertAlmostEqual64(lhs, '''
eNorfLZF2ueJq7GrcYjxDJPpJstNbsq9fOBr3Gh8xWS7iYdSxd19xseMP5hImu5UZbv1xljOxM600DTW
NN/0k2mC6SPTx6Z1pnNMGc3kzdaaPjRNMbMyEzWzNOsy223mBYRRZpPNJpktMks1azM7Z7bRbIXZabNX
ZiLmH82UzS3Ns80vmj004za/ZPYHCD+Y8ZlLmVuYq5kHm9eahwDxavPF5lfNAWFyPdk=''')
with self.subTest('L2-error'):
self.assertAlmostEqual(err, 1.25e-4, places=6)
| mit |
zuku1985/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
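# Editor's sketch (hypothetical usage, not part of the original module): two
# identical sets, each holding a single bicluster, have a consensus score of 1.
def _example_consensus_score():
    rows = np.array([[True, True, False]])
    cols = np.array([[True, False, True]])
    return consensus_score((rows, cols), (rows, cols))  # -> 1.0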
| bsd-3-clause |
thunderhoser/GewitterGefahr | gewittergefahr/dissertation/plot_data_augmentation.py | 1 | 9894 | """Plots data augmentation."""
import argparse
import numpy
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot
from gewittergefahr.gg_utils import radar_utils
from gewittergefahr.gg_utils import time_conversion
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
from gewittergefahr.deep_learning import cnn
from gewittergefahr.deep_learning import input_examples
from gewittergefahr.deep_learning import deep_learning_utils as dl_utils
from gewittergefahr.deep_learning import training_validation_io as trainval_io
from gewittergefahr.scripts import plot_input_examples as plot_examples
RADAR_FIELD_NAME = radar_utils.REFL_NAME
RADAR_HEIGHT_M_AGL = 3000
NORMALIZATION_TYPE_STRING = dl_utils.Z_NORMALIZATION_TYPE_STRING
X_TRANSLATIONS_PX = numpy.array([3], dtype=int)
Y_TRANSLATIONS_PX = numpy.array([3], dtype=int)
CCW_ROTATION_ANGLES_DEG = numpy.array([30.])
NOISE_STANDARD_DEVIATION = 0.1
TITLE_FONT_SIZE = 30
FIGURE_RESOLUTION_DPI = 300
FILE_NAME_TIME_FORMAT = '%Y-%m-%d-%H%M%S'
EXAMPLE_FILE_ARG_NAME = 'input_example_file_name'
EXAMPLE_INDICES_ARG_NAME = 'example_indices'
NUM_ROWS_ARG_NAME = 'num_radar_rows'
NUM_COLUMNS_ARG_NAME = 'num_radar_columns'
NORMALIZATION_FILE_ARG_NAME = 'normalization_file_name'
OUTPUT_DIR_ARG_NAME = 'output_dir_name'
EXAMPLE_FILE_HELP_STRING = (
'Path to example file. Radar images will be read from here by '
'`input_examples.read_example_file`.')
EXAMPLE_INDICES_HELP_STRING = (
'1-D list of example indices in file. This script will create one figure '
'for each example.')
NUM_ROWS_HELP_STRING = (
'Number of rows in radar grid. To use all rows in the file, leave this '
'alone.')
NUM_COLUMNS_HELP_STRING = (
'Number of columns in radar grid. To use all columns in the file, leave '
'this alone.')
NORMALIZATION_FILE_HELP_STRING = (
'Path to normalization file. Will be read by `deep_learning_utils.'
'read_normalization_params_from_file` and used to normalize radar images.')
OUTPUT_DIR_HELP_STRING = (
'Name of output directory (figures will be saved here).')
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
'--' + EXAMPLE_FILE_ARG_NAME, type=str, required=True,
help=EXAMPLE_FILE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + EXAMPLE_INDICES_ARG_NAME, type=int, nargs='+', required=True,
help=EXAMPLE_INDICES_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + NUM_ROWS_ARG_NAME, type=int, required=False, default=-1,
help=NUM_ROWS_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + NUM_COLUMNS_ARG_NAME, type=int, required=False, default=-1,
help=NUM_COLUMNS_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + NORMALIZATION_FILE_ARG_NAME, type=str, required=True,
help=NORMALIZATION_FILE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + OUTPUT_DIR_ARG_NAME, type=str, required=True,
help=OUTPUT_DIR_HELP_STRING)
def _plot_one_example(
orig_radar_matrix, translated_radar_matrix, rotated_radar_matrix,
noised_radar_matrix, output_dir_name, full_storm_id_string,
storm_time_unix_sec):
"""Plots original and augmented radar images for one example.
M = number of rows in grid
N = number of columns in grid
:param orig_radar_matrix: M-by-N-by-1-by-1 numpy array with original values.
:param translated_radar_matrix: Same but with translated values.
:param rotated_radar_matrix: Same but with rotated values.
:param noised_radar_matrix: Same but with noised values.
:param output_dir_name: Name of output directory (figure will be saved
here).
:param full_storm_id_string: Storm ID.
:param storm_time_unix_sec: Storm time.
"""
dummy_heights_m_agl = numpy.array([1000, 2000, 3000, 4000], dtype=int)
concat_radar_matrix = numpy.concatenate((
orig_radar_matrix, translated_radar_matrix, rotated_radar_matrix,
noised_radar_matrix
), axis=-2)
training_option_dict = {
trainval_io.SOUNDING_FIELDS_KEY: None,
trainval_io.RADAR_FIELDS_KEY: [RADAR_FIELD_NAME],
trainval_io.RADAR_HEIGHTS_KEY: dummy_heights_m_agl
}
model_metadata_dict = {cnn.TRAINING_OPTION_DICT_KEY: training_option_dict}
handle_dict = plot_examples.plot_one_example(
list_of_predictor_matrices=[concat_radar_matrix],
model_metadata_dict=model_metadata_dict,
pmm_flag=True, plot_sounding=False, allow_whitespace=True,
plot_panel_names=False, add_titles=False, label_colour_bars=True,
num_panel_rows=2)
figure_object = handle_dict[plot_examples.RADAR_FIGURES_KEY][0]
axes_object_matrix = handle_dict[plot_examples.RADAR_AXES_KEY][0]
axes_object_matrix[0, 0].set_title('(a) Original', fontsize=TITLE_FONT_SIZE)
axes_object_matrix[0, 1].set_title(
'(b) Translated', fontsize=TITLE_FONT_SIZE
)
axes_object_matrix[1, 0].set_title(
r'(c) Rotated 30$^{\circ}$ clockwise', fontsize=TITLE_FONT_SIZE
)
axes_object_matrix[1, 1].set_title('(d) Noised', fontsize=TITLE_FONT_SIZE)
output_file_name = '{0:s}/storm={1:s}_time={2:s}.jpg'.format(
output_dir_name, full_storm_id_string.replace('_', '-'),
time_conversion.unix_sec_to_string(
storm_time_unix_sec, FILE_NAME_TIME_FORMAT)
)
print('Saving figure to: "{0:s}"...'.format(output_file_name))
figure_object.savefig(
output_file_name, dpi=FIGURE_RESOLUTION_DPI,
pad_inches=0, bbox_inches='tight'
)
pyplot.close(figure_object)
def _run(example_file_name, example_indices, num_radar_rows, num_radar_columns,
normalization_file_name, output_dir_name):
"""Plots data augmentation.
This is effectively the main method.
:param example_file_name: See documentation at top of file.
:param example_indices: Same.
:param num_radar_rows: Same.
:param num_radar_columns: Same.
:param normalization_file_name: Same.
:param output_dir_name: Same.
"""
if num_radar_rows <= 0:
num_radar_rows = None
if num_radar_columns <= 0:
num_radar_columns = None
file_system_utils.mkdir_recursive_if_necessary(
directory_name=output_dir_name)
print('Reading data from: "{0:s}"...'.format(example_file_name))
example_dict = input_examples.read_example_file(
netcdf_file_name=example_file_name,
read_all_target_vars=True, include_soundings=False,
num_rows_to_keep=num_radar_rows, num_columns_to_keep=num_radar_columns,
radar_field_names_to_keep=[RADAR_FIELD_NAME],
radar_heights_to_keep_m_agl=numpy.array([RADAR_HEIGHT_M_AGL], dtype=int)
)
if input_examples.REFL_IMAGE_MATRIX_KEY in example_dict:
radar_matrix = example_dict[input_examples.REFL_IMAGE_MATRIX_KEY]
else:
radar_matrix = example_dict[input_examples.RADAR_IMAGE_MATRIX_KEY]
num_examples_total = radar_matrix.shape[0]
error_checking.assert_is_geq_numpy_array(example_indices, 0)
error_checking.assert_is_less_than_numpy_array(
example_indices, num_examples_total)
radar_matrix = radar_matrix[example_indices, ...]
full_storm_id_strings = [
example_dict[input_examples.FULL_IDS_KEY][k] for k in example_indices
]
storm_times_unix_sec = example_dict[input_examples.STORM_TIMES_KEY][
example_indices]
radar_matrix = dl_utils.normalize_radar_images(
radar_image_matrix=radar_matrix, field_names=[RADAR_FIELD_NAME],
normalization_type_string=NORMALIZATION_TYPE_STRING,
normalization_param_file_name=normalization_file_name)
num_examples = radar_matrix.shape[0]
dummy_target_values = numpy.full(num_examples, 0, dtype=int)
radar_matrix = trainval_io._augment_radar_images(
list_of_predictor_matrices=[radar_matrix],
target_array=dummy_target_values,
x_translations_pixels=X_TRANSLATIONS_PX,
y_translations_pixels=Y_TRANSLATIONS_PX,
ccw_rotation_angles_deg=CCW_ROTATION_ANGLES_DEG,
noise_standard_deviation=NOISE_STANDARD_DEVIATION,
num_noisings=1, flip_in_x=False, flip_in_y=False
)[0][0]
radar_matrix = dl_utils.denormalize_radar_images(
radar_image_matrix=radar_matrix, field_names=[RADAR_FIELD_NAME],
normalization_type_string=NORMALIZATION_TYPE_STRING,
normalization_param_file_name=normalization_file_name)
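    # Note: _augment_radar_images appears to return the original examples
    # stacked with their translated, rotated, and noised copies along the
    # first axis (num_examples of each), which is the order the slicing
    # below relies on.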
orig_radar_matrix = radar_matrix[:num_examples, ...]
radar_matrix = radar_matrix[num_examples:, ...]
translated_radar_matrix = radar_matrix[:num_examples, ...]
radar_matrix = radar_matrix[num_examples:, ...]
rotated_radar_matrix = radar_matrix[:num_examples, ...]
noised_radar_matrix = radar_matrix[num_examples:, ...]
for i in range(num_examples):
_plot_one_example(
orig_radar_matrix=orig_radar_matrix[i, ...],
translated_radar_matrix=translated_radar_matrix[i, ...],
rotated_radar_matrix=rotated_radar_matrix[i, ...],
noised_radar_matrix=noised_radar_matrix[i, ...],
output_dir_name=output_dir_name,
full_storm_id_string=full_storm_id_strings[i],
storm_time_unix_sec=storm_times_unix_sec[i]
)
if __name__ == '__main__':
INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()
_run(
example_file_name=getattr(INPUT_ARG_OBJECT, EXAMPLE_FILE_ARG_NAME),
example_indices=numpy.array(
getattr(INPUT_ARG_OBJECT, EXAMPLE_INDICES_ARG_NAME), dtype=int
),
num_radar_rows=getattr(INPUT_ARG_OBJECT, NUM_ROWS_ARG_NAME),
num_radar_columns=getattr(INPUT_ARG_OBJECT, NUM_COLUMNS_ARG_NAME),
normalization_file_name=getattr(
INPUT_ARG_OBJECT, NORMALIZATION_FILE_ARG_NAME),
output_dir_name=getattr(INPUT_ARG_OBJECT, OUTPUT_DIR_ARG_NAME)
)
| mit |
henningjp/CoolProp | dev/pseudo-pure/fit_pseudo-pure_eos.py | 2 | 23647 | import numpy as np
from CoolProp.CoolProp import Props
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import scipy.optimize
import scipy.stats
import random
import h5py
from templates import *
indices = []
class TermLibrary():
"""
Build a term library using the coefficients from Wagner and Pruss (IAPWS95)
"""
def __init__(self):
L, D, T = [], [], []
for i in range(1, 6):
for j in range(-4, 9):
T.append(float(j) / 8.0)
D.append(float(i))
L.append(float(0))
for i in range(1, 16):
for j in range(1, 16):
T.append(float(j))
D.append(float(i))
L.append(float(1))
for i in range(1, 13):
for j in range(1, 11):
T.append(float(j))
D.append(float(i))
L.append(float(2))
for i in range(1, 6):
for j in range(10, 24):
T.append(float(j))
D.append(float(i))
L.append(float(3))
for i in range(1, 10):
for j in range(10, 21):
T.append(float(j))
D.append(float(i) * 2)
L.append(float(4))
self.T = T
self.D = D
self.L = L
from Helmholtz import helmholtz
def rsquared(x, y):
""" Return R^2 where x and y are array-like."""
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(x, y)
return r_value**2
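# Quick sanity check (illustrative only): for perfectly collinear data the fit
# is exact, so rsquared([1, 2, 3], [2, 4, 6]) evaluates to 1.0.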
def get_fluid_constants(Ref):
if Ref == 'R407F':
RefString = 'REFPROP-MIX:R32[0.47319469]&R125[0.2051091]&R134a[0.32169621]'
elif Ref == 'R410A':
RefString = 'REFPROP-MIX:R32[0.6976147]&R125[0.3023853]'
LIBRARY = TermLibrary()
# Coefficients for HFC blends
LIBRARY.N = np.array([9.87252E-01, -1.03017E+00, 1.17666E+00, 6.10984E+00, -7.79453E+00, 1.83377E-02, 1.05880E+00, -1.12018E+00, 6.29064E-01, 6.24982E+00, -8.07855E+00, 2.64843E-02, -2.53639E+00, 8.50922E-01, -5.20084E-01, -4.64225E-02, -1.75725E+00, 1.38469E+00, -9.22473E-01, -5.03562E-02, 6.79757E-01, -6.52431E-01, 2.33779E-01, -2.91790E-01, -1.38991E-01, 2.62270E-01, -3.51688E-03, -3.51953E-01, 2.86215E-01, -5.07076E-03, -1.96680E+00, 6.21190E-01, -1.95505E-01, -1.12009E+00, 2.77353E-02, 8.22098E-01, -2.77727E-01, -7.58262E-02, -8.15653E-02, 2.00760E-02, -1.39761E-02, 6.89437E-02, -4.42552E-03, 7.55927E-02, -8.30480E-01, 3.36159E-01, 8.98881E-01, -1.17591E+00, 3.58172E-01, -2.21041E-02, -2.33323E-02, -5.07525E-02, -5.42007E-02, 1.16181E-02, 1.09552E-02, -3.76062E-02, -1.26426E-02, 5.53849E-02, -7.10970E-02, 3.10441E-02, 1.32798E-02, 1.54776E-02, -3.14579E-02, 3.52952E-02, 1.59566E-02, -1.85110E-02, -1.01254E-02, 3.02373E-03, 4.55978E-03, 1.72477E-01, -2.61116E-01, -7.45473E-02, 8.18591E-02, -7.94097E-02, -1.04047E-05, 1.71939E-02, 1.61382E-02, 9.15953E-03, 1.70451E-02, 1.05992E-03, 1.16124E-03, -4.82049E-03, -3.61575E-03, -6.36579E-03, -6.07010E-03, -8.75332E-04])
LIBRARY.T = np.array([0.44, 1.2, 2.97, 0.67, 0.91, 5.96, 0.241, 0.69, 2.58, 0.692, 0.943, 5.8, 1.93, 1.7, 3.3, 7, 2.15, 2, 3, 7, 2.1, 4.3, 3.3, 4.7, 2.95, 0.7, 6, 1.15, 0.77, 5.84, 1.78, 2.05, 4.3, 2.43, 5.3, 2.2, 4.3, 12, 12, 13, 16, 13, 16.2, 13, 3, 2.7, 0.76, 1.48, 2.7, 6, 6, 17, 17, 0.3, 0.24, 1.8, 1.2, 0.25, 7, 8.7, 11.6, 0.45, 8.4, 8.5, 11.5, 25, 26, 0.2, 0.248, 0.2, 0.74, 3, 0.24, 2.86, 8, 17, 16, 16, 16.2, 0.7, 0.69, 7.4, 8.7, 1.25, 1.23, 4.7])
LIBRARY.D = np.array([1.0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 9])
LIBRARY.L = np.array([0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 0, 0, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 0, 0, 3, 3, 1, 1, 2])
global indices
indices = set()
while len(indices) < 23:
indices.add(random.randint(0, len(LIBRARY.T) - 1))
print("%s %s" % (indices, len(LIBRARY.T)))
T0 = np.array([LIBRARY.T[i] for i in indices])
D0 = np.array([LIBRARY.D[i] for i in indices])
L0 = np.array([LIBRARY.L[i] for i in indices])
N0 = np.array([LIBRARY.N[i] for i in indices])
# Values from Span short(2003) (polar)
# D0 = np.array([0, 1.0, 1.0, 1.0, 3.0, 7.0, 1.0, 2.0, 5.0, 1.0, 1.0, 4.0, 2.0])
# L0 = np.array([0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0])
# T0 = np.array([0, 0.25, 1.25, 1.5, 0.25, 0.875, 2.375, 2.0, 2.125, 3.5, 6.5, 4.75, 12.5])
# N0 = 0.5*np.ones_like(D0)
# values from R410A
N0 = np.array([0.0, 0.987252, -1.03017, 1.17666, -0.138991, 0.00302373, -2.53639, -1.96680, -0.830480, 0.172477, -0.261116, -0.0745473, 0.679757, -0.652431, 0.0553849, -0.0710970, -0.000875332, 0.0200760, -0.0139761, -0.0185110, 0.0171939, -0.00482049])
T0 = np.array([0.0, 0.44, 1.2, 2.97, 2.95, 0.2, 1.93, 1.78, 3.0, 0.2, 0.74, 3.0, 2.1, 4.3, 0.25, 7.0, 4.7, 13.0, 16.0, 25.0, 17.0, 7.4])
D0 = np.array([0, 1.0, 1, 1, 2, 5, 1, 2, 3, 5, 5, 5, 1, 1, 4, 4, 9, 2, 2, 4, 5, 6])
L0 = np.array([0, 0.0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
indices = set()
while len(indices) < 5:
indices.add(random.randint(0, len(LIBRARY.T) - 1))
print("%s %s" % (indices, len(LIBRARY.T)))
T0 = np.append(T0, [LIBRARY.T[i] for i in indices])
D0 = np.append(D0, [LIBRARY.D[i] for i in indices])
L0 = np.append(L0, [LIBRARY.L[i] for i in indices])
N0 = np.append(N0, [LIBRARY.N[i] for i in indices])
# values from R407C
# N0 = np.array([0.0, 1.0588,-1.12018, 0.629064,-0.351953, 0.00455978,-1.75725,-1.12009, 0.0277353, 0.898881,-1.17591, 0.0818591,-0.0794097,-0.0000104047, 0.233779,-0.291790, 0.0154776,-0.0314579,-0.00442552,-0.0101254, 0.00915953,-0.003615])
# T0 = np.array([0.0,0.241,0.69,2.58,1.15,0.248,2.15,2.43,5.3,0.76,1.48,0.24,2.86,8.0,3.3,4.7,0.45,8.4,16.2,26.0,16.0,8.7])
# D0 = np.array([0.0,1,1,1,2,5,1,2,2,3,3,5,5,5,1,1,4,4,2,4,5,6])
# L0 = np.array([0.0,0,0,0,0,0,1,1,1,1,1,1,1,1,2,2,2,2,3,3,3,3])
return RefString, N0, T0, D0, L0
class IdealPartFitter(object):
def __init__(self, Ref):
self.Ref = Ref
self.RefString, N0, T0, D0, L0 = get_fluid_constants(Ref)
self.molemass = Props(self.RefString, 'molemass')
self.Tc = Props(self.RefString, 'Tcrit')
self.rhoc = Props(self.RefString, 'rhocrit')
self.pc = Props(self.RefString, 'pcrit')
self.T = np.linspace(100, 450, 200)
self.tau = self.Tc / self.T
self.C = Props('C', 'T', self.T, 'D', 1e-15, self.RefString)
R = 8.314472 / self.molemass
self.cp0_R = self.C / R
def cp0_R_from_fit(self, a_e):
a = a_e[0:len(a_e) // 2]
e = a_e[len(a_e) // 2::]
u1 = e[1] / self.T
u2 = e[2] / self.T
u3 = e[3] / self.T
return a[0] * self.T**e[0] + a[1] * u1**2 * np.exp(u1) / (np.exp(u1) - 1)**2 + a[2] * u2**2 * np.exp(u2) / (np.exp(u2) - 1)**2 + a[3] * u3**2 * np.exp(u3) / (np.exp(u3) - 1)**2
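    # cp0_R_from_fit models the ideal-gas heat capacity as one power-law term
    # plus three Planck-Einstein terms,
    #   cp0/R = a0*T^e0 + sum_i a_i * u_i^2 * exp(u_i) / (exp(u_i) - 1)^2,
    # with u_i = e_i/T -- the same functional form assembled from phi0
    # contributions in d2phi0_dTau2 below.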
def OBJECTIVE_cp0_R(self, a_e):
cp0_R_fit = self.cp0_R_from_fit(a_e)
RMS = np.sqrt(np.mean(np.power((self.cp0_R - cp0_R_fit) / self.cp0_R, 2)))
return RMS
def fit(self):
a_e = [2.8749, 2.0623, 5.9751, 1.5612, 0.1, 697.0, 1723.0, 3875.0]
a_e = scipy.optimize.minimize(self.OBJECTIVE_cp0_R, a_e).x
self.a = a_e[0:len(a_e) // 2]
self.e = a_e[len(a_e) // 2::]
cp0_over_R_check = 1 - self.tau**2 * self.d2phi0_dTau2(self.tau)
plt.plot(self.T, (self.cp0_R_from_fit(a_e) / self.cp0_R - 1) * 100, '-', self.T, (cp0_over_R_check / self.cp0_R - 1) * 100, '^')
plt.xlabel('Temperature [K]')
plt.ylabel('($c_{p0}/R$ (fit) / $c_{p0}/R$ (REFPROP) -1)*100 [%]')
plt.savefig('cp0.pdf')
plt.close()
def d2phi0_dTau2(self, tau):
d = []
for _tau in tau:
# lead term is killed
d.append(helmholtz.phi0_logtau(-1.0).dTau2(_tau, _tau)
+ helmholtz.phi0_cp0_poly(self.a[0], self.e[0], self.Tc, 298.15).dTau2(_tau, _tau)
+ helmholtz.phi0_Planck_Einstein(self.a, self.e / self.Tc, 1, len(self.a) - 1).dTau2(_tau, _tau)
)
return np.array(d)
class ResidualPartFitter(object):
def __init__(self, Ref, IPF):
self.Ref = Ref
self.IPF = IPF
self.RefString, self.N0, self.T0, self.D0, self.L0 = get_fluid_constants(Ref)
self.Tc = Props(self.RefString, 'Tcrit')
self.rhoc = Props(self.RefString, 'rhocrit')
molemass = Props(self.RefString, 'molemass')
self.R = 8.314472 / molemass
def termwise_Rsquared(self):
keepers = []
values = []
print('%s terms at start' % len(self.N0))
for i in range(len(self.N0)):
n = helmholtz.vectord([float(1)])
d = helmholtz.vectord([self.D0[i]])
t = helmholtz.vectord([self.T0[i]])
l = helmholtz.vectord([self.L0[i]])
self.phir = helmholtz.phir_power(n, d, t, l, 0, 0)
PPF = self.evaluate_EOS(np.array(list(n)))
R2 = rsquared(PPF.p, self.phir.dDeltaV(self.tauV, self.deltaV))
values.append((R2, i))
if R2 > 0.9:
keepers.append(i)
values, indices = zip(*reversed(sorted(values)))
keepers = list(indices[0:30])
self.N0 = self.N0[keepers]
self.T0 = self.T0[keepers]
self.D0 = self.D0[keepers]
self.L0 = self.L0[keepers]
print('%s terms at end' % len(self.N0))
def generate_1phase_data(self):
Tc = Props(self.RefString, 'Tcrit')
rhoc = Props(self.RefString, 'rhocrit')
TTT, RHO, PPP, CPP, CVV, AAA = [], [], [], [], [], []
for _T in np.linspace(220, 450, 100):
print(_T)
for _rho in np.logspace(np.log10(1e-2), np.log10(rhoc), 100):
try:
if _T > Tc:
p = Props('P', 'T', _T, 'D', _rho, self.RefString)
cp = Props('C', 'T', _T, 'D', _rho, self.RefString)
cv = Props('O', 'T', _T, 'D', _rho, self.RefString)
a = Props('A', 'T', _T, 'D', _rho, self.RefString)
else:
DL = Props('D', 'T', _T, 'Q', 0, self.RefString)
DV = Props('D', 'T', _T, 'Q', 1, self.RefString)
if _rho < DV or _rho > DL:
p = Props('P', 'T', _T, 'D', _rho, self.RefString)
cp = Props('C', 'T', _T, 'D', _rho, self.RefString)
cv = Props('O', 'T', _T, 'D', _rho, self.RefString)
a = Props('A', 'T', _T, 'D', _rho, self.RefString)
else:
p = None
if p is not None:
TTT.append(_T)
RHO.append(_rho)
PPP.append(p)
CPP.append(cp)
CVV.append(cv)
AAA.append(a)
except ValueError as VE:
print(VE)
pass
for _rho in np.linspace(rhoc, 3.36 * rhoc, 50):
try:
if _T > Tc:
p = Props('P', 'T', _T, 'D', _rho, self.RefString)
cp = Props('C', 'T', _T, 'D', _rho, self.RefString)
cv = Props('O', 'T', _T, 'D', _rho, self.RefString)
a = Props('A', 'T', _T, 'D', _rho, self.RefString)
else:
DL = Props('D', 'T', _T, 'Q', 0, self.RefString)
DV = Props('D', 'T', _T, 'Q', 1, self.RefString)
if _rho < DV or _rho > DL:
p = Props('P', 'T', _T, 'D', _rho, self.RefString)
cp = Props('C', 'T', _T, 'D', _rho, self.RefString)
cv = Props('O', 'T', _T, 'D', _rho, self.RefString)
a = Props('A', 'T', _T, 'D', _rho, self.RefString)
else:
p = None
if p is not None:
TTT.append(_T)
RHO.append(_rho)
PPP.append(p)
CPP.append(cp)
CVV.append(cv)
AAA.append(a)
except ValueError as VE:
print(VE)
pass
h = h5py.File('T_rho_p.h5', 'w')
grp = h.create_group(self.Ref)
grp.create_dataset("T", data=np.array(TTT), compression="gzip")
grp.create_dataset("rho", data=np.array(RHO), compression="gzip")
grp.create_dataset("p", data=np.array(PPP), compression="gzip")
grp.create_dataset("cp", data=np.array(CPP), compression="gzip")
grp.create_dataset("cv", data=np.array(CVV), compression="gzip")
grp.create_dataset("speed_sound", data=np.array(AAA), compression="gzip")
h.close()
def load_data(self):
h = h5py.File('T_rho_p.h5', 'r')
self.T = h.get(self.Ref + '/T').value
self.rho = h.get(self.Ref + '/rho').value
self.p = h.get(self.Ref + '/p').value
self.cp = h.get(self.Ref + '/cp').value
self.cv = h.get(self.Ref + '/cv').value
self.speed_sound = h.get(self.Ref + '/speed_sound').value
self.tau = self.Tc / self.T
self.delta = self.rho / self.rhoc
self.tauV = helmholtz.vectord(self.tau)
self.deltaV = helmholtz.vectord(self.delta)
# Get the derivative d2phi0_dTau2 from the ideal part fitter
self.d2phi0_dTau2 = self.IPF.d2phi0_dTau2(self.tau)
def evaluate_EOS(self, N):
self.phir.n = helmholtz.vectord(N)
dDelta = self.phir.dDeltaV(self.tauV, self.deltaV)
dTau2 = self.phir.dTau2V(self.tauV, self.deltaV)
dDelta2 = self.phir.dDelta2V(self.tauV, self.deltaV)
dDelta_dTau = self.phir.dDelta_dTauV(self.tauV, self.deltaV)
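        # Standard Helmholtz-energy identities (delta = rho/rhoc, tau = Tc/T):
        #   p    = rho*R*T*(1 + delta*phir_delta)
        #   cv/R = -tau^2*(phi0_tautau + phir_tautau)
        #   cp/R = cv/R + (1 + delta*phir_delta - delta*tau*phir_deltatau)^2
        #                  / (1 + 2*delta*phir_delta + delta^2*phir_deltadelta)
        #   w^2  = R*T*(cp/cv)*(1 + 2*delta*phir_delta + delta^2*phir_deltadelta)
        # The factor of 1000 below appears to convert kJ/kg to J/kg so that the
        # speed of sound comes out in m/s.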
# Evaluate the pressure
p = (self.rho * self.R * self.T) * (1 + self.delta * dDelta)
# Evaluate the specific heat at constant volume
cv_over_R = -self.tau**2 * (self.d2phi0_dTau2 + dTau2)
cv = cv_over_R * self.R
# Evaluate the specific heat at constant pressure
cp_over_R = cv_over_R + (1.0 + self.delta * dDelta - self.delta * self.tau * dDelta_dTau)**2 / (1 + 2 * self.delta * dDelta + self.delta**2 * dDelta2)
cp = cp_over_R * self.R
# Evaluate the speed of sound
w = np.sqrt(1000 * self.R * self.T * cp_over_R / cv_over_R * (1 + 2 * self.delta * dDelta + self.delta**2 * dDelta2))
class stub: pass
PPF = stub()
PPF.p = np.array(p, ndmin=1).T
PPF.cp = np.array(cp, ndmin=1).T
PPF.cv = np.array(cv, ndmin=1).T
PPF.w = np.array(w, ndmin=1).T
return PPF
def OBJECTIVE(self, N):
PPF = self.evaluate_EOS(N)
## plt.plot(PPF.p, self.p); plt.show()
## plt.plot(PPF.cp, self.cp); plt.show()
## plt.plot(PPF.cv, self.cv); plt.show()
## plt.plot(PPF.w, self.speed_sound); plt.show()
w_p = 1.0
w_cv = 1.0
w_w = 1.0
w_cp = 1.0
w_total = (w_p + w_cv + w_w + w_cp) / 4
w_p_norm = w_p / w_total
w_cv_norm = w_cv / w_total
w_cp_norm = w_cp / w_total
w_w_norm = w_w / w_total
residuals = np.r_[(PPF.p / self.p - 1), (PPF.cv / self.cv - 1), (PPF.cp / self.cp - 1)] # ,(PPF.w**2/self.speed_sound**2-1)]
RMS = np.sqrt(np.mean(np.power(residuals, 2)))
print('RMS: %s %% Max %s %%' % (RMS * 100, np.max(np.abs(residuals)) * 100))
self.RMS = RMS
self.MaxError = np.max(np.abs(residuals))
return RMS
def fit(self):
# Kill off some not as good terms
# self.termwise_Rsquared()
# Load up the residual Helmholtz term with parameters
n = helmholtz.vectord(self.N0)
d = helmholtz.vectord(self.D0)
t = helmholtz.vectord(self.T0)
l = helmholtz.vectord(self.L0)
self.phir = helmholtz.phir_power(n, d, t, l, 1, len(self.N0) - 1)
# Solve for the coefficients
Nbounds = [(-10, 10) for _ in range(len(self.N0))]
tbounds = [(-1, 30) for _ in range(len(self.T0))]
print(self.OBJECTIVE(np.array(list(self.N0))))
#self.N = self.N0
#self.N = scipy.optimize.minimize(self.OBJECTIVE, np.array(list(self.N0)), bounds = Nbounds, options = dict(maxiter = 5)).x
self.N = scipy.optimize.minimize(self.OBJECTIVE, np.array(list(self.N0)), method='L-BFGS-B', bounds=Nbounds, options=dict(maxiter=100)).x
# Write the coefficients to HDF5 file
h = h5py.File('fit_coeffs.h5', 'w')
grp = h.create_group(self.Ref)
grp.create_dataset("n", data=np.array(self.N), compression="gzip")
print(self.N)
#grp.create_dataset("t", data = np.array(self.N[len(self.N)//2::]), compression = "gzip")
h.close()
def evaluate_REFPROP(self, Ref, T, rho):
p, cp, cv, w = [], [], [], []
R = 8.314472 / Props(Ref, 'molemass')
for _T, _rho in zip(T, rho):
p.append(Props("P", 'T', _T, 'D', _rho, Ref))
cp.append(Props("C", 'T', _T, 'D', _rho, Ref))
cv.append(Props("O", 'T', _T, 'D', _rho, Ref))
w.append(Props("A", 'T', _T, 'D', _rho, Ref))
class stub: pass
PPF = stub()
PPF.p = np.array(p, ndmin=1).T
PPF.cp = np.array(cp, ndmin=1).T
PPF.cv = np.array(cv, ndmin=1).T
PPF.w = np.array(w, ndmin=1).T
return PPF
def check(self):
# Load the coefficients from file
h = h5py.File('fit_coeffs.h5', 'r')
grp = h.get(self.Ref)
n = grp.get('n').value
h.close()
print(n)
import matplotlib.colors as colors
cNorm = colors.LogNorm(vmin=1e-3, vmax=50)
PPF = self.evaluate_EOS(np.array(list(n)))
self.OBJECTIVE(np.array(list(n)))
print('max error (p) %s %%' % np.max(np.abs(PPF.p / self.p - 1) * 100))
SC1 = plt.scatter(self.rho, self.T, s=8, c=np.abs(PPF.p / self.p - 1) * 100, edgecolors='none', cmap=plt.get_cmap('jet'), norm=cNorm)
plt.gca().set_xscale('log')
cb = plt.colorbar()
cb.set_label('abs(PPF.p/self.p-1)*100')
plt.savefig('pressure.png')
plt.show()
print('max error (cp) %s %%' % np.max(np.abs(PPF.cp / self.cp - 1) * 100))
SC1 = plt.scatter(self.rho, self.T, s=8, c=np.abs(PPF.cp / self.cp - 1) * 100, edgecolors='none', cmap=plt.get_cmap('jet'), norm=cNorm)
plt.gca().set_xscale('log')
cb = plt.colorbar()
cb.set_label('abs(PPF.cp/self.cp-1)*100')
plt.savefig('cp.png')
plt.show()
## plt.plot(self.T,PPF.p/self.p,'.'); plt.show()
## plt.plot(self.T,PPF.cp/self.cp,'.'); plt.show()
## plt.plot(self.T,PPF.cv/self.cv,'.'); plt.show()
## plt.plot(self.T,PPF.w/self.speed_sound,'.'); plt.show()
class PPFFitterClass(object):
def __init__(self, Ref, regenerate_data=True, fit=True):
self.Ref = Ref
self.IPF = IdealPartFitter(Ref)
self.IPF.fit()
for i in range(1):
self.RPF = ResidualPartFitter(Ref, IPF=self.IPF)
if regenerate_data:
self.RPF.generate_1phase_data()
self.RPF.load_data()
if fit:
self.RPF.fit()
f = open('results.txt', 'a+')
print("%s %s %s" % (indices, self.RPF.RMS, self.RPF.MaxError), file=f)
f.close()
self.RPF.check()
quit()
self.output_files()
    def contour_plot(self, values):
"""
Parameters
----------
values : iterable, same size as T and rho
"""
plt.semilogx(self.RPF.rho, self.RPF.T, 'o')
plt.show()
# Generate a regular grid to interpolate the data.
xi = np.linspace(min(self.RPF.T), max(self.RPF.T), 100)
yi = np.linspace(min(self.RPF.rho), max(self.RPF.rho), 100)
xi, yi = np.meshgrid(xi, yi)
# Interpolate using delaunay triangularization
zi = mlab.griddata(np.array(self.RPF.T), np.array(self.RPF.rho), np.array(values), xi, yi)
cont = plt.contourf(yi, xi, zi, 30)
plt.colorbar()
plt.show()
def output_files(self):
h = h5py.File('fit_coeffs.h5', 'r')
n = h.get(self.Ref + '/n').value
#t = h.get(self.Ref+'/t').value
# Output the header file
header = PPF_h_template.format(Ref=self.Ref, RefUpper=self.Ref.upper())
acoeffs = '0, ' + ', '.join(['{a:0.6f}'.format(a=_) for _ in self.IPF.a])
# First one doesn't get divided by critical temperature, later ones do
bcoeffs = '0, '
bcoeffs += str(self.IPF.e[0]) + ', '
bcoeffs += ', '.join(['{b:0.4f}/{Tcrit:g}'.format(b=_, Tcrit=self.IPF.Tc) for _ in self.IPF.e[1::]])
ncoeffs = ', '.join(['{a:0.6g}'.format(a=_) for _ in n])
tcoeffs = ', '.join(['{a:0.6g}'.format(a=_) for _ in self.RPF.T0])
dcoeffs = ', '.join(['{a:0.6g}'.format(a=_) for _ in self.RPF.D0])
lcoeffs = ', '.join(['{a:0.6g}'.format(a=_) for _ in self.RPF.L0])
import sys
sys.path.append('..')
from fit_ancillary_ODRPACK import saturation_pressure, saturation_density
pL = saturation_pressure(self.IPF.RefString, self.IPF.Ref, LV='L')
pV = saturation_pressure(self.IPF.RefString, self.IPF.Ref, LV='V')
rhoL = saturation_density(self.IPF.RefString, self.IPF.Ref, form='A', LV='L', add_critical=False)
rhoV = saturation_density(self.IPF.RefString, self.IPF.Ref, form='B', LV='V', add_critical=False)
code = PPF_cpp_template.format(Ref=self.Ref,
RefUpper=self.Ref.upper(),
acoeffs=acoeffs,
bcoeffs=bcoeffs,
Ncoeffs=ncoeffs,
tcoeffs=tcoeffs,
dcoeffs=dcoeffs,
Lcoeffs=lcoeffs,
N_phir=len(n),
N_cp0=len(self.IPF.a),
molemass=self.IPF.molemass,
Ttriple=200,
accentric=0.7,
pcrit=self.IPF.pc,
Tcrit=self.IPF.Tc,
rhocrit=self.IPF.rhoc,
pL=pL,
pV=pV,
rhoL=rhoL,
rhoV=rhoV
)
f = open(self.IPF.Ref + '.h', 'w')
f.write(header)
f.close()
f = open(self.IPF.Ref + '.cpp', 'w')
f.write(code)
f.close()
if __name__ == '__main__':
Ref = 'R407F'
PPFFitterClass(Ref)
| mit |
ScottBuchanan/eden | tests/travis/generate_requirements_file.py | 32 | 1088 | #!/usr/bin/python
# usage - python generate_requirements_file.py [folder where the file should be generated] [list of requirements file]
# example - python tests/travis/generate_requirements_file.py tests/travis requirements.txt optional_requirements.txt
from sys import argv
# numpy - preinstalled
# matplotlib, lxml - installed from binaries
# pyrtf not working with pip
# packages not to be installed
exclude = ("numpy", "matplotlib", "lxml", "PyRTF", "PIL", "GDAL", "Shapely")
# the output requirements file
gen_req_file = open("%s/generated_requirements.txt" % argv[1], "w")
# iterate over all the requirements file
for req_file in argv[2:]:
requirements = open(req_file).readlines()
for line in requirements:
line = line.strip()
        # skip blank lines and comments in the requirements file
        if not line or line.startswith("#"):
            continue
found = False
for item in exclude:
if item.lower() in line.lower():
found = True
                break
if found:
continue
gen_req_file.write("%s\n" % line)
gen_req_file.close()
| mit |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/statsmodels-0.5.0-py2.7-linux-x86_64.egg/statsmodels/datasets/longley/data.py | 3 | 1887 | """Longley dataset"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = __doc__
SOURCE = """
The classic 1967 Longley Data
http://www.itl.nist.gov/div898/strd/lls/data/Longley.shtml
::
Longley, J.W. (1967) "An Appraisal of Least Squares Programs for the
    Electronic Computer from the Point of View of the User." Journal of
the American Statistical Association. 62.319, 819-41.
"""
DESCRSHORT = """"""
DESCRLONG = """The Longley dataset contains various US macroeconomic
variables that are known to be highly collinear. It has been used to appraise
the accuracy of least squares routines."""
NOTE = """
Number of Observations - 16
Number of Variables - 6
Variable name definitions::
TOTEMP - Total Employment
GNPDEFL - GNP deflator
GNP - GNP
UNEMP - Number of unemployed
ARMED - Size of armed forces
POP - Population
YEAR - Year (1947 - 1962)
"""
from numpy import recfromtxt, array, column_stack
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the Longley data and return a Dataset class.
Returns
-------
Dataset instance
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def load_pandas():
"""
Load the Longley data and return a Dataset class.
Returns
-------
Dataset instance
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=0)
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath+'/longley.csv',"rb"), delimiter=",",
names=True, dtype=float, usecols=(1,2,3,4,5,6,7))
return data
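# Minimal usage sketch (illustrative only; assumes ``import statsmodels.api as sm``):
#
#     data = load()
#     results = sm.OLS(data.endog, sm.add_constant(data.exog)).fit()
#     print(results.params)
#
# i.e. regress total employment on the six macroeconomic regressors plus a constant.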
| apache-2.0 |
chanceraine/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_emf.py | 69 | 22336 | """
Enhanced Metafile backend. See http://pyemf.sourceforge.net for the EMF
driver library.
"""
from __future__ import division
try:
import pyemf
except ImportError:
raise ImportError('You must first install pyemf from http://pyemf.sf.net')
import os,sys,math,re
from matplotlib import verbose, __version__, rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.figure import Figure
from matplotlib.transforms import Bbox
from matplotlib.font_manager import findfont, FontProperties
from matplotlib.ft2font import FT2Font, KERNING_UNFITTED, KERNING_DEFAULT, KERNING_UNSCALED
# Font handling stuff snarfed from backend_ps, but only using TTF fonts
_fontd = {}
# Debug print stuff
debugHandle = False
debugPrint = False
debugText = False
# Hashable font properties class. In EMF, angle of rotation is a part
# of the font properties, so a handle to a new font must be obtained
# if the rotation changes.
class EMFFontProperties(FontProperties):
def __init__(self,other,angle):
FontProperties.__init__(self,other.get_family(),
other.get_style(),
other.get_variant(),
other.get_weight(),
other.get_stretch(),
other.get_size())
self.__angle=angle
def __hash__(self):
return hash( (FontProperties.__hash__(self), self.__angle))
def __str__(self):
return str( (FontProperties.__str__(self), self.__angle))
def set_angle(self,angle):
self.__angle=angle
# Hashable pen (line style) properties.
class EMFPen:
def __init__(self,emf,gc):
self.emf=emf
self.gc=gc
r,g,b=gc.get_rgb()
self.r=int(r*255)
self.g=int(g*255)
self.b=int(b*255)
self.width=int(gc.get_linewidth())
self.style=0
self.set_linestyle()
if debugHandle: print "EMFPen: style=%d width=%d rgb=(%d,%d,%d)" % (self.style,self.width,self.r,self.g,self.b)
def __hash__(self):
return hash((self.style,self.width,self.r,self.g,self.b))
def set_linestyle(self):
# Hack. Negative width lines will not get drawn.
if self.width<0:
self.style=pyemf.PS_NULL
else:
styles={'solid':pyemf.PS_SOLID, 'dashed':pyemf.PS_DASH,
'dashdot':pyemf.PS_DASHDOT, 'dotted':pyemf.PS_DOT}
#style=styles.get(self.gc.get_linestyle('solid'))
style=self.gc.get_linestyle('solid')
if debugHandle: print "EMFPen: style=%d" % style
if style in styles:
self.style=styles[style]
else:
self.style=pyemf.PS_SOLID
def get_handle(self):
handle=self.emf.CreatePen(self.style,self.width,(self.r,self.g,self.b))
return handle
# Hashable brush (fill style) properties.
class EMFBrush:
def __init__(self,emf,rgb):
self.emf=emf
r,g,b=rgb
self.r=int(r*255)
self.g=int(g*255)
self.b=int(b*255)
if debugHandle: print "EMFBrush: rgb=(%d,%d,%d)" % (self.r,self.g,self.b)
def __hash__(self):
return hash((self.r,self.g,self.b))
def get_handle(self):
handle=self.emf.CreateSolidBrush((self.r,self.g,self.b))
return handle
class RendererEMF(RendererBase):
"""
The renderer handles drawing/rendering operations through a
pyemf.EMF instance.
"""
def __init__(self, outfile, width, height, dpi):
"Initialize the renderer with a gd image instance"
self.outfile = outfile
# a map from get_color args to colors
self._cached = {}
# dict of hashed properties to already created font handles
self._fontHandle = {}
self.lastHandle = {'font':-1, 'pen':-1, 'brush':-1}
self.emf=pyemf.EMF(width,height,dpi,'in')
self.width=int(width*dpi)
self.height=int(height*dpi)
self.dpi = dpi
self.pointstodpi = dpi/72.0
self.hackPointsForMathExponent = 2.0
# set background transparent for text
self.emf.SetBkMode(pyemf.TRANSPARENT)
# set baseline for text to be bottom left corner
self.emf.SetTextAlign( pyemf.TA_BOTTOM|pyemf.TA_LEFT)
if debugPrint: print "RendererEMF: (%f,%f) %s dpi=%f" % (self.width,self.height,outfile,dpi)
def save(self):
self.emf.save(self.outfile)
def draw_arc(self, gcEdge, rgbFace, x, y, width, height, angle1, angle2, rotation):
"""
Draw an arc using GraphicsContext instance gcEdge, centered at x,y,
with width and height and angles from 0.0 to 360.0
0 degrees is at 3-o'clock
positive angles are anti-clockwise
If the color rgbFace is not None, fill the arc with it.
"""
if debugPrint: print "draw_arc: (%f,%f) angles=(%f,%f) w,h=(%f,%f)" % (x,y,angle1,angle2,width,height)
pen=self.select_pen(gcEdge)
brush=self.select_brush(rgbFace)
# This algorithm doesn't work very well on small circles
# because of rounding error. This shows up most obviously on
# legends where the circles are small anyway, and it is
# compounded by the fact that it puts several circles right
# next to each other so the differences are obvious.
hw=width/2
hh=height/2
x1=int(x-width/2)
y1=int(y-height/2)
if brush:
self.emf.Pie(int(x-hw),int(self.height-(y-hh)),int(x+hw),int(self.height-(y+hh)),int(x+math.cos(angle1*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle1*math.pi/180.0)*hh)),int(x+math.cos(angle2*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle2*math.pi/180.0)*hh)))
else:
self.emf.Arc(int(x-hw),int(self.height-(y-hh)),int(x+hw),int(self.height-(y+hh)),int(x+math.cos(angle1*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle1*math.pi/180.0)*hh)),int(x+math.cos(angle2*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle2*math.pi/180.0)*hh)))
def draw_image(self, x, y, im, bbox):
"""
Draw the Image instance into the current axes; x is the
distance in pixels from the left hand side of the canvas. y is
the distance from the origin. That is, if origin is upper, y
is the distance from top. If origin is lower, y is the
distance from bottom
bbox is a matplotlib.transforms.BBox instance for clipping, or
None
"""
# pyemf2 currently doesn't support bitmaps.
pass
def draw_line(self, gc, x1, y1, x2, y2):
"""
Draw a single line from x1,y1 to x2,y2
"""
if debugPrint: print "draw_line: (%f,%f) - (%f,%f)" % (x1,y1,x2,y2)
if self.select_pen(gc):
self.emf.Polyline([(long(x1),long(self.height-y1)),(long(x2),long(self.height-y2))])
else:
if debugPrint: print "draw_line: optimizing away (%f,%f) - (%f,%f)" % (x1,y1,x2,y2)
def draw_lines(self, gc, x, y):
"""
x and y are equal length arrays, draw lines connecting each
point in x, y
"""
if debugPrint: print "draw_lines: %d points" % len(str(x))
# optimize away anything that won't actually be drawn. Edge
# style must not be PS_NULL for it to appear on screen.
if self.select_pen(gc):
points = [(long(x[i]), long(self.height-y[i])) for i in range(len(x))]
self.emf.Polyline(points)
def draw_point(self, gc, x, y):
"""
Draw a single point at x,y
Where 'point' is a device-unit point (or pixel), not a matplotlib point
"""
if debugPrint: print "draw_point: (%f,%f)" % (x,y)
# don't cache this pen
pen=EMFPen(self.emf,gc)
self.emf.SetPixel(long(x),long(self.height-y),(pen.r,pen.g,pen.b))
def draw_polygon(self, gcEdge, rgbFace, points):
"""
Draw a polygon using the GraphicsContext instance gc.
points is a len vertices tuple, each element
giving the x,y coords a vertex
If the color rgbFace is not None, fill the polygon with it
"""
if debugPrint: print "draw_polygon: %d points" % len(points)
# optimize away anything that won't actually draw. Either a
# face color or edge style must be defined
pen=self.select_pen(gcEdge)
brush=self.select_brush(rgbFace)
if pen or brush:
points = [(long(x), long(self.height-y)) for x,y in points]
self.emf.Polygon(points)
else:
points = [(long(x), long(self.height-y)) for x,y in points]
if debugPrint: print "draw_polygon: optimizing away polygon: %d points = %s" % (len(points),str(points))
def draw_rectangle(self, gcEdge, rgbFace, x, y, width, height):
"""
        Draw a rectangle outline using the GraphicsContext instance gcEdge,
with lower left at x,y with width and height.
If rgbFace is not None, fill the rectangle with it.
"""
if debugPrint: print "draw_rectangle: (%f,%f) w=%f,h=%f" % (x,y,width,height)
# optimize away anything that won't actually draw. Either a
# face color or edge style must be defined
pen=self.select_pen(gcEdge)
brush=self.select_brush(rgbFace)
if pen or brush:
self.emf.Rectangle(int(x),int(self.height-y),int(x)+int(width),int(self.height-y)-int(height))
else:
if debugPrint: print "draw_rectangle: optimizing away (%f,%f) w=%f,h=%f" % (x,y,width,height)
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
"""
Draw the text.Text instance s at x,y (display coords) with font
properties instance prop at angle in degrees, using GraphicsContext gc
**backend implementers note**
When you are trying to determine if you have gotten your bounding box
right (which is what enables the text layout/alignment to work
properly), it helps to change the line in text.py
if 0: bbox_artist(self, renderer)
to if 1, and then the actual bounding box will be blotted along with
your text.
"""
if debugText: print "draw_text: (%f,%f) %d degrees: '%s'" % (x,y,angle,s)
if ismath:
self.draw_math_text(gc,x,y,s,prop,angle)
else:
self.draw_plain_text(gc,x,y,s,prop,angle)
def draw_plain_text(self, gc, x, y, s, prop, angle):
"""
Draw a text string verbatim; no conversion is done.
"""
if debugText: print "draw_plain_text: (%f,%f) %d degrees: '%s'" % (x,y,angle,s)
if debugText: print " properties:\n"+str(prop)
self.select_font(prop,angle)
# haxor follows! The subtleties of text placement in EMF
# still elude me a bit. It always seems to be too high on the
# page, about 10 pixels too high on a 300dpi resolution image.
# So, I'm adding this hack for the moment:
hackoffsetper300dpi=10
xhack=math.sin(angle*math.pi/180.0)*hackoffsetper300dpi*self.dpi/300.0
yhack=math.cos(angle*math.pi/180.0)*hackoffsetper300dpi*self.dpi/300.0
self.emf.TextOut(long(x+xhack),long(y+yhack),s)
def draw_math_text(self, gc, x, y, s, prop, angle):
"""
Draw a subset of TeX, currently handles exponents only. Since
pyemf doesn't have any raster functionality yet, the
texmanager.get_rgba won't help.
"""
if debugText: print "draw_math_text: (%f,%f) %d degrees: '%s'" % (x,y,angle,s)
s = s[1:-1] # strip the $ from front and back
match=re.match("10\^\{(.+)\}",s)
if match:
exp=match.group(1)
if debugText: print " exponent=%s" % exp
font = self._get_font_ttf(prop)
font.set_text("10", 0.0)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
self.draw_plain_text(gc,x,y,"10",prop,angle)
propexp=prop.copy()
propexp.set_size(prop.get_size_in_points()*.8)
self.draw_plain_text(gc,x+w+self.points_to_pixels(self.hackPointsForMathExponent),y-(h/2),exp,propexp,angle)
else:
# if it isn't an exponent, then render the raw TeX string.
self.draw_plain_text(gc,x,y,s,prop,angle)
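    # Example of the handling above: "$10^{-3}$" is drawn as "10" at full size
    # followed by "-3" at 80% of the font size, offset to the right by the
    # width of "10" (plus the small hack offset) and raised by half its height.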
def get_math_text_width_height(self, s, prop):
"""
get the width and height in display coords of the string s
        with FontProperties prop, ripped right out of backend_ps. This
method must be kept in sync with draw_math_text.
"""
if debugText: print "get_math_text_width_height:"
s = s[1:-1] # strip the $ from front and back
match=re.match("10\^\{(.+)\}",s)
if match:
exp=match.group(1)
if debugText: print " exponent=%s" % exp
font = self._get_font_ttf(prop)
font.set_text("10", 0.0)
w1, h1 = font.get_width_height()
propexp=prop.copy()
propexp.set_size(prop.get_size_in_points()*.8)
fontexp=self._get_font_ttf(propexp)
fontexp.set_text(exp, 0.0)
w2, h2 = fontexp.get_width_height()
w=w1+w2
h=h1+(h2/2)
w /= 64.0 # convert from subpixels
h /= 64.0
w+=self.points_to_pixels(self.hackPointsForMathExponent)
if debugText: print " math string=%s w,h=(%f,%f)" % (s, w, h)
else:
w,h=self.get_text_width_height(s,prop,False)
return w, h
def flipy(self):
"""return true if y small numbers are top for renderer
Is used for drawing text (text.py) and images (image.py) only
"""
return True
def get_canvas_width_height(self):
"""
return the canvas width and height in display coords
"""
return self.width,self.height
def set_handle(self,type,handle):
"""
Update the EMF file with the current handle, but only if it
isn't the same as the last one. Don't want to flood the file
with duplicate info.
"""
if self.lastHandle[type] != handle:
self.emf.SelectObject(handle)
self.lastHandle[type]=handle
def get_font_handle(self, prop, angle):
"""
Look up the handle for the font based on the dict of
properties *and* the rotation angle, since in EMF the font
rotation is a part of the font definition.
"""
prop=EMFFontProperties(prop,angle)
size=int(prop.get_size_in_points()*self.pointstodpi)
face=prop.get_name()
key = hash(prop)
handle = self._fontHandle.get(key)
if handle is None:
handle=self.emf.CreateFont(-size, 0, int(angle)*10, int(angle)*10,
pyemf.FW_NORMAL, 0, 0, 0,
pyemf.ANSI_CHARSET, pyemf.OUT_DEFAULT_PRECIS,
pyemf.CLIP_DEFAULT_PRECIS, pyemf.DEFAULT_QUALITY,
pyemf.DEFAULT_PITCH | pyemf.FF_DONTCARE, face);
if debugHandle: print "get_font_handle: creating handle=%d for face=%s size=%d" % (handle,face,size)
self._fontHandle[key]=handle
if debugHandle: print " found font handle %d for face=%s size=%d" % (handle,face,size)
self.set_handle("font",handle)
return handle
def select_font(self,prop,angle):
handle=self.get_font_handle(prop,angle)
self.set_handle("font",handle)
def select_pen(self, gc):
"""
Select a pen that includes the color, line width and line
style. Return the pen if it will draw a line, or None if the
pen won't produce any output (i.e. the style is PS_NULL)
"""
pen=EMFPen(self.emf,gc)
key=hash(pen)
handle=self._fontHandle.get(key)
if handle is None:
handle=pen.get_handle()
self._fontHandle[key]=handle
if debugHandle: print " found pen handle %d" % handle
self.set_handle("pen",handle)
if pen.style != pyemf.PS_NULL:
return pen
else:
return None
def select_brush(self, rgb):
"""
Select a fill color, and return the brush if the color is
valid or None if this won't produce a fill operation.
"""
if rgb is not None:
brush=EMFBrush(self.emf,rgb)
key=hash(brush)
handle=self._fontHandle.get(key)
if handle is None:
handle=brush.get_handle()
self._fontHandle[key]=handle
if debugHandle: print " found brush handle %d" % handle
self.set_handle("brush",handle)
return brush
else:
return None
def _get_font_ttf(self, prop):
"""
get the true type font properties, used because EMFs on
windows will use true type fonts.
"""
key = hash(prop)
font = _fontd.get(key)
if font is None:
fname = findfont(prop)
if debugText: print "_get_font_ttf: name=%s" % fname
font = FT2Font(str(fname))
_fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, self.dpi)
return font
def get_text_width_height(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
        with FontProperties prop, ripped right out of backend_ps
"""
if debugText: print "get_text_width_height: ismath=%s properties: %s" % (str(ismath),str(prop))
if ismath:
if debugText: print " MATH TEXT! = %s" % str(ismath)
w,h = self.get_math_text_width_height(s, prop)
return w,h
font = self._get_font_ttf(prop)
font.set_text(s, 0.0)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
if debugText: print " text string=%s w,h=(%f,%f)" % (s, w, h)
return w, h
def new_gc(self):
return GraphicsContextEMF()
def points_to_pixels(self, points):
# if backend doesn't have dpi, eg, postscript or svg
#return points
# elif backend assumes a value for pixels_per_inch
#return points/72.0 * self.dpi.get() * pixels_per_inch/72.0
# else
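        # e.g. 12 points at 300 dpi -> 12/72*300 = 50 device units (pixels)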
return points/72.0 * self.dpi
class GraphicsContextEMF(GraphicsContextBase):
"""
The graphics context provides the color, line styles, etc... See the gtk
and postscript backends for examples of mapping the graphics context
attributes (cap styles, join styles, line widths, colors) to a particular
backend. In GTK this is done by wrapping a gtk.gdk.GC object and
forwarding the appropriate calls to it using a dictionary mapping styles
to gdk constants. In Postscript, all the work is done by the renderer,
mapping line styles to postscript calls.
If it's more appropriate to do the mapping at the renderer level (as in
the postscript backend), you don't need to override any of the GC methods.
If it's more appropriate to wrap an instance (as in the GTK backend) and
do the mapping here, you'll need to override several of the setter
methods.
The base GraphicsContext stores colors as a RGB tuple on the unit
interval, eg, (0.5, 0.0, 1.0). You may need to map this to colors
appropriate for your backend.
"""
pass
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For image backends - is not required
    For GUI backends - this should be overridden if drawing should be done in
interactive python mode
"""
pass
def show():
"""
For image backends - is not required
For GUI backends - show() is usually the last line of a pylab script and
tells the backend that it is time to draw. In interactive mode, this may
be a do nothing func. See the GTK backend for an example of how to handle
interactive versus batch mode
"""
for manager in Gcf.get_all_fig_managers():
# do something to display the GUI
pass
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
    # main-level app (e.g. backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasEMF(thisFig)
manager = FigureManagerEMF(canvas, num)
return manager
class FigureCanvasEMF(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def draw(self):
"""
Draw the figure using the renderer
"""
pass
filetypes = {'emf': 'Enhanced Metafile'}
def print_emf(self, filename, dpi=300, **kwargs):
width, height = self.figure.get_size_inches()
renderer = RendererEMF(filename,width,height,dpi)
self.figure.draw(renderer)
renderer.save()
def get_default_filetype(self):
return 'emf'
class FigureManagerEMF(FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
For non interactive backends, the base class does all the work
"""
pass
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureManager = FigureManagerEMF
| agpl-3.0 |
mapleyustat/tns | calcs/peps2d_square_ising_m_T_H_thdyn/plot.py | 1 | 1810 | # -*- coding: utf-8 -*-
import numpy as np
import sys
import matplotlib.pyplot as plt
Nv = int(sys.argv[1])
Nh = int(sys.argv[2])
NT = int(sys.argv[3])
NH = int(sys.argv[4])
T = np.ndarray(NT)
H = np.ndarray(NH)
x = np.ndarray((NH, NT))
with open("F_T_H.dat", "r") as f:
i = 0
for line in f:
fields = line.split(" ")
if i < NT:
T[i] = float(fields[1])
if i % NT == 0:
            H[i // NT] = float(fields[0])
        x[i // NT, i % NT] = float(fields[2])
i += 1
for i in range(NH):
plt.plot(T, x[i], marker="x", label="$H = " + str(H[i]) + "$")
plt.grid(True)
plt.title("$N_v = " + str(Nv) + "$, $N_h = " + str(Nh) + "$")
plt.legend(loc=2)
plt.xlabel("$T$ $[J]$")
plt.ylabel("$F$ $[J]$")
plt.ylim(ymin=0)
plt.savefig("F_T_H.png")
with open("m_T_H.dat", "r") as f:
i = 0
for line in f:
fields = line.split(" ")
        x[i // NT, i % NT] = float(fields[2])
i += 1
plt.clf()
for i in range(NH):
plt.plot(T, x[i], marker="x", label="$H = " + str(H[i]) + "$")
plt.grid(True)
plt.title("$N_v = " + str(Nv) + "$, $N_h = " + str(Nh) + "$")
plt.legend(loc=1)
plt.xlabel("$T$ $[J]$")
plt.ylabel("$m = \\frac{M}{N} = \\frac{1}{N} \\, \\frac{\partial F}{\partial H} + 1$")
plt.ylim(0, 1)
plt.savefig("m_T_H.png")
with open("chi_T_H.dat", "r") as f:
i = 0
for line in f:
fields = line.split(" ")
        x[i // NT, i % NT] = float(fields[2])
i += 1
plt.clf()
for i in range(NH):
plt.plot(T, x[i], marker="x", label="$H = " + str(H[i]) + "$")
plt.grid(True)
plt.title("$N_v = " + str(Nv) + "$, $N_h = " + str(Nh) + "$")
plt.legend(loc=1)
plt.xlabel("$T$ $[J]$")
plt.ylabel("$\\chi = \\frac{\partial m}{\partial H} = \\frac{1}{N} \\, \\frac{\partial^2 F}{\partial H^2}$ $[1/J]$")
plt.ylim(0, 3.5)
plt.savefig("chi_T_H.png")
| gpl-2.0 |
djgagne/scikit-learn | sklearn/tests/test_random_projection.py | 79 | 14035 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils import DataDimensionalityWarning
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
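# For reference, the bound used by johnson_lindenstrauss_min_dim is (up to
# rounding) n_components >= 4*log(n_samples) / (eps**2/2 - eps**3/3); e.g.
# n_samples=1000 with eps=0.1 gives ~5920 components, the figure quoted in the
# error message checked further below.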
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
    # Every random matrix generator should produce a transformation matrix
    # with zero mean and unit norm for each column
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
# Check basic properties of random matrix generation
for random_matrix in all_random_matrix:
yield check_input_size_random_matrix, random_matrix
yield check_size_generated, random_matrix
yield check_zero_mean_and_unit_norm, random_matrix
for random_matrix in all_sparse_random_matrix:
yield check_input_with_sparse_random_matrix, random_matrix
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
yield check_zero_mean_and_unit_norm, random_matrix_dense
def test_gaussian_random_matrix():
    # Check some statistical properties of the Gaussian random matrix.
    # Check that the random matrix follows the proper distribution:
    # each element a_{ij} of A is drawn from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
    # Check some statistical properties of the sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
        # Check that the random matrix follows the proper distribution:
        # each element a_{ij} of A is drawn from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
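# Sanity check on the three-point distribution above: with
# P(+-sqrt(s)/sqrt(n_components)) = 1/(2*s) and P(0) = 1 - 1/s, each entry has
# zero mean and variance 2*(1/(2*s))*(s/n_components) = 1/n_components, so a
# column of n_components entries has expected squared norm 1 -- consistent with
# check_zero_mean_and_unit_norm above.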
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [[0, 1, 2]])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
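# The two bounds above are exactly the Johnson-Lindenstrauss guarantee: for each
# pair (u, v), (1 - eps)*||u - v||^2 <= ||f(u) - f(v)||^2 <= (1 + eps)*||u - v||^2,
# verified here through the ratio of squared projected to original distances.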
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
            assert_less(rp.components_.nnz, 115)  # close to the requested 0.1% density
            assert_less(85, rp.components_.nnz)  # close to the requested 0.1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
| bsd-3-clause |
arokem/nipy | examples/algorithms/ward_clustering.py | 3 | 2047 | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function # Python 2/3 compatibility
__doc__ = """
Demo ward clustering on a graph: various ways of forming clusters and dendrogram
Requires matplotlib
"""
print(__doc__)
import numpy as np
from numpy.random import randn, rand
try:
import matplotlib.pyplot as plt
except ImportError:
raise RuntimeError("This script needs the matplotlib library")
from nipy.algorithms.graph import knn
from nipy.algorithms.clustering.hierarchical_clustering import ward
# n = number of points, k = number of nearest neighbours
n = 100
k = 5
# Set verbose to True to see more printed output
verbose = False
X = randn(n, 2)
X[:int(np.ceil(n / 3))] += 3
G = knn(X, 5)
tree = ward(G, X, verbose)
threshold = .5 * n
u = tree.partition(threshold)
plt.figure(figsize=(12, 6))
plt.subplot(1, 3, 1)
for i in range(u.max()+1):
plt.plot(X[u == i, 0], X[u == i, 1], 'o', color=(rand(), rand(), rand()))
plt.axis('tight')
plt.axis('off')
plt.title('clustering into clusters \n of inertia < %g' % threshold)
u = tree.split(k)
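# Unlike partition(), which cuts the dendrogram at an inertia threshold,
# split(k) appears to return a labelling with exactly k clusters (k = 5 here,
# matching the subplot title below).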
plt.subplot(1, 3, 2)
for e in range(G.E):
plt.plot([X[G.edges[e, 0], 0], X[G.edges[e, 1], 0]],
[X[G.edges[e, 0], 1], X[G.edges[e, 1], 1]], 'k')
for i in range(u.max() + 1):
plt.plot(X[u == i, 0], X[u == i, 1], 'o', color=(rand(), rand(), rand()))
plt.axis('tight')
plt.axis('off')
plt.title('clustering into 5 clusters')
nl = np.sum(tree.isleaf())
validleaves = np.zeros(n)
validleaves[:int(np.ceil(n / 4))] = 1
valid = np.zeros(tree.V, 'bool')
valid[tree.isleaf()] = validleaves.astype('bool')
nv = np.sum(validleaves)
nv0 = 0
while nv > nv0:
nv0 = nv
for v in range(tree.V):
if valid[v]:
valid[tree.parents[v]]=1
nv = np.sum(valid)
ax = plt.subplot(1, 3, 3)
ax = tree.plot(ax)
ax.set_title('Dendrogram')
ax.set_visible(True)
plt.show()
if verbose:
print('List of sub trees')
print(tree.list_of_subtrees())
| bsd-3-clause |
Nyker510/scikit-learn | examples/ensemble/plot_gradient_boosting_regression.py | 227 | 2520 | """
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,
'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
| bsd-3-clause |
h2educ/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
        # surfaces. These points are regularly spaced and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
| bsd-3-clause |
jorisvandenbossche/geopandas | geopandas/geodataframe.py | 1 | 17834 | try:
from collections import OrderedDict
except ImportError:
# Python 2.6
from ordereddict import OrderedDict
import json
import os
import sys
import numpy as np
from pandas import DataFrame, Series, Index
from shapely.geometry import mapping, shape
from shapely.geometry.base import BaseGeometry
from six import string_types
from geopandas import GeoSeries
from geopandas.base import GeoPandasBase
from geopandas.plotting import plot_dataframe
import geopandas.io
DEFAULT_GEO_COLUMN_NAME = 'geometry'
PY3 = sys.version_info[0] == 3
class GeoDataFrame(GeoPandasBase, DataFrame):
"""
A GeoDataFrame object is a pandas.DataFrame that has a column
with geometry. In addition to the standard DataFrame constructor arguments,
GeoDataFrame also accepts the following keyword arguments:
Keyword Arguments
-----------------
crs : str (optional)
Coordinate system
geometry : str or array (optional)
If str, column to use as geometry. If array, will be set as 'geometry'
column on GeoDataFrame.
"""
# XXX: This will no longer be necessary in pandas 0.17
_internal_names = ['_data', '_cacher', '_item_cache', '_cache',
'is_copy', '_subtyp', '_index',
'_default_kind', '_default_fill_value', '_metadata',
'__array_struct__', '__array_interface__']
_metadata = ['crs', '_geometry_column_name']
_geometry_column_name = DEFAULT_GEO_COLUMN_NAME
def __init__(self, *args, **kwargs):
crs = kwargs.pop('crs', None)
geometry = kwargs.pop('geometry', None)
super(GeoDataFrame, self).__init__(*args, **kwargs)
self.crs = crs
if geometry is not None:
self.set_geometry(geometry, inplace=True)
self._invalidate_sindex()
# Serialize metadata (will no longer be necessary in pandas 0.17+)
# See https://github.com/pydata/pandas/pull/10557
def __getstate__(self):
meta = dict((k, getattr(self, k, None)) for k in self._metadata)
return dict(_data=self._data, _typ=self._typ,
_metadata=self._metadata, **meta)
def __setattr__(self, attr, val):
# have to special case geometry b/c pandas tries to use as column...
if attr == 'geometry':
object.__setattr__(self, attr, val)
else:
super(GeoDataFrame, self).__setattr__(attr, val)
def _get_geometry(self):
if self._geometry_column_name not in self:
raise AttributeError("No geometry data set yet (expected in"
                                 " column '%s')." % self._geometry_column_name)
return self[self._geometry_column_name]
def _set_geometry(self, col):
# TODO: Use pandas' core.common.is_list_like() here.
if not isinstance(col, (list, np.ndarray, Series)):
raise ValueError("Must use a list-like to set the geometry"
" property")
self.set_geometry(col, inplace=True)
geometry = property(fget=_get_geometry, fset=_set_geometry,
doc="Geometry data for GeoDataFrame")
def set_geometry(self, col, drop=False, inplace=False, crs=None):
"""
Set the GeoDataFrame geometry using either an existing column or
the specified input. By default yields a new object.
The original geometry column is replaced with the input.
Parameters
----------
        col : column label or array
        drop : boolean, default False
            Delete the column to be used as the new geometry
inplace : boolean, default False
Modify the GeoDataFrame in place (do not create a new object)
        crs : str/result of fiona.get_crs (optional)
Coordinate system to use. If passed, overrides both DataFrame and
col's crs. Otherwise, tries to get crs from passed col values or
DataFrame.
Examples
--------
>>> df1 = df.set_geometry([Point(0,0), Point(1,1), Point(2,2)])
>>> df2 = df.set_geometry('geom1')
Returns
-------
geodataframe : GeoDataFrame
"""
# Most of the code here is taken from DataFrame.set_index()
if inplace:
frame = self
else:
frame = self.copy()
if not crs:
crs = getattr(col, 'crs', self.crs)
to_remove = None
geo_column_name = self._geometry_column_name
if isinstance(col, (Series, list, np.ndarray)):
level = col
elif hasattr(col, 'ndim') and col.ndim != 1:
raise ValueError("Must pass array with one dimension only.")
else:
try:
level = frame[col].values
except KeyError:
raise ValueError("Unknown column %s" % col)
except:
raise
if drop:
to_remove = col
geo_column_name = self._geometry_column_name
else:
geo_column_name = col
if to_remove:
del frame[to_remove]
if isinstance(level, GeoSeries) and level.crs != crs:
# Avoids caching issues/crs sharing issues
level = level.copy()
level.crs = crs
# Check that we are using a listlike of geometries
if not all(isinstance(item, BaseGeometry) or not item for item in level):
raise TypeError("Input geometry column must contain valid geometry objects.")
frame[geo_column_name] = level
frame._geometry_column_name = geo_column_name
frame.crs = crs
frame._invalidate_sindex()
if not inplace:
return frame
@classmethod
def from_file(cls, filename, **kwargs):
"""
Alternate constructor to create a GeoDataFrame from a file.
Example:
df = geopandas.GeoDataFrame.from_file('nybb.shp')
Wraps geopandas.read_file(). For additional help, see read_file()
"""
return geopandas.io.file.read_file(filename, **kwargs)
@classmethod
def from_features(cls, features, crs=None):
"""
Alternate constructor to create GeoDataFrame from an iterable of
features. Each element must be a feature dictionary or implement
the __geo_interface__.
See: https://gist.github.com/sgillies/2217756
"""
rows = []
for f in features:
if hasattr(f, "__geo_interface__"):
f = f.__geo_interface__
d = {'geometry': shape(f['geometry']) if f['geometry'] else None}
d.update(f['properties'])
rows.append(d)
df = GeoDataFrame.from_dict(rows)
df.crs = crs
return df
@classmethod
def from_postgis(cls, sql, con, geom_col='geom', crs=None, index_col=None,
coerce_float=True, params=None):
"""
Alternate constructor to create a GeoDataFrame from a sql query
containing a geometry column.
Example:
            df = geopandas.GeoDataFrame.from_postgis(
                "SELECT geom, highway FROM roads;", con)
Wraps geopandas.read_postgis(). For additional help, see read_postgis()
"""
return geopandas.io.sql.read_postgis(sql, con, geom_col, crs, index_col,
coerce_float, params)
def to_json(self, na='null', show_bbox=False, **kwargs):
"""
Returns a GeoJSON string representation of the GeoDataFrame.
Parameters
----------
na : {'null', 'drop', 'keep'}, default 'null'
Indicates how to output missing (NaN) values in the GeoDataFrame
* null: output the missing entries as JSON null
* drop: remove the property from the feature. This applies to
each feature individually so that features may have
different properties
* keep: output the missing entries as NaN
show_bbox : include bbox (bounds) in the geojson
The remaining *kwargs* are passed to json.dumps().
"""
return json.dumps(self._to_geo(na=na, show_bbox=show_bbox), **kwargs)
@property
def __geo_interface__(self):
"""
Returns a python feature collection (i.e. the geointerface)
representation of the GeoDataFrame.
This differs from `_to_geo()` only in that it is a property with
default args instead of a method
"""
return self._to_geo(na='null', show_bbox=True)
def iterfeatures(self, na='null', show_bbox=False):
"""
Returns an iterator that yields feature dictionaries that comply with
__geo_interface__
Parameters
----------
na : {'null', 'drop', 'keep'}, default 'null'
Indicates how to output missing (NaN) values in the GeoDataFrame
            * null: output the missing entries as JSON null
* drop: remove the property from the feature. This applies to
each feature individually so that features may have
different properties
* keep: output the missing entries as NaN
show_bbox : include bbox (bounds) in the geojson. default False
"""
def fill_none(row):
"""
Takes in a Series, converts to a dictionary with null values
set to None
"""
na_keys = row.index[row.isnull()]
d = row.to_dict()
for k in na_keys:
d[k] = None
return d
# na_methods must take in a Series and return dict
na_methods = {'null': fill_none,
'drop': lambda row: row.dropna().to_dict(),
'keep': lambda row: row.to_dict()}
if na not in na_methods:
raise ValueError('Unknown na method {0}'.format(na))
f = na_methods[na]
for i, row in self.iterrows():
properties = f(row)
del properties[self._geometry_column_name]
feature = {
'id': str(i),
'type': 'Feature',
'properties': properties,
'geometry': mapping(row[self._geometry_column_name])
if row[self._geometry_column_name] else None
}
if show_bbox:
feature['bbox'] = row.geometry.bounds
yield feature
def _to_geo(self, **kwargs):
"""
Returns a python feature collection (i.e. the geointerface)
representation of the GeoDataFrame.
"""
geo = {'type': 'FeatureCollection',
'features': list(self.iterfeatures(**kwargs))}
if kwargs.get('show_bbox', False):
geo['bbox'] = self.total_bounds
return geo
def to_file(self, filename, driver="ESRI Shapefile", schema=None,
**kwargs):
"""
Write this GeoDataFrame to an OGR data source
A dictionary of supported OGR providers is available via:
>>> import fiona
>>> fiona.supported_drivers
Parameters
----------
filename : string
File path or file handle to write to.
driver : string, default 'ESRI Shapefile'
The OGR format driver used to write the vector file.
schema : dict, default None
If specified, the schema dictionary is passed to Fiona to
better control how the file is written.
The *kwargs* are passed to fiona.open and can be used to write
to multi-layer data, store data within archives (zip files), etc.
"""
from geopandas.io.file import to_file
to_file(self, filename, driver, schema, **kwargs)
def to_crs(self, crs=None, epsg=None, inplace=False):
"""Transform geometries to a new coordinate reference system
This method will transform all points in all objects. It has
        no notion of projecting entire geometries. All segments
joining points are assumed to be lines in the current
projection, not geodesics. Objects crossing the dateline (or
other projection boundary) will have undesirable behavior.
`to_crs` passes the `crs` argument to the `Proj` function from the
`pyproj` library (with the option `preserve_units=True`). It can
therefore accept proj4 projections in any format
supported by `Proj`, including dictionaries, or proj4 strings.
"""
if inplace:
df = self
else:
df = self.copy()
geom = df.geometry.to_crs(crs=crs, epsg=epsg)
df.geometry = geom
df.crs = geom.crs
if not inplace:
return df
def __getitem__(self, key):
"""
If the result is a column containing only 'geometry', return a
GeoSeries. If it's a DataFrame with a 'geometry' column, return a
GeoDataFrame.
"""
result = super(GeoDataFrame, self).__getitem__(key)
geo_col = self._geometry_column_name
if isinstance(key, string_types) and key == geo_col:
result.__class__ = GeoSeries
result.crs = self.crs
result._invalidate_sindex()
elif isinstance(result, DataFrame) and geo_col in result:
result.__class__ = GeoDataFrame
result.crs = self.crs
result._geometry_column_name = geo_col
result._invalidate_sindex()
elif isinstance(result, DataFrame) and geo_col not in result:
result.__class__ = DataFrame
return result
#
# Implement pandas methods
#
def merge(self, *args, **kwargs):
result = DataFrame.merge(self, *args, **kwargs)
geo_col = self._geometry_column_name
if isinstance(result, DataFrame) and geo_col in result:
result.__class__ = GeoDataFrame
result.crs = self.crs
result._geometry_column_name = geo_col
result._invalidate_sindex()
elif isinstance(result, DataFrame) and geo_col not in result:
result.__class__ = DataFrame
return result
@property
def _constructor(self):
return GeoDataFrame
def __finalize__(self, other, method=None, **kwargs):
"""propagate metadata from other to self """
# merge operation: using metadata of the left object
if method == 'merge':
for name in self._metadata:
object.__setattr__(self, name, getattr(other.left, name, None))
# concat operation: using metadata of the first object
elif method == 'concat':
for name in self._metadata:
object.__setattr__(self, name, getattr(other.objs[0], name, None))
else:
for name in self._metadata:
object.__setattr__(self, name, getattr(other, name, None))
return self
def copy(self, deep=True):
"""
Make a copy of this GeoDataFrame object
Parameters
----------
deep : boolean, default True
Make a deep copy, i.e. also copy data
Returns
-------
copy : GeoDataFrame
"""
# FIXME: this will likely be unnecessary in pandas >= 0.13
data = self._data
if deep:
data = data.copy()
return GeoDataFrame(data).__finalize__(self)
def plot(self, *args, **kwargs):
return plot_dataframe(self, *args, **kwargs)
plot.__doc__ = plot_dataframe.__doc__
def dissolve(self, by=None, aggfunc='first', as_index=True):
"""
        Dissolve geometries within `groupby` into a single observation.
        This is accomplished by applying the `unary_union` method
        to all geometries within a group.
Observations associated with each `groupby` group will be aggregated
using the `aggfunc`.
Parameters
----------
by : string, default None
Column whose values define groups to be dissolved
aggfunc : function or string, default "first"
Aggregation function for manipulation of data associated
with each group. Passed to pandas `groupby.agg` method.
as_index : boolean, default True
If true, groupby columns become index of result.
Returns
-------
GeoDataFrame
"""
# Process non-spatial component
data = self.drop(labels=self.geometry.name, axis=1)
aggregated_data = data.groupby(by=by).agg(aggfunc)
# Process spatial component
def merge_geometries(block):
merged_geom = block.unary_union
return merged_geom
g = self.groupby(by=by, group_keys=False)[self.geometry.name].agg(merge_geometries)
# Aggregate
aggregated_geometry = GeoDataFrame(g, geometry=self.geometry.name)
# Recombine
aggregated = aggregated_geometry.join(aggregated_data)
# Reset if requested
if not as_index:
aggregated = aggregated.reset_index()
return aggregated
def _dataframe_set_geometry(self, col, drop=False, inplace=False, crs=None):
if inplace:
raise ValueError("Can't do inplace setting when converting from"
" DataFrame to GeoDataFrame")
gf = GeoDataFrame(self)
# this will copy so that BlockManager gets copied
return gf.set_geometry(col, drop=drop, inplace=False, crs=crs)
if PY3:
DataFrame.set_geometry = _dataframe_set_geometry
else:
import types
DataFrame.set_geometry = types.MethodType(_dataframe_set_geometry, None,
DataFrame)
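# --- Hedged usage sketch (illustration only, not part of the geopandas API
# --- surface): a minimal example of building a GeoDataFrame from shapely
# --- points and serializing it to GeoJSON; the column name 'value' is
# --- arbitrary. Guarded so importing this module stays side-effect free.
if __name__ == "__main__":
    from shapely.geometry import Point

    # Construct a small frame; the geometry keyword fills the 'geometry' column.
    example = GeoDataFrame({'value': [1, 2, 3]},
                           geometry=[Point(0, 0), Point(1, 1), Point(2, 2)])
    # The geometry accessor returns a GeoSeries, and to_json() produces a
    # GeoJSON FeatureCollection string.
    print(type(example.geometry))
    print(example.to_json())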
| bsd-3-clause |
steromano/BayesianMethodsForHackers | Chapter2_MorePyMC/separation_plot.py | 86 | 1494 | # separation plot
# Author: Cameron Davidson-Pilon,2013
# see http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
import matplotlib.pyplot as plt
import numpy as np
def separation_plot( p, y, **kwargs ):
"""
This function creates a separation plot for logistic and probit classification.
See http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
    p: The proportions/probabilities; can be an n x M matrix which represents M models.
y: the 0-1 response variables.
"""
assert p.shape[0] == y.shape[0], "p.shape[0] != y.shape[0]"
n = p.shape[0]
try:
M = p.shape[1]
except:
p = p.reshape( n, 1 )
M = p.shape[1]
#colors = np.array( ["#fdf2db", "#e44a32"] )
colors_bmh = np.array( ["#eeeeee", "#348ABD"] )
fig = plt.figure( )#figsize = (8, 1.3*M) )
for i in range(M):
ax = fig.add_subplot(M, 1, i+1)
ix = np.argsort( p[:,i] )
#plot the different bars
bars = ax.bar( np.arange(n), np.ones(n), width=1.,
color = colors_bmh[ y[ix].astype(int) ],
edgecolor = 'none')
ax.plot( np.arange(n+1), np.append(p[ix,i], p[ix,i][-1]), "k",
linewidth = 1.,drawstyle="steps-post" )
#create expected value bar.
ax.vlines( [(1-p[ix,i]).sum()], [0], [1] )
#ax.grid(False)
#ax.axis('off')
plt.xlim( 0, n)
plt.tight_layout()
return
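# --- Hedged demo (only runs when this file is executed directly): build
# --- synthetic probabilities and 0/1 outcomes, then draw the separation plot.
if __name__ == "__main__":
    np.random.seed(0)
    n_obs = 200
    p_demo = np.random.uniform(size=n_obs)                         # fake model probabilities
    y_demo = (np.random.uniform(size=n_obs) < p_demo).astype(int)  # fake binary outcomes
    separation_plot(p_demo, y_demo)
    plt.show()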
| mit |
equialgo/scikit-learn | sklearn/utils/tests/test_fixes.py | 28 | 3156 | # Authors: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import pickle
import numpy as np
import math
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
from sklearn.utils.fixes import MaskedArray
from sklearn.utils.fixes import norm
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
#http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
def test_masked_array_obj_dtype_pickleable():
marr = MaskedArray([1, None, 'a'], dtype=object)
for mask in (True, False, [0, 1, 0]):
marr.mask = mask
marr_pickled = pickle.loads(pickle.dumps(marr))
assert_array_equal(marr.data, marr_pickled.data)
assert_array_equal(marr.mask, marr_pickled.mask)
def test_norm():
X = np.array([[-2, 4, 5],
[1, 3, -4],
[0, 0, 8],
[0, 0, 0]]).astype(float)
# Test various axis and order
assert_equal(math.sqrt(135), norm(X))
assert_array_equal(
np.array([math.sqrt(5), math.sqrt(25), math.sqrt(105)]),
norm(X, axis=0)
)
assert_array_equal(np.array([3, 7, 17]), norm(X, axis=0, ord=1))
assert_array_equal(np.array([2, 4, 8]), norm(X, axis=0, ord=np.inf))
assert_array_equal(np.array([0, 0, 0]), norm(X, axis=0, ord=-np.inf))
assert_array_equal(np.array([11, 8, 8, 0]), norm(X, axis=1, ord=1))
# Test shapes
assert_equal((), norm(X).shape)
assert_equal((3,), norm(X, axis=0).shape)
assert_equal((4,), norm(X, axis=1).shape)
| bsd-3-clause |
cauchycui/scikit-learn | examples/covariance/plot_outlier_detection.py | 235 | 3891 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
    X1 = 0.3 * np.random.randn(int(0.5 * n_inliers), 2) - offset
    X2 = 0.3 * np.random.randn(int(0.5 * n_inliers), 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
| bsd-3-clause |
COMBINE-lab/matryoshka_work | coredomains-import/python-src/compute_overlap_one_chromo.py | 1 | 7902 | # parse their domains, our domains, compute overlap
import sys
#import numpy.bisect
#import numpy
#import igraph
import collections
import matplotlib.pyplot as plt
import matplotlib
import math
Domain = collections.namedtuple("Domain", ["start", "end"])
def jaccard(D1, D2, N):
arr1 = [0 for i in xrange(N)]
for d1 in D1:
frag_d1 = set( [i for i in xrange(d1.start, d1.end+1 ) ] )
for e in frag_d1:
arr1[e] = 1
arr2 = [0 for i in xrange(N)]
for d2 in D2:
frag_d2 = set( [i for i in xrange(d2.start, d2.end+1 ) ] )
for e in frag_d2:
arr2[e] = 1
N11 = [ arr1[i] & arr2[i] for i in xrange(N) ]
N01 = [ arr1[i] & ~arr2[i] for i in xrange(N) ]
N10 = [ ~arr1[i] & arr2[i] for i in xrange(N) ]
T = N * (N-1) / 2
N11 = sum(N11)
N01 = sum(N01)
N10 = sum(N10)
N00 = T - N11 - N01 - N10
# mutual information
# calculate total length
MI = 0
minD = min( min( [d.start for d in D1]) , min([d.start for d in D2]))
maxD = max( max( [d.end for d in D1] ) , max([d.end for d in D2]))
L = maxD - minD + 1
# print "Len is", L, minD, maxD
# create a collection of domains and inter-domains
start = minD
D1_plus = []
# print D1
# print minD
for d1 in D1:
if start < d1.start:
D1_plus.append( Domain(start, d1.start-1) )
# print "added", D1_plus[-1]
D1_plus.append(d1)
start = d1.end+1
if d1.end < maxD:
D1_plus.append( Domain(d1.end+1, maxD) )
# print "\tNew lengths1", len(D1_plus), len(D1)
# create a collection of domains and inter-domains
start = minD
D2_plus = []
for d2 in D2:
if start < d2.start:
D2_plus.append( Domain(start, d2.start-1) )
D2_plus.append(d2)
start = d2.end+1
if d2.end < maxD:
D2_plus.append( Domain(d2.end+1, maxD) )
# print "\tNew lengths2", len(D2_plus), len(D2)
# print D1_plus
# print D2_plus
p_xs = [ (d.end + 1 - d.start) * 1.0 / L for d in D1_plus]
p_ys = [ (d.end + 1 - d.start) * 1.0 / L for d in D2_plus]
for i in xrange(len(D1_plus)):
d1 = D1_plus[i]
p_x = p_xs[i]
for j in xrange(len(D2_plus)):
d2 = D2_plus[j]
p_y = p_ys[j]
overlap = min(d2.end, d1.end) + 1 - max(d1.start, d2.start)
if overlap <= 0: continue
p_xy = overlap * 1.0 / L
MI += p_xy * math.log( p_xy / (p_x * p_y) )
# VI
H_1 = -sum( [p * math.log(p) for p in p_xs] )
H_2 = -sum( [p * math.log(p) for p in p_ys] )
VI = H_1 + H_2 - 2*MI
VI_norm = VI / math.log(N)
# print "\tH1, H2, 2MI, VI", H_1, H_2, 2*MI, VI
#return N11 * 1.0 / (N11 + N01 + N10), (N11 + N00) * 1.0 / T, MI
return N11 * 1.0 / (N11 + N01 + N10), MI, VI_norm
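# Hedged usage note (comment only): for two toy domain lists over N bins, e.g.
# D1 = [Domain(0, 4)] and D2 = [Domain(2, 6)] with N = 10,
# j, mi, vi = jaccard(D1, D2, 10)
# returns the Jaccard overlap of the covered bins, the mutual information of
# the two induced segmentations, and the variation of information divided by
# log(N).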
bingren = sys.argv[1]
dp_domains = sys.argv[2:-1]
input_chromo = sys.argv[-1]
step = 40000
print "------------------------------------------------"
print "Input chromo " + input_chromo
B = {}
avg_len = 0
with open(bingren, 'r') as f:
for line in f:
parts = line.strip().split()
chromo = parts[0]
start = parts[1]
stop = parts[2]
if chromo != input_chromo: continue
if not (chromo in B): B[chromo] = []
B[chromo].append( Domain(int(start) / step, int(stop)/ step ) )
avg_len += int(stop) - int(start)
#print "Avg B.R. domain length, in bs", avg_len * 1.0 / len([ [(d.end - d.start) for d in D] for D in B.values()])
D_res = {}
avg_len = 0
for file_name in dp_domains:
parts = file_name.split('.')
res = parts[-1]
if 'alpha' in res:
res = 1
elif len(res) == 5:
res = int(res[:2]) * 0.01
elif len(res) == 4:
res = int(res[:1]) * 0.1
D_res[res] = []
with open(file_name, 'r') as f:
# parse filename, get resolutions
for line in f:
parts = line.strip().split()
chromo = parts[0]
start = parts[1]
stop = parts[2]
if chromo != input_chromo: continue
D_res[res].append( Domain(int(start)/ step, int(stop)/step ) )
avg_len += int(stop) - int(start)
#print "Avg our domain length, in bs", avg_len * 1.0 / len([ (d.end - d.start) for d in D.values() ] )
overlapChrom = {}
brCoveredByUs = {}
usCoveredByBR = {}
Jacc = {}
MI = {}
VI = {}
total_br_len = 0
N = max( [d.end for d in B[input_chromo] ] )
N = max (N, max( [ max([d.end for d in d_lists]) for d_lists in D_res.values() ] ) )
for domain in B[input_chromo]:
total_br_len += domain.end - domain.start
for res, D in D_res.iteritems():
print "Resolution", res
total_overlap = 0
for domain in B[input_chromo]:
for our_d in D:
if (domain.start <= our_d.start and our_d.start < domain.end) or (domain.start <= our_d.end and our_d.end < domain.end) or (our_d.start <= domain.start and domain.end < our_d.end):
total_overlap += min(domain.end, our_d.end) - max(domain.start, our_d.start)
j, mi, vi = jaccard(B[input_chromo], D, N+1)
overlapChrom[res] = total_overlap * 100.0 / N
brCoveredByUs[res] = total_overlap * 100.0 / total_br_len
usCoveredByBR[res] = total_overlap * 100.0 / sum( [our_d.end+1-our_d.start for our_d in D ] )
Jacc[res] = j * 100.0
MI[res] = mi
VI[res] = vi
####################################
# save data
####################################
fname = "similarities."+input_chromo+".data"
with open(fname, 'w') as f:
f.write("Resolution\n")
Z = sorted( [(k,v) for k,v in overlapChrom.iteritems() ], key=lambda x: x[0] )
X = [k for k,v in Z]
Y = [v for k,v in Z]
f.write( "\t".join( map(str, X) ) + "\n" )
f.write("Overlap\n")
f.write( "\t".join( map(str, X) ) + "\n" )
f.write("brCoveredByUs\n")
Z = sorted( [(k,v) for k,v in brCoveredByUs.iteritems() ], key=lambda x: x[0] )
Y = [v for k,v in Z]
f.write( "\t".join( map(str, X) ) + "\n" )
f.write("usCoveredByBR\n")
Z = sorted( [(k,v) for k,v in usCoveredByBR.iteritems() ], key=lambda x: x[0] )
Y = [v for k,v in Z]
f.write( "\t".join( map(str, X) ) + "\n" )
f.write("Jaccard\n")
Z = sorted( [(k,v) for k,v in Jacc.iteritems() ], key=lambda x: x[0] )
Y = [v for k,v in Z]
f.write( "\t".join( map(str, X) ) + "\n" )
f.write("MI\n")
Z = sorted( [(k,v) for k,v in MI.iteritems() ], key=lambda x: x[0] )
Y = [v for k,v in Z]
f.write( "\t".join( map(str, X) ) + "\n" )
f.write("VI\n")
Z = sorted( [(k,v) for k,v in VI.iteritems() ], key=lambda x: x[0] )
Y = [v for k,v in Z]
f.write( "\t".join( map(str, X) ) + "\n" )
print "Data saved to", fname
####################################
#
# plot things
#
####################################
plt.clf()
font = {'family' : 'normal',
'size' : 20}
matplotlib.rc('font', **font)
plt.xlabel("$\gamma$, resolution parameter")
ymin = 100
Z = sorted( [(k,v) for k,v in overlapChrom.iteritems() ], key=lambda x: x[0] )
X = [k for k,v in Z]
Y = [v for k,v in Z]
ymin = min(ymin, min(Y))
plt.plot(X, Y, '-',label='overlap')
# Z = sorted( [(k,v) for k,v in brCoveredByUs.iteritems() ], key=lambda x: x[0] )
# X = [k for k,v in Z]
# Y = [v for k,v in Z]
# ymin = min(ymin, min(Y))
# plt.plot(X, Y, '-', label='BRcoveredByUs')
# Z = sorted( [(k,v) for k,v in usCoveredByBR.iteritems() ], key=lambda x: x[0] )
# X = [k for k,v in Z]
# Y = [v for k,v in Z]
# ymin = min(ymin, min(Y))
# plt.plot(X, Y, '-', label='usCoveredByBR')
Z = sorted( [(k,v) for k,v in Jacc.iteritems() ], key=lambda x: x[0] )
X = [k for k,v in Z]
Y = [v for k,v in Z]
ymin = min(ymin, min(Y))
plt.plot(X, Y, '-', label='jaccard')
# Z = sorted( [(k,v) for k,v in AdjRand.iteritems() ], key=lambda x: x[0] )
# X = [k for k,v in Z]
# Y = [v for k,v in Z]
# ymin = min(ymin, min(Y))
# plt.plot(X, Y, '-', label='Adjusted Rand')
#plt.ylim([ymin-1, 101])
# Z = sorted( [(k,v) for k,v in MI.iteritems() ], key=lambda x: x[0] )
# X = [k for k,v in Z]
# Y = [v*10 for k,v in Z]
# ymin = min(ymin, min(Y))
# plt.plot(X, Y, '-', label='mutual information, 10X')
Z = sorted( [(k,v) for k,v in VI.iteritems() ], key=lambda x: x[0] )
X = [k for k,v in Z]
Y = [v*100 for k,v in Z]
ymin = min(ymin, min(Y))
plt.plot(X, Y, '-', label='VI, 100X')
plt.legend()
# plt.title(input_chromo)
figname = "overlap_br_ours_" + input_chromo + ".pdf"
plt.savefig(figname)
print "Figure saved to", figname
#plt.show()
| gpl-3.0 |
lemieuxl/pyplink | docs/conf.py | 1 | 5505 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pyplink documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 10 09:00:31 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pyplink'
copyright = '2017, Louis-Philippe Lemieux Perreault'
author = 'Louis-Philippe Lemieux Perreault'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import pyplink
version = ".".join(pyplink.__version__.split(".")[:-1])
release = pyplink.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'github_user': 'lemieuxl',
'github_repo': 'pyplink',
'github_button': False,
'fixed_sidebar': True,
}
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyplinkdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pyplink.tex', 'pyplink Documentation',
'Louis-Philippe Lemieux Perreault', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyplink', 'pyplink Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pyplink', 'pyplink Documentation',
author, 'pyplink', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'https://docs.python.org/3': None,
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
}
| mit |
sambitgaan/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/pylab.py | 70 | 10245 | """
This is a procedural interface to the matplotlib object-oriented
plotting library.
The following plotting commands are provided; the majority have
Matlab(TM) analogs and similar arguments.
_Plotting commands
acorr - plot the autocorrelation function
annotate - annotate something in the figure
arrow - add an arrow to the axes
axes - Create a new axes
axhline - draw a horizontal line across axes
axvline - draw a vertical line across axes
axhspan - draw a horizontal bar across axes
axvspan - draw a vertical bar across axes
axis - Set or return the current axis limits
bar - make a bar chart
barh - a horizontal bar chart
broken_barh - a set of horizontal bars with gaps
box - set the axes frame on/off state
boxplot - make a box and whisker plot
cla - clear current axes
clabel - label a contour plot
clf - clear a figure window
clim - adjust the color limits of the current image
close - close a figure window
colorbar - add a colorbar to the current figure
cohere - make a plot of coherence
contour - make a contour plot
contourf - make a filled contour plot
csd - make a plot of cross spectral density
delaxes - delete an axes from the current figure
draw - Force a redraw of the current figure
errorbar - make an errorbar graph
figlegend - make legend on the figure rather than the axes
figimage - make a figure image
figtext - add text in figure coords
figure - create or change active figure
fill - make filled polygons
findobj - recursively find all objects matching some criteria
gca - return the current axes
gcf - return the current figure
gci - get the current image, or None
getp - get a handle graphics property
grid - set whether gridding is on
hist - make a histogram
hold - set the axes hold state
ioff - turn interaction mode off
ion - turn interaction mode on
isinteractive - return True if interaction mode is on
imread - load image file into array
imshow - plot image data
ishold - return the hold state of the current axes
legend - make an axes legend
loglog - a log log plot
matshow - display a matrix in a new figure preserving aspect
pcolor - make a pseudocolor plot
pcolormesh - make a pseudocolor plot using a quadrilateral mesh
pie - make a pie chart
plot - make a line plot
plot_date - plot dates
plotfile - plot column data from an ASCII tab/space/comma delimited file
pie - pie charts
polar - make a polar plot on a PolarAxes
psd - make a plot of power spectral density
quiver - make a direction field (arrows) plot
rc - control the default params
rgrids - customize the radial grids and labels for polar
savefig - save the current figure
scatter - make a scatter plot
setp - set a handle graphics property
semilogx - log x axis
semilogy - log y axis
show - show the figures
specgram - a spectrogram plot
spy - plot sparsity pattern using markers or image
stem - make a stem plot
subplot - make a subplot (numrows, numcols, axesnum)
subplots_adjust - change the params controlling the subplot positions of current figure
subplot_tool - launch the subplot configuration tool
suptitle - add a figure title
table - add a table to the plot
text - add some text at location x,y to the current axes
thetagrids - customize the radial theta grids and labels for polar
title - add a title to the current axes
xcorr - plot the autocorrelation function of x and y
xlim - set/get the xlimits
ylim - set/get the ylimits
xticks - set/get the xticks
yticks - set/get the yticks
xlabel - add an xlabel to the current axes
ylabel - add a ylabel to the current axes
autumn - set the default colormap to autumn
bone - set the default colormap to bone
cool - set the default colormap to cool
copper - set the default colormap to copper
flag - set the default colormap to flag
gray - set the default colormap to gray
hot - set the default colormap to hot
hsv - set the default colormap to hsv
jet - set the default colormap to jet
pink - set the default colormap to pink
prism - set the default colormap to prism
spring - set the default colormap to spring
summer - set the default colormap to summer
winter - set the default colormap to winter
spectral - set the default colormap to spectral
_Event handling
connect - register an event handler
disconnect - remove a connected event handler
_Matrix commands
cumprod - the cumulative product along a dimension
cumsum - the cumulative sum along a dimension
  detrend - remove the mean or best fit line from an array
diag - the k-th diagonal of matrix
  diff - the n-th difference of an array
eig - the eigenvalues and eigen vectors of v
eye - a matrix where the k-th diagonal is ones, else zero
find - return the indices where a condition is nonzero
  fliplr - flip the columns of a matrix left/right
  flipud - flip the rows of a matrix up/down
linspace - a linear spaced vector of N values from min to max inclusive
logspace - a log spaced vector of N values from min to max inclusive
meshgrid - repeat x and y to make regular matrices
ones - an array of ones
rand - an array from the uniform distribution [0,1]
randn - an array from the normal distribution
  rot90 - rotate matrix k*90 degrees counterclockwise
squeeze - squeeze an array removing any dimensions of length 1
tri - a triangular matrix
tril - a lower triangular matrix
triu - an upper triangular matrix
vander - the Vandermonde matrix of vector x
svd - singular value decomposition
zeros - a matrix of zeros
_Probability
levypdf - The levy probability density function from the char. func.
normpdf - The Gaussian probability density function
rand - random numbers from the uniform distribution
randn - random numbers from the normal distribution
_Statistics
corrcoef - correlation coefficient
cov - covariance matrix
amax - the maximum along dimension m
mean - the mean along dimension m
median - the median along dimension m
amin - the minimum along dimension m
norm - the norm of vector x
prod - the product along dimension m
ptp - the max-min along dimension m
std - the standard deviation along dimension m
asum - the sum along dimension m
_Time series analysis
bartlett - M-point Bartlett window
blackman - M-point Blackman window
cohere - the coherence using average periodiogram
csd - the cross spectral density using average periodiogram
fft - the fast Fourier transform of vector x
hamming - M-point Hamming window
hanning - M-point Hanning window
hist - compute the histogram of x
kaiser - M length Kaiser window
psd - the power spectral density using average periodiogram
sinc - the sinc function of array x
_Dates
date2num - convert python datetimes to numeric representation
drange - create an array of numbers for date plots
num2date - convert numeric type (float days since 0001) to datetime
_Other
angle - the angle of a complex array
griddata - interpolate irregularly distributed data to a regular grid
load - load ASCII data into array
polyfit - fit x, y to an n-th order polynomial
polyval - evaluate an n-th order polynomial
roots - the roots of the polynomial coefficients in p
save - save an array to an ASCII file
trapz - trapezoidal integration
__end
"""
import sys, warnings
from cbook import flatten, is_string_like, exception_to_str, popd, \
silent_list, iterable, dedent
import numpy as np
from numpy import ma
from matplotlib import mpl # pulls in most modules
from matplotlib.dates import date2num, num2date,\
datestr2num, strpdate2num, drange,\
epoch2num, num2epoch, mx2num,\
DateFormatter, IndexDateFormatter, DateLocator,\
RRuleLocator, YearLocator, MonthLocator, WeekdayLocator,\
DayLocator, HourLocator, MinuteLocator, SecondLocator,\
rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, MONTHLY,\
WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY, relativedelta
import matplotlib.dates
# bring all the symbols in so folks can import them from
# pylab in one fell swoop
from matplotlib.mlab import window_hanning, window_none,\
conv, detrend, detrend_mean, detrend_none, detrend_linear,\
polyfit, polyval, entropy, normpdf, griddata,\
levypdf, find, trapz, prepca, rem, norm, orth, rank,\
sqrtm, prctile, center_matrix, rk4, exp_safe, amap,\
sum_flat, mean_flat, rms_flat, l1norm, l2norm, norm, frange,\
diagonal_matrix, base_repr, binary_repr, log2, ispower2,\
bivariate_normal, load, save
from matplotlib.mlab import stineman_interp, slopes, \
stineman_interp, inside_poly, poly_below, poly_between, \
is_closed_polygon, path_length, distances_along_curve, vector_lengths
from numpy import *
from numpy.fft import *
from numpy.random import *
from numpy.linalg import *
from matplotlib.mlab import window_hanning, window_none, conv, detrend, demean, \
detrend_mean, detrend_none, detrend_linear, entropy, normpdf, levypdf, \
find, longest_contiguous_ones, longest_ones, prepca, prctile, prctile_rank, \
center_matrix, rk4, bivariate_normal, get_xyz_where, get_sparse_matrix, dist, \
dist_point_to_segment, segments_intersect, fftsurr, liaupunov, movavg, \
save, load, exp_safe, \
amap, rms_flat, l1norm, l2norm, norm_flat, frange, diagonal_matrix, identity, \
base_repr, binary_repr, log2, ispower2, fromfunction_kw, rem, norm, orth, rank, sqrtm,\
mfuncC, approx_real, rec_append_field, rec_drop_fields, rec_join, csv2rec, rec2csv, isvector
from matplotlib.pyplot import *
# provide the recommended module abbrevs in the pylab namespace
import matplotlib.pyplot as plt
import numpy as np
| agpl-3.0 |
acuzzio/GridQuantumPropagator | src/quantumpropagator/GeneralFunctions.py | 1 | 25256 | '''
This is the module for general purposes functions
'''
import multiprocessing as mp
import numpy as np
import pandas as pd
import yaml
import sys
import math
import pickle
import glob
from matplotlib import cm  # needed by create_list_of_colors below
from .h5Reader import retrieve_hdf5_data
#Debug time
#import pdb
#pdb.set_trace() #to debug h=help
def check_time_stamps_in_wf_folder():
'''
This function is used to check the times into a folder of wf.
Mainly to know the time of those frames.
to be launched from inside the folder.
'''
list_files = sorted(glob.glob('Gau*.h5'))
for single_file in list_files:
times = retrieve_hdf5_data(single_file,'Time')
        #print('{:7.3f} fs / {:8.3f} AU : {}'.format(times[0], times[1], single_file))
print('{:7.3f} fs : {}'.format(times[0], single_file))
def create_list_of_colors(list_itself, cmap=None):
'''
This function is to create colors for graphs which are different when you
have a variable number of elements
list_itself :: List <- the list that you want to color
cmap :: String <- the colors you want
'''
cmap = cmap or 'hsv'
viridis = cm.get_cmap(cmap,12) # I create the CMAP object
return viridis(np.linspace(0,1, len(list_itself)+1))
def pickleLoad(fn):
'''
tedious to remember protocol flag and stuffs
fn :: FilePath
'''
return pickle.load(open(fn,'rb'))
def pickleSave(fn,thing):
'''
tedious part 2
fn :: FilePath
thing :: Structure to save
'''
with open(fn, "wb" ) as pickle_file:
pickle.dump(thing, pickle_file, protocol=pickle.HIGHEST_PROTOCOL)
def find_numpy_index_minumum(array):
'''
I always forget this syntax. Given a numpy array, will give back the indexes of the minimum
array :: numpy array
'''
return (np.unravel_index(array.argmin(), array.shape))
def equilibriumIndex(fn,dataDict):
'''
given the path of direction file and the dataDict, it gives back the index of equilibrium
points in the array
fn :: String -> filePath
dataDict :: {}
'''
phis,gams,thes = readDirectionFile(fn)
gsm_phi_ind = dataDict['phis'].index(phis[0])
gsm_gam_ind = dataDict['gams'].index(gams[0])
gsm_the_ind = dataDict['thes'].index(thes[0])
print('Equilibrium points found at : ({},{},{})'.format(gsm_phi_ind, gsm_gam_ind, gsm_the_ind))
return (gsm_phi_ind, gsm_gam_ind, gsm_the_ind)
def stringTransformation3d(fn):
'''
transform the string of the form
'h5/zNorbornadiene_N006-400_P014-800_P085-500.rassi.h5'
into 3 numbers and 3 labels
'''
fn1 = fn.split('.')[0] # h5/zNorbornadiene_N006-400_P014-800_P085-500
# str1 = 'N006-400' -> axis1 = -6.4
[str1,str2,str3] = fn1.split('_')[1:]
[axis1,axis2,axis3] = [
labTranform(x) for x in
[str1,str2,str3]]
# phi are invariate
axis1 = axis1/100
# gamma are converted to radians
axis2 = np.deg2rad(axis2)
# theta are divided by 2 and converted to radians
axis3 = np.deg2rad(axis3/2)
return(axis1,str1,axis2,str2,axis3,str3)
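# Hedged example (comment only), following the conversions coded above:
# stringTransformation3d('h5/zNorbornadiene_N006-400_P014-800_P085-500.rassi.h5')
# splits the file name into the three geometry labels, turning the phi label
# into a plain float (divided by 100), the gamma label into radians, and the
# theta label into radians after halving, and also returns the raw label strings.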
def fromLabelsToFloats(dataDict):
'''
takes the datadict and returns the three arrays of coordinates values
'''
phis = labTranformA(dataDict['phis'])/100
gams = np.deg2rad(labTranformA(dataDict['gams']))
thes = np.deg2rad(labTranformA(dataDict['thes'])/2)
return(phis,gams,thes)
def fromFloatsToLabels(phis,gams,thes):
'''
it does the opposite of fromLabelsToFloats
phis,gams,thes :: tuple of three np.array(floats)
'''
phiStrings = labTranformReverseA(phis*100)
gamStrings = labTranformReverseA(np.rad2deg(gams))
theStrings = labTranformReverseA(np.rad2deg(thes*2))
return phiStrings, gamStrings, theStrings
def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, bar_length=60):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
"""
total = total -1
str_format = "{0:." + str(decimals) + "f}"
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total)))
bar = '*' * filled_length + '-' * (bar_length - filled_length)
sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
def bring_input_to_AU(iDic):
'''
    converts the relevant input entries from fs/eV into atomic units
    (currently only the time entries dt and fullTime are converted)
    iDic :: Dict
'''
iDic['dt'] = fromFsToAu(iDic['dt'])
iDic['fullTime'] = fromFsToAu(iDic['fullTime'])
# change sigmas and T_0s
#iDic['pulseX'][2] = fromFsToAu(iDic['pulseX'][2])
#iDic['pulseX'][4] = fromFsToAu(iDic['pulseX'][4])
#iDic['pulseY'][2] = fromFsToAu(iDic['pulseY'][2])
#iDic['pulseY'][4] = fromFsToAu(iDic['pulseY'][4])
#iDic['pulseZ'][2] = fromFsToAu(iDic['pulseZ'][2])
#iDic['pulseZ'][4] = fromFsToAu(iDic['pulseZ'][4])
return (iDic)
def readDirectionFile(fn):
'''
fn :: filePath
'''
with open(fn,'r') as f:
f.readline()
phis = f.readline()
f.readline()
f.readline()
gammas = f.readline()
f.readline()
f.readline()
thetas = f.readline()
return(phis.rstrip().split(' '),gammas.rstrip().split(' '), thetas.rstrip().split(' '))
def printDict(dictionary):
'''
pretty printer for dictionary
dictionary :: Dictionary
'''
for x in dictionary:
print('{} -> {}'.format(x,dictionary[x]))
def printDictKeys(dictionary):
print(dictionary.keys())
def readGeometry(fn):
'''
It gives back the geometry from a file
fn :: String <- filepath
'''
with open(fn,'r') as f:
data = f.readlines()
natom = int(data[0])
title = data[1]
geomVector = np.empty((natom,3))
atomType = []
for i in range(natom):
atom = list(filter(None, data[i+2].split(' ')))
atomType.append(atom[0])
geomVector[i,0] = float(atom[1])
geomVector[i,1] = float(atom[2])
geomVector[i,2] = float(atom[3])
return(natom,title,atomType,geomVector)
def calculateGradientOnMatrix0(newNAC,dist):
'''
    This calculates the gradient of a matrix along axis 0
newNAC :: np.array[Double,Double] - derivative coupling matrix
dist :: np.array[Double] - x values
'''
deltaX = dist[1] - dist[0]
allM = np.apply_along_axis(np.gradient, 0, newNAC, deltaX)
return allM
def asyncFun(f, *args, **kwargs):
'''
    Executes the function f in a separate process (via multiprocessing).
'''
job = mp.Process(target=f, args=args, kwargs=kwargs)
job.start()
def abs2(x):
'''
x :: complex
    Returns the squared modulus |x|^2 of a complex number, avoiding the square root taken by abs().
'''
return x.real**2 + x.imag**2
def chunksOf(xs, n):
"""Yield successive n-sized chunks from xs"""
shape0 = xs.shape[0]
for i in range(0, shape0, n):
yield xs[i:i + n]
def chunksOfList(xs, n):
"""Yield successive n-sized chunks from xs"""
for i in range(0, len(xs), n):
yield xs[i:i + n]
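# Minimal sketch of the two chunking helpers (comments only): chunksOf expects a
# numpy array, chunksOfList a plain list.
#   list(chunksOfList([1, 2, 3, 4, 5], 2))         # -> [[1, 2], [3, 4], [5]]
#   [c.shape for c in chunksOf(np.arange(10), 4)]  # -> [(4,), (4,), (2,)]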
def population(grid):
'''
grid :: np.array[Complex]
it calculates the populations of a 1D grid
'''
pop = np.apply_along_axis(singlepop,1,grid)
return(pop,sum(pop))
def ndprint(a, format_string ='{0:15.12f}'):
'''
a = [Double] :: list of doubles
It returns a single string of formatted numbers out of a list of doubles
'''
return " ".join([format_string.format(v,i) for i,v in enumerate(a)])
def singlepop(GridSingleState):
'''
Calculates the population of a single state grid (1D)
'''
return sum(np.apply_along_axis(abs2,0,GridSingleState))
def groundState(n):
'''
n :: Int
given the number of states, this creates an array:
[1,0,0,0,0, ... , 0]
'''
a = np.zeros(n)
a[0]=1.0
return a
def gaussian(x, mu, sig):
'''
It calculates the gaussian value at point x. This gaussian is not normalized because
in this problem the normalization is done at the end.
x :: Double - the x point
mu :: Double - the displacement on the x axis
sig :: Double - the sigma value
'''
return (np.exp(-np.power((x - mu)/sig, 2.)/2)) + (0j)
def gaussian2(x, x0, gw, moment=None):
'''
It calculates the gaussian value at point x. This gaussian is not normalized because
in this problem the normalization is done at the end.
x :: Double - the x point
x0 :: Double - the displacement on the x axis
gw :: Double - the value of the gw factor in front of the equation
moment :: Double - the initial moment given to the WF
'''
moment = moment or 0
return np.exp((- gw * (x - x0)**2) / 2) * np.exp(1j*moment*(x - x0))
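# Hedged example (comments only, the numbers are made up): building an initial
# 1D wavepacket with gaussian2 and normalizing it afterwards, since the
# docstring states that normalization is done at the end.
#   xs = np.linspace(-5, 5, 256)
#   dx = xs[1] - xs[0]
#   wf = gaussian2(xs, x0=0.0, gw=4.0, moment=1.0)
#   wf = wf / np.sqrt(np.sum(abs2(wf)) * dx)   # so that sum |psi|^2 dx == 1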
def saveComplex(fn,array):
""" Saves a complex array into a txt file """
# in one column
#np.savetxt(fn, array.view(float))
np.savetxt(fn, array.view(float).reshape(-1, 2))
def loadComplex(fn):
""" Load a complex array from a txt file """
# in one column
#array = np.loadtxt('outfile.txt').view(complex)
return np.loadtxt(fn).view(complex).reshape(-1)
def print2ArrayInColumns(array1,array2,filename):
""" Saves 2 arrays into 2 columns of a file"""
np.savetxt(filename,np.stack((array1,array2),1))
def dipoleMoment(states,matMu):
'''
dipole moment calculation
'''
nstates = states.size
dipole = np.zeros(3, dtype = complex)
for component in [0,1,2]:
summa = 0
for Ici in range(nstates):
for Icj in range(nstates):
a = np.conjugate(states[Ici])
b = states[Icj]
c = matMu[component, Ici, Icj]
summa += (a*b*c)
#summa = summa + (a*b*c)
dipole[component] = summa
return dipole
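# Note: the explicit double loop in dipoleMoment is a plain tensor contraction;
# assuming matMu has shape (3, nstates, nstates), an equivalent one-liner would
# be (left here only as a commented, untested sketch):
#   dipole = np.einsum('i,cij,j->c', np.conjugate(states), matMu, states)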
def fromAuToFs(n):
''' from au to femtosecond '''
return (n*0.02418884254)
def fromFsToAu(n):
''' from femtosecond to au '''
return (n*41.341374575751)
def fromBohToAng(n):
''' From Bohr to Angstrom conversion - n :: Double '''
return (n * 0.529177249)
def fromAngToBoh(n):
''' From Angstrom to Bohr conversion - n :: Double '''
return (n * 1.889725988)
def fromEvtoHart(n):
''' From ElectronVolt to Hartree conversion - n :: Double '''
return (n * 0.0367493)
def fromHartoEv(n):
''' From Hartree to ElectronVolt conversion - n :: Double '''
return (n * 27.211402)
def fromCmMin1toHartree(n):
''' from cm-1 to hartree conversion - n :: Double '''
return (n*4.5563e-06)
def fromHartreetoCmMin1(n):
''' from hartree to cm-1 conversion - n :: Double '''
return (n/4.5563e-06)
def fromCmMin1toFs(n):
''' from cm-1 to fs conversion - n :: Double '''
return (1/(fromHartreetoCmMin1(n))*1.88365157e+4)
# https://jakevdp.github.io/blog/2013/08/28/understanding-the-fft/
# https://betterexplained.com/articles/an-interactive-guide-to-the-fourier-transform/
def DFT_slow(x):
""" Compute the discrete Fourier Transform of the 1D array x """
x = np.asarray(x, dtype=float)
N = x.shape[0]
n = np.arange(N)
k = n.reshape((N, 1))
M = np.exp(-2j * np.pi * k * n / N)
return np.dot(M, x)
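# Quick sanity check for DFT_slow against numpy's FFT (comments only, random input):
#   x = np.random.random(128)
#   assert np.allclose(DFT_slow(x), np.fft.fft(x))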
def calcBond(geom,atom1,atom2):
'''
returns the bond length between atom1 and atom2
geom :: np.array(natoms,3)
atom1 = integer
atom2 = integer
'''
a = geom[atom1-1]
b = geom[atom2-1]
bond = np.linalg.norm(a-b)
return bond
def calcAngle(geom,atom1,atom2,atom3):
'''
returns the angle between atom1,2 and 3
geom :: np.array(natoms,3)
atom1 = integer
atom2 = integer
atom3 = integer
'''
a = geom[atom1-1]
b = geom[atom2-1]
c = geom[atom3-1]
ba = a - b
bc = c - b
cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
angle = np.arccos(cosine_angle)
return(np.degrees(angle))
def calcDihedral(geom,atom1,atom2,atom3,atom4):
'''
    returns the signed dihedral angle (in degrees) defined by atom1,2,3 and 4
    geom :: np.array(natoms,3)
    atom1 = integer
    atom2 = integer
    atom3 = integer
    atom4 = integer
    '''
    a = geom[atom1-1]
    b = geom[atom2-1]
    c = geom[atom3-1]
    d = geom[atom4-1]
    # standard dihedral: angle between the planes (a,b,c) and (b,c,d)
    b0 = a - b
    b1 = c - b
    b2 = d - c
    b1 = b1 / np.linalg.norm(b1)
    # components of b0 and b2 perpendicular to the central bond b1
    v = b0 - np.dot(b0, b1) * b1
    w = b2 - np.dot(b2, b1) * b1
    x = np.dot(v, w)
    y = np.dot(np.cross(b1, v), w)
    return np.degrees(np.arctan2(y, x))
def massOf(elem):
'''
You get the mass of an element from the label string
elem :: String
'''
dictMass = {'X': 0, 'Ac': 227.028, 'Al': 26.981539, 'Am': 243, 'Sb': 121.757, 'Ar':
39.948, 'As': 74.92159, 'At': 210, 'Ba': 137.327, 'Bk': 247, 'Be':
9.012182, 'Bi': 208.98037, 'Bh': 262, 'B': 10.811, 'Br': 79.904,
'Cd': 112.411, 'Ca': 40.078, 'Cf': 251, 'C': 12.011, 'Ce': 140.115,
'Cs': 132.90543, 'Cl': 35.4527, 'Cr': 51.9961, 'Co': 58.9332, 'Cu':
63.546, 'Cm': 247, 'Db': 262, 'Dy': 162.5, 'Es': 252, 'Er': 167.26,
'Eu': 151.965, 'Fm': 257, 'F': 18.9984032, 'Fr': 223, 'Gd': 157.25,
'Ga': 69.723, 'Ge': 72.61, 'Au': 196.96654, 'Hf': 178.49, 'Hs':
265, 'He': 4.002602, 'Ho': 164.93032, 'H': 1.00794, 'In': 114.82,
'I': 126.90447, 'Ir': 192.22, 'Fe': 55.847, 'Kr': 83.8, 'La':
138.9055, 'Lr': 262, 'Pb': 207.2, 'Li': 6.941, 'Lu': 174.967, 'Mg':
24.305, 'Mn': 54.93805, 'Mt': 266, 'Md': 258, 'Hg': 200.59, 'Mo': 95.94,
'Nd': 144.24, 'Ne': 20.1797, 'Np': 237.048, 'Ni': 58.6934, 'Nb': 92.90638,
'N': 14.00674, 'No': 259, 'Os': 190.2, 'O': 15.9994, 'Pd': 106.42, 'P':
30.973762, 'Pt': 195.08, 'Pu': 244, 'Po': 209, 'K': 39.0983, 'Pr':
140.90765, 'Pm': 145, 'Pa': 231.0359, 'Ra': 226.025, 'Rn': 222,
'Re': 186.207, 'Rh': 102.9055, 'Rb': 85.4678, 'Ru': 101.07, 'Rf':
261, 'Sm': 150.36, 'Sc': 44.95591, 'Sg': 263, 'Se': 78.96, 'Si':
28.0855, 'Ag': 107.8682, 'Na': 22.989768, 'Sr': 87.62, 'S': 32.066,
'Ta': 180.9479, 'Tc': 98, 'Te': 127.6, 'Tb': 158.92534, 'Tl':
204.3833, 'Th': 232.0381, 'Tm': 168.93421, 'Sn': 118.71, 'Ti':
47.88, 'W': 183.85, 'U': 238.0289, 'V': 50.9415, 'Xe': 131.29,
'Yb': 173.04, 'Y': 88.90585, 'Zn': 65.39, 'Zr': 91.224}
return(dictMass[elem])
def saveTraj(arrayTraj, labels, filename, convert=None):
'''
    given a numpy array of multiple coordinates, it writes the concatenated xyz file
arrayTraj :: np.array(ncoord,natom,3) <- the coordinates
labels :: [String] <- ['C', 'H', 'Cl']
filename :: String <- filepath
    convert :: Bool <- it tells if you need to convert from Boh to Ang (default False)
'''
convert = convert or False
(ncoord,natom,_) = arrayTraj.shape
fn = filename + '.xyz'
string = ''
for geo in range(ncoord):
string += str(natom) + '\n\n'
for i in range(natom):
if convert:
string += " ".join([labels[i]] +
['{:10.6f}'.format(fromBohToAng(num)) for num
in arrayTraj[geo,i]]) + '\n'
else:
string += " ".join([labels[i]] +
['{:10.6f}'.format(num) for num
in arrayTraj[geo,i]]) + '\n'
with open(fn, "w") as myfile:
myfile.write(string)
print('\nfile {0} written:\n\nvmd {0}'.format(fn))
def scanvalues(first,second,resolution):
'''
This uses numpy to get the values printed out in a single line.
first :: Double <- start of the interval
second :: Double <- end of the interval
    resolution :: Int <- resolution (how many points in the interval)
'''
vec = np.linspace(first,second,resolution)
oneline = " ".join(['{:7.3f}'.format(b) for b in vec])
return oneline
def printMatrix2D(mat, pre=None, thr=None):
'''
mat :: np.array(X,Y) <- I use this for overlap matrix
pre :: Int <- the precision for the output
thr :: Double <- value smaller than this are set to 0
given a 2d array in numpy, it prints the matrix on the screen
'''
pre = pre or 6
thr = thr or 0.0
pd.set_option('precision', pre)
pd.set_option('chop_threshold', thr)
(siza,_) = mat.shape
indexes = np.arange(siza) + 1
out = pd.DataFrame(mat, index=indexes, columns=indexes)
print(out)
def createTabellineFromArray(arr):
'''
arr :: np.array(Double)
    This function takes a 1D numpy array and creates the matrix of pairwise
    products mat[i,k] = arr[i] * arr[k] (the outer product of arr with itself)
'''
length = arr.size
mat = np.empty((length,length))
for ii in np.arange(length):
for kk in np.arange(length):
mat[ii,kk]=arr[ii]*arr[kk]
return(mat)
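# The double loop above is just the outer product of arr with itself; for any 1D
# array arr the following commented check should hold:
#   assert np.allclose(createTabellineFromArray(arr), np.outer(arr, arr))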
def labTranformReverseA(floArray):
'''
labTranformReverse applied to an array
'''
return [ labTranformReverse(x) for x in floArray ]
def labTranformReverse(flo):
'''
from the float number to the labeling of files used in this project
'''
flo2 = '{:+08.3f}'.format(flo)
return flo2.replace('-','N').replace('.','-').replace('+','P')
def labTranform(string):
'''
transform the string of the form
P014-800
    into its float value +14.8
'''
return (float(string.replace('-','.').replace('N','-').replace('P','+')))
def labTranformA(strings):
'''
    transform an array of strings of the form
    P014-800
    into a numpy array of the corresponding floats (e.g. +14.8)
'''
return (np.array([labTranform(a) for a in strings]))
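# Round-trip sketch for the label helpers (comments only), using the convention
# P/N == +/- and '-' == decimal point:
#   labTranform('P014-800')                  # -> 14.8
#   labTranformReverse(14.8)                 # -> 'P014-800'
#   labTranformA(['P014-800', 'N006-400'])   # -> array([ 14.8, -6.4])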
def loadInputYAML(fn):
'''
this function reads the input file and returns a dictionary with inputs
fn :: filePath
'''
with open(fn, 'r') as f:
diction = yaml.safe_load(f)
return diction
def generateNorbGeometry(phi,gam,the, vector_res=None):
'''
This function generates an xyz given the value of the three angles
phi,gam,the :: Double <- the three angles
    vector_res :: Boolean <- if False (default) it saves an xyz file, if True it returns the coordinates
'''
vector_res = vector_res or False
fnO = 'zNorbornadiene_{:+08.3f}_{:+08.3f}_{:+08.3f}'.format(phi,gam,the)
fn = fnO.replace('-','N').replace('.','-').replace('+','P')
atomT = ['C','C','C','H','H','H','H']
fixed = np.array([[0.000000, 0.000000, 1.078168],
[0.000000, -1.116359, 0.000000],
[0.000000, 1.116359, 0.000000],
[0.894773, 0.000000, 1.698894],
[-0.894773, 0.000000, 1.698894],
[0.000000, -2.148889, 0.336566],
[0.000000, 2.148889, 0.336566]])
rBond = 1.541 # distance of bridge
L = 1.116359 # half distance between C2-C3
chBond = 1.077194 # distance between moving C and H
the2 = np.deg2rad(the/2)
gam2 = np.deg2rad(gam)
torsionalCI = 6 # values for phi AT WHICH
# this is the vector that displaces our 8 moving atoms from the CLOSEST CI I
# can reach with the old scan and the real conical intersection
deltasCIN = np.array([
[-0.165777, 0.067387, 0.016393],
[-0.14517 , -0.096085, -0.143594],
[ 0.165162, -0.067684, 0.015809],
[ 0.145943, 0.095734, -0.143995],
[-0.520977, 0.086124, 0.316644],
[ 0.450303, -0.048 , 0.245432],
[ 0.520405, -0.086941, 0.316594],
[-0.451602, 0.047331, 0.24554 ],
])
xC1 = -rBond * np.cos(gam2) * np.sin(the2)
yC1 = L + rBond * - np.sin(gam2)
zC1 = -rBond * np.cos(the2) * np.cos(gam2)
xC2 = -rBond * np.cos(gam2) * np.sin(-the2)
yC2 = L - rBond * np.sin(gam2)
zC2 = -rBond * np.cos(-the2) * np.cos(gam2)
xC3 = rBond * np.cos(gam2) * np.sin(+the2)
yC3 = -L + rBond * np.sin(gam2)
zC3 = -rBond * np.cos(+the2) * np.cos(gam2)
xC4 = rBond * np.cos(gam2) * np.sin(-the2)
yC4 = -L + rBond * np.sin(gam2)
zC4 = -rBond * np.cos(-the2) * np.cos(gam2)
# in the end we did this with cartesian... interesting workaround...
# desperation?
dx = +0.694921
dy = +0.661700
dz = +0.494206
xH1 = xC1 - dx
yH1 = yC1 + dy
zH1 = zC1 - dz
xH2 = xC2 + dx
yH2 = yC2 + dy
zH2 = zC2 - dz
xH3 = xC3 + dx
yH3 = yC3 - dy
zH3 = zC3 - dz
xH4 = xC4 - dx
yH4 = yC4 - dy
zH4 = zC4 - dz
newAtoms = np.array([[xC1,yC1,zC1], [xC2,yC2,zC2], [xC3,yC3,zC3], [xC4,yC4,zC4],
[xH1,yH1,zH1], [xH2,yH2,zH2], [xH3,yH3,zH3], [xH4,yH4,zH4]])
this = ((phi/torsionalCI) * deltasCIN)
newCorrectedAtoms = newAtoms + this
new = np.append(fixed,newCorrectedAtoms,0)
atomTN = atomT + ['C', 'C', 'C', 'C', 'H', 'H', 'H', 'H']
if vector_res:
return(new)
else:
# saveTraj works on LIST of geometries, that is why the double list brackets
saveTraj(np.array([new]),atomTN,fn)
def file_len(fname):
'''
gives the number of lines in the file
fn :: FilePath
'''
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
def frames_counter(fn):
'''
    Given a trajectory file, gives back the number of atoms and the number of frames.
fn :: FilePath
'''
f = open(fn)
atomN = int(f.readline())
f.close()
with open(fn) as f:
for i, l in enumerate(f):
pass
frameN = int((i + 1)/(atomN+2))
return (atomN,frameN)
def readTrajectory(fn):
'''
    reads an md.xyz format trajectory file and gives back a dictionary with the geometries, atom types and atom/frame counts
'''
atomsN,frameN = frames_counter(fn)
print('\nAtoms: {}\nFrames: {}\n'.format(atomsN,frameN))
geom = np.empty((frameN,atomsN,3))
atomT = []
with open(fn) as f:
for i in range(frameN):
f.readline()
f.readline()
for j in range(atomsN):
a = f.readline()
bb = a.split(" ")
b = [ x for x in bb if x != '']
geom[i,j] = [float(b[1]),float(b[3]),float(b[2])]
if i == 0:
atomT.append(b[0])
final_data = {
'geoms' : geom,
'atomsN' : atomsN,
'frameN' : frameN,
'atomT' : atomT,
}
return final_data
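# Usage sketch for the two trajectory helpers (comments only, 'traj.xyz' is a
# placeholder path):
#   natoms, nframes = frames_counter('traj.xyz')
#   data = readTrajectory('traj.xyz')
#   data['geoms'].shape   # -> (nframes, natoms, 3)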
def bondL():
bondLengths = {
'HH' : 0.74,
'CH' : 1.09,
'HO' : 0.96,
'HN' : 1.02,
'CC' : 1.54,
'CN' : 1.47,
'CO' : 1.43,
'NN' : 1.45,
'NO' : 1.40,
'OO' : 1.48,
'HS' : 1.34,
'OS' : 1.43,
'CS' : 1.82,
'NS' : 0.50,
'SS' : 1.0,
'II' : 1.0,
'MM' : 1.0,
'IM' : 2.8,
'CM' : 1.0,
'MS' : 1.0,
'HM' : 1.0,
'CI' : 1.0,
'IS' : 1.0,
'HI' : 1.0
}
return bondLengths
def transformTrajectoryIntoBlenderData(name,traj):
'''
    takes a trajectory dictionary and pickles the spheres/bonds data needed by Blender
'''
geoms = traj['geoms']
atomsN = traj['atomsN']
frameN = traj['frameN']
atomT = traj['atomT']
BL = bondL()
paletti = []
spheres = []
for i in range(atomsN):
spheres.append((i,atomT[i],geoms[:,i]))
for j in range(i):
unoL = atomT[i]
dueL = atomT[j]
geom1Ini = geoms[0,i]
geom2Ini = geoms[0,j]
toCheckDistance = ''.join(sorted(unoL + dueL))
bondLengthMax = BL[toCheckDistance] + 0.3
bondIni = np.linalg.norm((geom2Ini-geom1Ini))
#print('{} {} {} blMax {}, bondIni {}'.format(i,j,toCheckDistance,bondLengthMax,bondIni))
if bondIni < bondLengthMax:
print('There should be a bond between {}{} and {}{}'.format(unoL, i, dueL, j))
if unoL == dueL:
pos1 = geoms[:, i]
pos2 = geoms[:, j]
paletti.append((pos1,pos2,unoL))
else:
pos1 = geoms[:, i]
pos2 = geoms[:, j]
center = (pos1 + pos2) / 2
paletti.append((pos1,center,unoL))
paletti.append((pos2,center,dueL))
print('{} {}'.format(atomsN,frameN))
blender_dict = {'spheres' : spheres, 'paletti' : paletti}
pickleSave(name, blender_dict)
print(paletti)
print('There are {} atoms and {} paletti'.format(len(spheres),len(paletti)))
if __name__ == "__main__":
fn = '/home/alessio/Desktop/lol.xyz'
name = '/home/alessio/Desktop/lol.p'
a = readTrajectory(fn)
transformTrajectoryIntoBlenderData(name,a)
#from time import sleep
## A List of Items
#items = list(range(0, 57))
#l = len(items)
## Initial call to print 0% progress
#printProgressBar(0, l, prefix = 'Progress:', suffix = 'Complete', bar_length = 50)
#for i, item in enumerate(items):
# # Do stuff...
# sleep(0.1)
# # Update Progress Bar
# printProgressBar(i + 1, l, prefix = 'Progress:', suffix = 'Complete', bar_length = 50)
| gpl-3.0 |
awerries/rpi_sensors | Python_Tools/frequency_analysis.py | 2 | 3957 | #!/usr/bin/env python
"""This script processes any column of measurement and performs the Fast Fourier Transform (a faster DTFT).
This is meant to provide insight for estimating the power spectral density of sensor measurements.
Adam Werries, 2016"""
import os
import sys
import numpy
import argparse
from matplotlib import pyplot as pplot
from time import mktime
from dateutil.parser import parse as parsetime
def parse_datafile(filename, time_column, data_column):
"""Gets specified time_column and data_column numbers from a comma-separated file. Start counting at zero."""
f = open(filename,'r')
time = list()
data = list()
for line in f:
line = line.split(',')
if len(line) > 1:
# Convert iso8601 time to unix time
time.append(float(line[time_column]))
# Convert string value to int
data.append(float(line[data_column]))
# Convert lists to numpy-arrays for convenience,
time = numpy.array(time)
time = time - time.min()
data = numpy.array(data)
return (time, data)
def calculate_timestep(time):
"""Determine average spacing of time data (list). Data is assumed to be taken at a regular interval."""
steps = numpy.diff(time)
timestep = numpy.mean(steps[2:])
stddev = numpy.std(steps[2:])
timemax = numpy.max(steps[2:])
timemin = numpy.min(steps[2:])
    print('Timesteps info [mean: {0}, std: {1}, min: {2}, max: {3}]'.format(timestep, stddev, timemin, timemax))
return timestep
def calculate_fft(timestep, data):
"""Performs FFT on time-signal, returning magnitudes at each frequency."""
fourier = numpy.fft.rfft(data)
fft_magnitude = numpy.absolute(fourier)
    # rfft only returns the non-negative frequency bins, so build the matching
    # frequency axis with rfftfreq (sized from the original signal length).
    frequency = numpy.fft.rfftfreq(len(data), d=timestep)
return frequency, fft_magnitude
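# Illustrative check of the timestep/FFT pipeline above (comments only, so the
# script still just runs main()): a pure 5 Hz sine sampled at 100 Hz should put
# its largest non-DC FFT magnitude at roughly 5 Hz.
#   t = numpy.arange(0, 10, 0.01)
#   freq, mag = calculate_fft(calculate_timestep(t), numpy.sin(2 * numpy.pi * 5 * t))
#   print(freq[numpy.argmax(mag[1:]) + 1])   # expect ~5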
def plot_time(filename, time, data, title):
"""Plots time-based data."""
pplot.figure()
pplot.plot(time, data, 'bo')
pplot.title(title)
pplot.xlabel('Time (s)')
pplot.ylabel('Sensor Reading')
pplot.show()
#pplot.savefig('{0}_time.png'.format(filename),dpi=300)
def plot_fft(filename, freq, mag, title):
"""Plots FFT data alongside 1/f (pink) and 1/f^2 (brown) noise curves"""
pplot.figure()
pplot.plot(freq,mag, 'bo', label='Magnitudes')
print(max(mag))
pplot.title(title)
pplot.xlabel('Frequency (Hz)')
pplot.xscale('log')
pplot.xlim([1, 400])
pplot.ylabel('Magnitude')
pplot.yscale('log')
pplot.ylim([.1, 1000])
pplot.grid(True,which='both',ls='-',alpha=0.2)
pplot.show()
#pplot.savefig('{0}_fft.png'.format(filename),dpi=300)
def main():
parser = argparse.ArgumentParser(description='Display FFT plot of a chosen time-column and data-column.')
    parser.add_argument('-t', '--time_column', type=int, help='Column in the file to use for time (0-indexed).', required=True)
    parser.add_argument('-d', '--data_column', type=int, help='Column of data to compute the FFT with (0-indexed).', required=True)
parser.add_argument('filename', help='Sensor log filename.')
args = parser.parse_args()
time, data = parse_datafile(args.filename, args.time_column, args.data_column)
filename, file_extension = os.path.splitext(args.filename)
plot_time(filename, time, data, 'Time-domain for {0}, column {1}'.format(args.filename, args.data_column))
timestep = calculate_timestep(time)
frequency, fft_magnitude = calculate_fft(timestep, data)
plot_fft(filename, frequency, fft_magnitude, 'Frequency-domain of {0}, column {1}'.format(args.filename, args.data_column))
if __name__ == '__main__':
main()
| mit |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/matplotlib/testing/noseclasses.py | 10 | 2160 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
from matplotlib.testing.exceptions import (KnownFailureTest,
KnownFailureDidNotFailTest,
ImageComparisonFailure)
class KnownFailure(ErrorClassPlugin):
'''Plugin that installs a KNOWNFAIL error class for the
KnownFailureClass exception. When KnownFailureTest is raised,
the exception will be logged in the knownfail attribute of the
result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
exception will not be counted as an error or failure.
This is based on numpy.testing.noseclasses.KnownFailure.
'''
enabled = True
knownfail = ErrorClass(KnownFailureTest,
label='KNOWNFAIL',
isfailure=False)
def options(self, parser, env=os.environ):
env_opt = 'NOSE_WITHOUT_KNOWNFAIL'
parser.add_option('--no-knownfail', action='store_true',
dest='noKnownFail', default=env.get(env_opt, False),
help='Disable special handling of KnownFailureTest '
'exceptions')
def configure(self, options, conf):
if not self.can_configure:
return
self.conf = conf
disable = getattr(options, 'noKnownFail', False)
if disable:
self.enabled = False
def addError(self, test, err, *zero_nine_capt_args):
# Fixme (Really weird): if I don't leave empty method here,
# nose gets confused and KnownFails become testing errors when
# using the MplNosePlugin and MplTestCase.
# The *zero_nine_capt_args captures an extra argument. There
# seems to be a bug in
# nose.testing.manager.ZeroNinePlugin.addError() in which a
# 3rd positional argument ("capt") is passed to the plugin's
# addError() method, even if one is not explicitly using the
# ZeroNinePlugin.
pass
| bsd-3-clause |
Cisco-Talos/fnc-1 | tree_model/SentimentFeatureGenerator.py | 1 | 5016 | from FeatureGenerator import *
import pandas as pd
import numpy as np
import cPickle
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.tokenize import sent_tokenize
from helpers import *
class SentimentFeatureGenerator(FeatureGenerator):
def __init__(self, name='sentimentFeatureGenerator'):
super(SentimentFeatureGenerator, self).__init__(name)
def process(self, df):
print 'generating sentiment features'
print 'for headline'
n_train = df[~df['target'].isnull()].shape[0]
n_test = df[df['target'].isnull()].shape[0]
# calculate the polarity score of each sentence then take the average
sid = SentimentIntensityAnalyzer()
def compute_sentiment(sentences):
result = []
for sentence in sentences:
vs = sid.polarity_scores(sentence)
result.append(vs)
return pd.DataFrame(result).mean()
#df['headline_sents'] = df['Headline'].apply(lambda x: sent_tokenize(x.decode('utf-8')))
df['headline_sents'] = df['Headline'].apply(lambda x: sent_tokenize(x))
df = pd.concat([df, df['headline_sents'].apply(lambda x: compute_sentiment(x))], axis=1)
df.rename(columns={'compound':'h_compound', 'neg':'h_neg', 'neu':'h_neu', 'pos':'h_pos'}, inplace=True)
#print 'df:'
#print df
#print df.columns
#print df.shape
headlineSenti = df[['h_compound','h_neg','h_neu','h_pos']].values
print 'headlineSenti.shape:'
print headlineSenti.shape
headlineSentiTrain = headlineSenti[:n_train, :]
outfilename_hsenti_train = "train.headline.senti.pkl"
with open(outfilename_hsenti_train, "wb") as outfile:
cPickle.dump(headlineSentiTrain, outfile, -1)
print 'headline sentiment features of training set saved in %s' % outfilename_hsenti_train
if n_test > 0:
# test set is available
headlineSentiTest = headlineSenti[n_train:, :]
outfilename_hsenti_test = "test.headline.senti.pkl"
with open(outfilename_hsenti_test, "wb") as outfile:
cPickle.dump(headlineSentiTest, outfile, -1)
print 'headline sentiment features of test set saved in %s' % outfilename_hsenti_test
        print 'headline senti done'
#return 1
print 'for body'
#df['body_sents'] = df['articleBody'].map(lambda x: sent_tokenize(x.decode('utf-8')))
df['body_sents'] = df['articleBody'].map(lambda x: sent_tokenize(x))
df = pd.concat([df, df['body_sents'].apply(lambda x: compute_sentiment(x))], axis=1)
df.rename(columns={'compound':'b_compound', 'neg':'b_neg', 'neu':'b_neu', 'pos':'b_pos'}, inplace=True)
#print 'body df:'
#print df
#print df.columns
bodySenti = df[['b_compound','b_neg','b_neu','b_pos']].values
print 'bodySenti.shape:'
print bodySenti.shape
bodySentiTrain = bodySenti[:n_train, :]
outfilename_bsenti_train = "train.body.senti.pkl"
with open(outfilename_bsenti_train, "wb") as outfile:
cPickle.dump(bodySentiTrain, outfile, -1)
print 'body sentiment features of training set saved in %s' % outfilename_bsenti_train
if n_test > 0:
# test set is available
bodySentiTest = bodySenti[n_train:, :]
outfilename_bsenti_test = "test.body.senti.pkl"
with open(outfilename_bsenti_test, "wb") as outfile:
cPickle.dump(bodySentiTest, outfile, -1)
print 'body sentiment features of test set saved in %s' % outfilename_bsenti_test
print 'body senti done'
return 1
def read(self, header='train'):
filename_hsenti = "%s.headline.senti.pkl" % header
with open(filename_hsenti, "rb") as infile:
headlineSenti = cPickle.load(infile)
filename_bsenti = "%s.body.senti.pkl" % header
with open(filename_bsenti, "rb") as infile:
bodySenti = cPickle.load(infile)
print 'headlineSenti.shape:'
print headlineSenti.shape
#print type(headlineSenti)
print 'bodySenti.shape:'
print bodySenti.shape
#print type(bodySenti)
return [headlineSenti, bodySenti]
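# Rough usage sketch (comments only; the DataFrame columns 'Headline',
# 'articleBody' and 'target' are assumed to follow the rest of this pipeline):
#   generator = SentimentFeatureGenerator()
#   generator.process(df)                        # writes the train/test .pkl files
#   headline_senti, body_senti = generator.read('train')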
# Copyright 2017 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| apache-2.0 |
zakuro9715/lettuce | lettuce/django/steps/mail.py | 20 | 1903 | """
Step definitions for working with Django email.
"""
from smtplib import SMTPException
from django.core import mail
from lettuce import step
STEP_PREFIX = r'(?:Given|And|Then|When) '
CHECK_PREFIX = r'(?:And|Then) '
EMAIL_PARTS = ('subject', 'body', 'from_email', 'to', 'bcc', 'cc')
GOOD_MAIL = mail.EmailMessage.send
@step(CHECK_PREFIX + r'I have sent (\d+) emails?')
def mail_sent_count(step, count):
"""
Then I have sent 2 emails
"""
count = int(count)
    assert len(mail.outbox) == count, "Length of outbox is {0}, expected {1}".format(len(mail.outbox), count)
@step(r'I have not sent any emails')
def mail_not_sent(step):
"""
I have not sent any emails
"""
return mail_sent_count(step, 0)
@step(CHECK_PREFIX + (r'I have sent an email with "([^"]*)" in the ({0})'
'').format('|'.join(EMAIL_PARTS)))
def mail_sent_content(step, text, part):
"""
Then I have sent an email with "pandas" in the body
"""
assert any(text in getattr(email, part)
for email
in mail.outbox
), "An email contained expected text in the {0}".format(part)
@step(CHECK_PREFIX + r'I have sent an email with the following in the body:')
def mail_sent_content_multiline(step):
"""
I have sent an email with the following in the body:
\"""
Name: Mr. Panda
\"""
"""
return mail_sent_content(step, step.multiline, 'body')
@step(STEP_PREFIX + r'I clear my email outbox')
def mail_clear(step):
"""
I clear my email outbox
"""
mail.EmailMessage.send = GOOD_MAIL
mail.outbox = []
def broken_send(*args, **kwargs):
"""
Broken send function for email_broken step
"""
raise SMTPException("Failure mocked by lettuce")
@step(STEP_PREFIX + r'sending email does not work')
def email_broken(step):
"""
Break email sending
"""
mail.EmailMessage.send = broken_send
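# A hedged usage sketch (comments only): a scenario in a .feature file could
# exercise the steps above roughly like this; the wording has to match the
# regexes defined in this module, and "I reset my password" stands for any
# application-specific step that triggers an email.
#
#   Scenario: Password reset sends one email
#       Given I clear my email outbox
#       When I reset my password
#       Then I have sent 1 email
#       And I have sent an email with "reset" in the subject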
| gpl-3.0 |
CaymanUnterborn/burnman | misc/benchmarks/benchmark.py | 4 | 29408 | from __future__ import absolute_import
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
import os.path
import sys
sys.path.insert(1, os.path.abspath('../..'))
import numpy as np
import matplotlib.pyplot as plt
import burnman
import burnman.eos.birch_murnaghan as bm
import burnman.eos.birch_murnaghan_4th as bm4
import burnman.eos.mie_grueneisen_debye as mgd
import burnman.eos.slb as slb
import burnman.eos.vinet as vinet
import matplotlib.image as mpimg
def check_birch_murnaghan():
"""
Recreates Stixrude and Lithgow-Bertelloni (2005) Figure 1, bulk and shear modulus without thermal corrections
"""
plt.close()
# make a test mineral
test_mineral = burnman.Mineral()
test_mineral.params = {'name': 'test',
'V_0': 6.844e-6,
'K_0': 259.0e9,
'Kprime_0': 4.0,
'G_0': 175.0e9,
'Gprime_0': 1.7,
'molar_mass': .0,
}
test_mineral.set_method('bm3')
pressure = np.linspace(0., 140.e9, 100)
volume = np.empty_like(pressure)
bulk_modulus = np.empty_like(pressure)
shear_modulus = np.empty_like(pressure)
# calculate its static properties
for i in range(len(pressure)):
volume[i] = bm.volume(pressure[i], test_mineral.params)
bulk_modulus[i] = bm.bulk_modulus(volume[i], test_mineral.params)
shear_modulus[i] = bm.shear_modulus_third_order(
volume[i], test_mineral.params) # third order is used for the plot we are comparing against
# compare with figure 1
plt.plot(pressure / 1.e9, bulk_modulus /
1.e9, pressure / 1.e9, shear_modulus / 1.e9)
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig1.png')
plt.imshow(fig1, extent=[0, 140, 0, 800], aspect='auto')
plt.plot(pressure / 1.e9, bulk_modulus / 1.e9,
'g+', pressure / 1.e9, shear_modulus / 1.e9, 'g+')
plt.ylim(0, 800)
plt.xlim(0, 140)
plt.xlabel("Pressure (GPa)")
plt.ylabel("Modulus (GPa)")
plt.title(
"Comparing with Figure 1 of Stixrude and Lithgow-Bertelloni (2005)")
plt.show()
def check_birch_murnaghan_4th():
"""
Recreates the formulation of the 4th order Birch-Murnaghan EOS as in Ahmad and Alkammash, 2012; Figure 1.
"""
plt.close()
# make a test mineral
test_mineral = burnman.Mineral()
test_mineral.params = {'name': 'test',
'V_0': 10.e-6,
'K_0': 72.7e9,
'Kprime_0': 4.14,
'Kprime_prime_0': -0.0484e-9,
}
test_mineral.set_method('bm4')
pressure = np.linspace(0., 90.e9, 20)
volume = np.empty_like(pressure)
# calculate its static properties
for i in range(len(pressure)):
volume[i] = bm4.volume_fourth_order(
pressure[i], test_mineral.params) / test_mineral.params.get('V_0')
# compare with figure 1
plt.plot(pressure / 1.e9, volume)
fig1 = mpimg.imread('../../burnman/data/input_figures/Ahmad.png')
plt.imshow(fig1, extent=[0., 90., .65, 1.], aspect='auto')
plt.plot(pressure / 1.e9, volume, marker='o',
color='r', linestyle='', label='BM4')
plt.legend(loc='lower left')
plt.xlim(0., 90.)
plt.ylim(.65, 1.)
plt.xlabel("Volume/V0")
plt.ylabel("Pressure (GPa)")
plt.title("Comparing with Figure 1 of Ahmad et al., (2012)")
plt.show()
def check_vinet():
"""
Recreates Dewaele et al., 2006, Figure 1, fitting a Vinet EOS to Fe data
"""
plt.close()
# make a test mineral
test_mineral = burnman.Mineral()
test_mineral.params = {'name': 'test',
'V_0': 6.75e-6,
'K_0': 163.4e9,
'Kprime_0': 5.38,
}
test_mineral.set_method('vinet')
pressure = np.linspace(17.7e9, 300.e9, 20)
volume = np.empty_like(pressure)
# calculate its static properties
for i in range(len(pressure)):
volume[i] = vinet.volume(pressure[i], test_mineral.params)
# compare with figure 1
plt.plot(pressure / 1.e9, volume / 6.02e-7)
fig1 = mpimg.imread('../../burnman/data/input_figures/Dewaele.png')
plt.imshow(fig1, extent=[0., 300., 6.8, 11.8], aspect='auto')
plt.plot(pressure / 1.e9, volume / 6.02e-7, marker='o',
color='r', linestyle='', label='Vinet Fit')
plt.legend(loc='lower left')
plt.xlim(0., 300.)
plt.ylim(6.8, 11.8)
plt.ylabel("Volume (Angstroms^3/atom")
plt.xlabel("Pressure (GPa)")
plt.title("Comparing with Figure 1 of Dewaele et al., (2006)")
plt.show()
def check_mgd_shim_duffy_kenichi():
"""
    Attempts to recreate Shim Duffy Kenichi (2002)
"""
plt.close()
# Create gold material from Table 1
gold = burnman.Mineral()
gold.params = {'name': 'gold',
'V_0': 10.22e-6,
'K_0': 167.0e9,
'Kprime_0': 5.0,
'G_0': 0.0e9,
'Gprime_0': 0.0,
'molar_mass': .196966,
'n': 1.0,
'Debye_0': 170.,
'grueneisen_0': 2.97, # this does better with gr = 2.93. Why?
'q_0': 1.0}
gold.set_method('mgd3')
# Total pressures, pulled from Table 2
ref_pressures = [
np.array([0., 3.55, 7.55, 12.06, 17.16, 22.91, 29.42, 36.77, 45.11, 54.56, 65.29, 77.50, 91.42, 107.32, 125.51, 146.38, 170.38, 198.07])]
ref_pressures.append(
np.array([4.99, 8.53, 12.53, 17.04, 22.13, 27.88, 34.38, 41.73, 50.06, 59.50, 70.22, 82.43, 96.33, 112.22, 130.40, 151.25, 175.24, 202.90]))
ref_pressures.append(
np.array([12.14, 15.69, 19.68, 24.19, 29.28, 35.03, 41.53, 48.88, 57.20, 66.64, 77.37, 89.57, 103.47, 119.35, 137.53, 158.38, 182.36, 210.02]))
ref_pressures.append(
np.array([19.30, 22.84, 26.84, 31.35, 36.44, 42.19, 48.68, 56.03, 64.35, 73.80, 84.52, 96.72, 110.62, 126.50, 144.68, 165.53, 189.51, 217.17]))
eos = mgd.MGD3()
pressures = np.empty_like(ref_pressures)
ref_dv = np.linspace(0.0, 0.34, len(pressures[0]))
ref_volumes = (1 - ref_dv) * gold.params['V_0']
T = np.array([300., 1000., 2000., 3000.])
for t in range(len(pressures)):
for i in range(len(pressures[t])):
pressures[t][i] = eos.pressure(T[t], ref_volumes[i], gold.params)
plt.plot(ref_dv, (pressures[t] / 1.e9 - ref_pressures[t]))
plt.ylim(-1, 1)
plt.ylabel("Difference in pressure (GPa)")
plt.xlabel("1-dV/V")
plt.title("Comparing with Shim, Duffy, and Kenichi (2002)")
plt.show()
def check_mgd_fei_mao_shu_hu():
"""
Benchmark agains Fei Mao Shu Hu (1991)
"""
mgfeo = burnman.Mineral()
mgfeo.params = {'name': 'MgFeO',
'V_0': 11.657e-6,
'K_0': 157.0e9,
'Kprime_0': 4.0,
'G_0': 0.0e9,
'Gprime_0': 0.0,
'molar_mass': .196966,
'n': 2.0,
'Debye_0': 500.,
'grueneisen_0': 1.50,
'q_0': 1.1}
mgfeo.set_method('mgd3')
# pulled from table 1
temperatures = np.array(
[300, 300, 483, 483, 483, 590, 593, 593, 593, 700, 600, 500, 650, 600,
600, 650, 700, 737, 727, 673, 600, 543, 565, 585, 600, 628, 654, 745, 768, 747, 726, 700, 676])
volumes = np.array(
[77.418, 72.327, 74.427, 73.655, 72.595, 74.1, 73.834, 73.101, 70.845, 73.024, 72.630, 68.644, 72.969, 72.324, 71.857,
72.128, 73.283, 73.337, 72.963, 71.969, 69.894, 67.430, 67.607, 67.737, 68.204, 68.518, 68.955, 70.777, 72.921, 72.476, 72.152, 71.858, 71.473])
# change from cubic angstroms per unit cell to cubic meters per mol of
# molecules.
volumes = volumes / 1.e30 * 6.022141e23 / 4.0
ref_pressures = np.array(
[0.0, 12.23, 7.77, 9.69, 12.54, 9.21, 9.90, 11.83, 18.35, 12.68, 13.15, 25.16, 12.53, 14.01, 15.34,
14.86, 11.99, 12.08, 13.03, 15.46, 21.44, 29.98, 29.41, 29.05, 27.36, 26.38, 24.97, 19.49, 13.39, 14.48, 15.27, 15.95, 16.94])
ref_pressures = ref_pressures
pressures = np.empty_like(volumes)
eos = mgd.MGD3()
for i in range(len(temperatures)):
pressures[i] = eos.pressure(temperatures[i], volumes[i], mgfeo.params)
plt.scatter(temperatures, (pressures / 1.e9 - ref_pressures))
plt.ylim(-1, 1)
plt.title("Comparing with Fei, Mao, Shu, and Hu (1991)")
plt.xlabel("Temperature (K) at various volumes")
plt.ylabel("Difference in total pressure (GPa)")
plt.show()
def check_slb_fig3():
"""
Benchmark grueneisen parameter against figure 3 of Stixrude and Lithgow-Bertelloni (2005b)
"""
perovskite = burnman.Mineral()
perovskite.params = {'name': 'perovksite',
'V_0': burnman.tools.molar_volume_from_unit_cell_volume(168.27, 4.),
'grueneisen_0': 1.63,
'q_0': 1.7}
volume = np.linspace(0.6, 1.0, 100)
grueneisen_slb = np.empty_like(volume)
grueneisen_mgd = np.empty_like(volume)
q_slb = np.empty_like(volume)
q_mgd = np.empty_like(volume)
slb_eos = slb.SLB2()
mgd_eos = mgd.MGD2()
# calculate its thermal properties
for i in range(len(volume)):
# call with dummy pressure and temperatures, they do not change it
grueneisen_slb[i] = slb_eos.grueneisen_parameter(
0., 0., volume[i] * perovskite.params['V_0'], perovskite.params)
grueneisen_mgd[i] = mgd_eos.grueneisen_parameter(
0., 0., volume[i] * perovskite.params['V_0'], perovskite.params)
q_slb[i] = slb_eos.volume_dependent_q(
1. / volume[i], perovskite.params)
q_mgd[i] = perovskite.params['q_0']
# compare with figure 7
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig3.png')
plt.imshow(fig1, extent=[0.6, 1.0, 0.35, 2.0], aspect='auto')
plt.plot(volume, grueneisen_slb, 'g+', volume, grueneisen_mgd, 'b+')
plt.plot(volume, q_slb, 'g+', volume, q_mgd, 'b+')
plt.xlim(0.6, 1.0)
plt.ylim(0.35, 2.0)
plt.ylabel("Grueneisen parameter")
plt.xlabel("Relative Volume V/V0")
plt.title(
"Comparing with Figure 3 of Stixrude and Lithgow-Bertelloni (2005)")
plt.show()
def check_slb_fig7_txt():
"""
Calculates all values for forsterite and benchmarks with values from Stixrude and Lithgow-Bertelloni (personal communication)
"""
forsterite = burnman.Mineral()
forsterite.params = {'name': 'forsterite',
'V_0': 43.603e-6,
'K_0': 127.955e9,
'Kprime_0': 4.232,
'G_0': 81.6e9,
'Gprime_0': 1.4,
'molar_mass': .140695,
'n': 7.0,
'Debye_0': 809.183,
'grueneisen_0': .993,
'q_0': 2.093,
'F_0': -1.1406e5,
'eta_s_0': 2.364}
forsterite.set_method('slb3')
data = np.loadtxt(
"../../burnman/data/input_minphys/slb_fig7.txt", skiprows=2)
temperature = np.array(data[:, 2])
pressure = np.array(data[:, 0])
rho = np.array(data[:, 3])
rho_comp = np.empty_like(rho)
Kt = np.array(data[:, 4])
Kt_comp = np.empty_like(Kt)
Ks = np.array(data[:, 5])
Ks_comp = np.empty_like(Ks)
G = np.array(data[:, 6])
G_comp = np.empty_like(G)
VB = np.array(data[:, 7])
VB_comp = np.empty_like(VB)
VS = np.array(data[:, 8])
VS_comp = np.empty_like(VS)
VP = np.array(data[:, 9])
VP_comp = np.empty_like(VP)
vol = np.array(data[:, 10])
vol_comp = np.empty_like(vol)
alpha = np.array(data[:, 11])
alpha_comp = np.empty_like(alpha)
Cp = np.array(data[:, 12])
Cp_comp = np.empty_like(Cp)
gr = np.array(data[:, 13])
gr_comp = np.empty_like(gr)
gibbs = np.array(data[:, 14])
gibbs_comp = np.empty_like(gibbs)
entropy = np.array(data[:, 15])
entropy_comp = np.empty_like(gibbs)
enthalpy = np.array(data[:, 16])
enthalpy_comp = np.empty_like(gibbs)
for i in range(len(temperature)):
forsterite.set_state(pressure[i], temperature[i])
rho_comp[i] = 100. * (forsterite.density / 1000. - rho[i]) / rho[i]
Kt_comp[i] = 100. * (
forsterite.isothermal_bulk_modulus / 1.e9 - Kt[i]) / Kt[i]
Ks_comp[i] = 100. * (
forsterite.adiabatic_bulk_modulus / 1.e9 - Ks[i]) / Ks[i]
G_comp[i] = 100. * (forsterite.shear_modulus / 1.e9 - G[i]) / G[i]
VB_comp[i] = 100. * (forsterite.v_phi / 1000. - VB[i]) / VB[i]
VS_comp[i] = 100. * (forsterite.v_s / 1000. - VS[i]) / VS[i]
VP_comp[i] = 100. * (forsterite.v_p / 1000. - VP[i]) / VP[i]
vol_comp[i] = 100. * (forsterite.molar_volume * 1.e6 - vol[i]) / vol[i]
alpha_comp[i] = 100. * (
forsterite.thermal_expansivity / 1.e-5 - alpha[i]) / (alpha[-1])
Cp_comp[i] = 100. * (forsterite.heat_capacity_p /
forsterite.params['molar_mass'] / 1000. - Cp[i]) / (Cp[-1])
gr_comp[i] = (forsterite.grueneisen_parameter - gr[i]) / gr[i]
gibbs_comp[i] = 100. * (
forsterite.molar_gibbs / 1.e6 - gibbs[i]) / gibbs[i]
entropy_comp[i] = 100. * (
forsterite.molar_entropy - entropy[i]) / (entropy[i] if entropy[i] != 0. else 1.)
enthalpy_comp[i] = 100. * (
forsterite.molar_enthalpy / 1.e6 - enthalpy[i]) / (enthalpy[i] if enthalpy[i] != 0. else 1.)
plt.plot(temperature, rho_comp, label=r'$\rho$')
plt.plot(temperature, Kt_comp, label=r'$K_S$')
plt.plot(temperature, Ks_comp, label=r'$K_T$')
plt.plot(temperature, G_comp, label=r'$G$')
plt.plot(temperature, VS_comp, label=r'$V_S$')
plt.plot(temperature, VP_comp, label=r'$V_P$')
plt.plot(temperature, VB_comp, label=r'$V_\phi$')
plt.plot(temperature, vol_comp, label=r'$V$')
plt.plot(temperature, alpha_comp, label=r'$\alpha$')
plt.plot(temperature, Cp_comp, label=r'$c_P$')
plt.plot(temperature, gr_comp, label=r'$\gamma$')
plt.plot(temperature, gibbs_comp, label=r'Gibbs')
plt.plot(temperature, enthalpy_comp, label=r'Enthalpy')
plt.plot(temperature, entropy_comp, label=r'Entropy')
plt.xlim([0, 2750])
plt.ylim([-0.001, 0.001])
plt.xticks([0, 800, 1600, 2200])
plt.xlabel("Temperature (K)")
plt.ylabel("Percent Difference from HeFESTo")
plt.legend(loc="center right")
# plt.savefig("output_figures/benchmark1.pdf")
plt.show()
def check_slb_fig7():
"""
Calculates all values for forsterite and benchmarks with figure 7 from Stixrude and Lithgow-Bertelloni (2005)
"""
forsterite = burnman.Mineral()
forsterite.params = {'name': 'forsterite',
'V_0': 43.60e-6,
'K_0': 128.0e9,
'Kprime_0': 4.2,
'G_0': 82.0e9,
'Gprime_0': 1.4,
'n': 7.0,
'molar_mass': .140695,
'Debye_0': 809.,
'grueneisen_0': .99,
'q_0': 2.1,
'eta_s_0': 2.4}
forsterite.set_method('slb3')
temperature = np.linspace(0., 2000., 200)
volume = np.empty_like(temperature)
bulk_modulus = np.empty_like(temperature)
shear_modulus = np.empty_like(temperature)
heat_capacity = np.empty_like(temperature)
pressure = 1.0e5
forsterite.set_state(pressure, 300.)
Ks_0 = forsterite.adiabatic_bulk_modulus
# calculate its thermal properties
for i in range(len(temperature)):
forsterite.set_state(pressure, temperature[i])
volume[i] = forsterite.molar_volume / forsterite.params['V_0']
bulk_modulus[i] = forsterite.adiabatic_bulk_modulus / Ks_0
shear_modulus[i] = forsterite.shear_modulus / forsterite.params['G_0']
heat_capacity[i] = forsterite.heat_capacity_p / forsterite.params['n']
# compare with figure 7
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig7_vol.png')
plt.imshow(fig1, extent=[0, 2200, 0.99, 1.08], aspect='auto')
plt.plot(temperature, volume, 'g+')
plt.ylim(0.99, 1.08)
plt.xlim(0, 2200)
plt.xlabel("Temperature (K)")
plt.ylabel("Relative Volume V/V0")
plt.title(
"Comparing with Figure 7 of Stixrude and Lithgow-Bertelloni (2005)")
plt.show()
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig7_Cp.png')
plt.imshow(fig1, extent=[0, 2200, 0., 70.], aspect='auto')
plt.plot(temperature, heat_capacity, 'g+')
plt.ylim(0, 70)
plt.xlim(0, 2200)
plt.xlabel("Temperature (K)")
plt.ylabel("Heat Capacity Cp")
plt.title(
"Comparing with adiabatic_bulk_modulus7 of Stixrude and Lithgow-Bertelloni (2005)")
plt.show()
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig7_K.png')
plt.imshow(fig1, extent=[0, 2200, 0.6, 1.02], aspect='auto')
plt.plot(temperature, bulk_modulus, 'g+')
plt.ylim(0.6, 1.02)
plt.xlim(0, 2200)
plt.xlabel("Temperature (K)")
plt.ylabel("Relative Bulk Modulus K/K0")
plt.title(
"Comparing with Figure 7 of Stixrude and Lithgow-Bertelloni (2005)")
plt.show()
fig1 = mpimg.imread('../../burnman/data/input_figures/slb_fig7_G.png')
plt.imshow(fig1, extent=[0, 2200, 0.6, 1.02], aspect='auto')
plt.plot(temperature, shear_modulus, 'g+')
plt.ylim(0.6, 1.02)
plt.xlim(0, 2200)
plt.xlabel("Temperature (K)")
plt.ylabel("Relative Shear Modulus G/G0")
plt.title(
"Comparing with Figure 7 of Stixrude and Lithgow-Bertelloni (2005)")
plt.show()
def check_averaging():
"""
Reproduce Figure 1a from Watt et. al. 1976 to check the Voigt, Reuss,
Voigt-Reuss-Hill, and Hashin-Shtrikman bounds for an elastic composite
"""
voigt = burnman.averaging_schemes.Voigt()
reuss = burnman.averaging_schemes.Reuss()
voigt_reuss_hill = burnman.averaging_schemes.VoigtReussHill()
hashin_shtrikman_upper = burnman.averaging_schemes.HashinShtrikmanUpper()
hashin_shtrikman_lower = burnman.averaging_schemes.HashinShtrikmanLower()
# create arrays for sampling in volume fraction
volumes = np.linspace(0.0, 1.0, 100)
v_bulk_modulus = np.empty_like(volumes)
v_shear_modulus = np.empty_like(volumes)
r_bulk_modulus = np.empty_like(volumes)
r_shear_modulus = np.empty_like(volumes)
vrh_bulk_modulus = np.empty_like(volumes)
vrh_shear_modulus = np.empty_like(volumes)
hsu_bulk_modulus = np.empty_like(volumes)
hsu_shear_modulus = np.empty_like(volumes)
hsl_bulk_modulus = np.empty_like(volumes)
hsl_shear_modulus = np.empty_like(volumes)
# MgO bulk and shear moduli taken from Landolt-Boernstein
# - Group III Condensed Matter Volume 41B, 1999, pp 1-3
K2 = 152. # Bulk modulus, GPa
G2 = 155. # Shear modulus, GPa
# AgCl bulk and shear moduli (estimated from plot)
G1 = G2 * 0.07
K1 = K2 * 0.27
for i in range(len(volumes)):
v_bulk_modulus[i] = voigt.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
v_shear_modulus[i] = voigt.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
r_bulk_modulus[i] = reuss.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
r_shear_modulus[i] = reuss.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
vrh_bulk_modulus[i] = voigt_reuss_hill.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
vrh_shear_modulus[i] = voigt_reuss_hill.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsu_bulk_modulus[i] = hashin_shtrikman_upper.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsu_shear_modulus[i] = hashin_shtrikman_upper.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsl_bulk_modulus[i] = hashin_shtrikman_lower.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsl_shear_modulus[i] = hashin_shtrikman_lower.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
fig = mpimg.imread('../../burnman/data/input_figures/watt_1976_a1.png')
plt.imshow(fig, extent=[0, 1.0, 0.25, 1.0], aspect='auto')
plt.plot(volumes, v_bulk_modulus / K2, 'g-')
plt.plot(volumes, r_bulk_modulus / K2, 'g-')
plt.plot(volumes, vrh_bulk_modulus / K2, 'g-')
plt.plot(volumes, hsu_bulk_modulus / K2, 'g-')
plt.plot(volumes, hsl_bulk_modulus / K2, 'g-')
plt.ylim(0.25, 1.00)
plt.xlim(0, 1.0)
plt.xlabel("Volume fraction")
plt.ylabel("Averaged bulk modulus")
plt.title("Comparing with Figure 1 of Watt et al 1976")
plt.show()
fig = mpimg.imread('../../burnman/data/input_figures/watt_1976_a2.png')
plt.imshow(fig, extent=[0, 1.0, 0.0, 1.0], aspect='auto')
plt.plot(volumes, v_shear_modulus / G2, 'g-')
plt.plot(volumes, r_shear_modulus / G2, 'g-')
plt.plot(volumes, vrh_shear_modulus / G2, 'g-')
plt.plot(volumes, hsu_shear_modulus / G2, 'g-')
plt.plot(volumes, hsl_shear_modulus / G2, 'g-')
plt.ylim(0.0, 1.00)
plt.xlim(0, 1.0)
plt.xlabel("Volume fraction")
plt.ylabel("Averaged shear modulus")
plt.title("Comparing with Figure 1 of Watt et al 1976")
plt.show()
# also check against some numerical values given in Berryman (1995) for
# porous glass
K = 46.3
G = 30.5
# the value for porosity=0.46 in the table appears to be a typo. Remove
# it here
porosity = np.array(
[0.0, 0.05, 0.11, 0.13, 0.25, 0.33, 0.36, 0.39, 0.44, 0.50, 0.70])
berryman_bulk_modulus = np.array(
[46.3, 41.6, 36.6, 35.1, 27.0, 22.5, 21.0, 19.6, 17.3, 14.8, 7.7]) # 15.5 probably a typo?
hsu_bulk_modulus_vals = np.empty_like(porosity)
for i in range(len(porosity)):
hsu_bulk_modulus_vals[i] = hashin_shtrikman_upper.average_bulk_moduli(
[porosity[i], 1.0 - porosity[i]], [0.0, K], [0.0, G])
for i in range(len(volumes)):
hsu_bulk_modulus[i] = hashin_shtrikman_upper.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [0.0, K], [0.0, G])
fig = mpimg.imread('../../burnman/data/input_figures/berryman_fig4.png')
plt.imshow(fig, extent=[0, 1.0, 0.0, 50.0], aspect='auto')
plt.plot(volumes, hsu_bulk_modulus, 'g-')
plt.scatter(porosity, hsu_bulk_modulus_vals, c='r')
plt.scatter(porosity, berryman_bulk_modulus, c='y')
plt.ylim(0.0, 50.0)
plt.xlim(0, 1.0)
plt.xlabel("Porosity")
plt.ylabel("Averaged bulk modulus")
plt.title("Comparing with Figure 4 of Berryman (1995)")
plt.show()
def check_averaging_2():
"""
Reproduce Figure 1 from Hashin and Shtrikman (1963) to check the
Hashin-Shtrikman bounds for an elastic composite
"""
hashin_shtrikman_upper = burnman.averaging_schemes.HashinShtrikmanUpper()
hashin_shtrikman_lower = burnman.averaging_schemes.HashinShtrikmanLower()
# create arrays for sampling in volume fraction
volumes = np.linspace(0.0, 1.0, 100)
hsu_bulk_modulus = np.empty_like(volumes)
hsu_shear_modulus = np.empty_like(volumes)
hsl_bulk_modulus = np.empty_like(volumes)
hsl_shear_modulus = np.empty_like(volumes)
# These values are from Hashin and Shtrikman (1963)
K1 = 25.0
K2 = 60.7
G1 = 11.5
G2 = 41.8
for i in range(len(volumes)):
hsu_bulk_modulus[i] = hashin_shtrikman_upper.average_bulk_moduli(
[1.0 - volumes[i], volumes[i]], [K1, K2], [G1, G2])
hsu_shear_modulus[i] = hashin_shtrikman_upper.average_shear_moduli(
[1.0 - volumes[i], volumes[i]], [K1, K2], [G1, G2])
hsl_bulk_modulus[i] = hashin_shtrikman_lower.average_bulk_moduli(
[1.0 - volumes[i], volumes[i]], [K1, K2], [G1, G2])
hsl_shear_modulus[i] = hashin_shtrikman_lower.average_shear_moduli(
[1.0 - volumes[i], volumes[i]], [K1, K2], [G1, G2])
fig = mpimg.imread(
'../../burnman/data/input_figures/Hashin_Shtrikman_1963_fig1_K.png')
plt.imshow(fig, extent=[0, 1.0, 1.1, K2 + 0.3], aspect='auto')
plt.plot(volumes, hsu_bulk_modulus, 'g-')
plt.plot(volumes, hsl_bulk_modulus, 'g-')
plt.ylim(K1, K2)
plt.xlim(0, 1.0)
plt.xlabel("Volume fraction")
plt.ylabel("Averaged bulk modulus")
plt.title("Comparing with Figure 1 of Hashin and Shtrikman (1963)")
plt.show()
fig = mpimg.imread(
'../../burnman/data/input_figures/Hashin_Shtrikman_1963_fig2_G.png')
plt.imshow(fig, extent=[0, 1.0, 0.3, G2], aspect='auto')
plt.plot(volumes, hsu_shear_modulus, 'g-')
plt.plot(volumes, hsl_shear_modulus, 'g-')
plt.ylim(G1, G2)
plt.xlim(0, 1.0)
plt.xlabel("Volume fraction")
plt.ylabel("Averaged shear modulus")
plt.title("Comparing with Figure 2 of Hashin and Shtrikman (1963)")
plt.show()
def check_averaging_3():
"""
Reproduce Figure 3 from Avseth et al. (2010) to check the Voigt, Reuss,
Voigt-Reuss-Hill, and Hashin-Shtrikman bounds for an elastic composite
"""
voigt = burnman.averaging_schemes.Voigt()
reuss = burnman.averaging_schemes.Reuss()
voigt_reuss_hill = burnman.averaging_schemes.VoigtReussHill()
hashin_shtrikman_upper = burnman.averaging_schemes.HashinShtrikmanUpper()
hashin_shtrikman_lower = burnman.averaging_schemes.HashinShtrikmanLower()
# create arrays for sampling in volume fraction
volumes = np.linspace(0.0, 1.0, 100)
v_bulk_modulus = np.empty_like(volumes)
v_shear_modulus = np.empty_like(volumes)
r_bulk_modulus = np.empty_like(volumes)
r_shear_modulus = np.empty_like(volumes)
vrh_bulk_modulus = np.empty_like(volumes)
vrh_shear_modulus = np.empty_like(volumes)
hsu_bulk_modulus = np.empty_like(volumes)
hsu_shear_modulus = np.empty_like(volumes)
hsl_bulk_modulus = np.empty_like(volumes)
hsl_shear_modulus = np.empty_like(volumes)
hs_av_bulk_modulus = np.empty_like(volumes)
hs_av_shear_modulus = np.empty_like(volumes)
# Quartz bulk and shear moduli
K2 = 37.
G2 = 45.
# Fluid bulk and shear moduli
G1 = 0.00001
K1 = 2.35
for i in range(len(volumes)):
v_bulk_modulus[i] = voigt.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
v_shear_modulus[i] = voigt.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
r_bulk_modulus[i] = reuss.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
r_shear_modulus[i] = reuss.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
vrh_bulk_modulus[i] = voigt_reuss_hill.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
vrh_shear_modulus[i] = voigt_reuss_hill.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsu_bulk_modulus[i] = hashin_shtrikman_upper.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsu_shear_modulus[i] = hashin_shtrikman_upper.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsl_bulk_modulus[i] = hashin_shtrikman_lower.average_bulk_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hsl_shear_modulus[i] = hashin_shtrikman_lower.average_shear_moduli(
[volumes[i], 1.0 - volumes[i]], [K1, K2], [G1, G2])
hs_av_bulk_modulus[i] = 0.5 * hsl_bulk_modulus[
i] + 0.5 * hsu_bulk_modulus[i]
hs_av_shear_modulus[i] = 0.5 * hsl_shear_modulus[
i] + 0.5 * hsu_shear_modulus[i]
fig = mpimg.imread(
'../../burnman/data/input_figures/Avseth_et_al_2010_fig3_K.png')
plt.imshow(fig, extent=[0, 1.0, 0., 40.0], aspect='auto')
plt.plot(volumes, v_bulk_modulus, 'g-')
plt.plot(volumes, r_bulk_modulus, 'g-')
plt.plot(volumes, vrh_bulk_modulus, 'g-')
plt.plot(volumes, hsu_bulk_modulus, 'g-')
plt.plot(volumes, hsl_bulk_modulus, 'g-')
plt.plot(volumes, hs_av_bulk_modulus, 'g-')
plt.ylim(0., 40.00)
plt.xlim(0., 1.0)
plt.xlabel("Volume fraction")
plt.ylabel("Averaged bulk modulus")
plt.title("Comparing with Figure 3 of Avseth et al., 2010")
plt.show()
if __name__ == "__main__":
check_averaging()
check_averaging_2()
check_averaging_3()
check_birch_murnaghan()
check_birch_murnaghan_4th()
check_vinet()
check_slb_fig7()
check_slb_fig3()
check_mgd_shim_duffy_kenichi()
check_mgd_fei_mao_shu_hu()
check_slb_fig7_txt()
| gpl-2.0 |
cbertinato/pandas | pandas/tests/io/formats/test_format.py | 1 | 109440 | """
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import is_platform_32bit, is_platform_windows
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, NaT, Series, Timestamp, date_range,
get_option, option_context, read_csv, reset_option, set_option)
import pandas.util.testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or is_platform_32bit()
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split('\n')[0].startswith("<class")
c2 = r.split('\n')[0].startswith(r"<class") # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split('\n')) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == '...')[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == '...':
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r'^[\.\ ]+$', row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(
df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(
df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split('\n'):
if line.endswith('\\'):
return True
return False
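# Illustrative sketch (comments only): the predicates above are typically
# combined with option_context in the tests below, e.g.
#   with option_context('display.max_rows', 5):
#       assert has_vertically_truncated_repr(DataFrame(np.arange(100)))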
@pytest.mark.filterwarnings('ignore::FutureWarning:.*format')
class TestDataFrameFormatting:
def test_repr_embedded_ndarray(self):
arr = np.empty(10, dtype=[('err', object)])
for i in range(len(arr)):
arr['err'][i] = np.random.randn(i)
df = DataFrame(arr)
repr(df['err'])
repr(df)
df.to_string()
def test_eng_float_formatter(self, float_frame):
df = float_frame
df.loc[5] = 0
fmt.set_eng_float_format()
repr(df)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(df)
fmt.set_eng_float_format(accuracy=0)
repr(df)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(null_counts, result):
buf = StringIO()
df.info(buf=buf, null_counts=null_counts)
assert ('non-null' in buf.getvalue()) is result
with option_context('display.max_info_rows', 20,
'display.max_info_columns', 20):
check(None, True)
check(True, True)
check(False, False)
with option_context('display.max_info_rows', 5,
'display.max_info_columns', 5):
check(None, False)
check(True, False)
check(False, False)
def test_repr_tuples(self):
buf = StringIO()
df = DataFrame({'tups': list(zip(range(10), range(10)))})
repr(df)
df.to_string(col_space=10, buf=buf)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame({'A': np.random.randn(10),
'B': [tm.rands(np.random.randint(
max_len - 1, max_len + 1)) for i in range(10)
]})
r = repr(df)
r = r[r.find('\n') + 1:]
adj = fmt._get_adjustment()
for line, value in zip(r.split('\n'), df['B']):
if adj.len(value) + 1 > max_len:
assert '...' in line
else:
assert '...' not in line
with option_context("display.max_colwidth", 999999):
assert '...' not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert '...' not in repr(df)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
pd.reset_option("display.chop_threshold") # default None
assert repr(df) == ' 0 1\n0 0.1 0.5\n1 0.5 -0.1'
with option_context("display.chop_threshold", 0.2):
assert repr(df) == ' 0 1\n0 0.0 0.5\n1 0.5 0.0'
with option_context("display.chop_threshold", 0.6):
assert repr(df) == ' 0 1\n0 0.0 0.0\n1 0.0 0.0'
with option_context("display.chop_threshold", None):
assert repr(df) == ' 0 1\n0 0.1 0.5\n1 0.5 -0.1'
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = pd.DataFrame([[10, 20, 30, 40],
[8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (' 0 1\n'
'0 10.0 8.000000e-10\n'
'1 20.0 -1.000000e-11\n'
'2 30.0 2.000000e-09\n'
'3 40.0 -2.000000e-11')
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (' 0 1\n'
'0 10.0 0.000000e+00\n'
'1 20.0 0.000000e+00\n'
'2 30.0 0.000000e+00\n'
'3 40.0 0.000000e+00')
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (' 0 1\n'
'0 10.0 8.000000e-10\n'
'1 20.0 0.000000e+00\n'
'2 30.0 2.000000e-09\n'
'3 40.0 0.000000e+00')
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(list(range(1000)))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(list(range(1000)))) < 100
def test_repr_set(self):
assert printing.pprint_thing({1}) == '{1}'
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(['a', 'b'])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
cols = ["\u03c8"]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context('mode.sim_interactive', True):
df = DataFrame(np.random.randn(10, 4))
assert '\\' not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame('hello', index=[0], columns=[0])
df_wide = DataFrame('hello', index=[0], columns=range(10))
df_tall = DataFrame('hello', index=range(30), columns=range(5))
with option_context('mode.sim_interactive', True):
with option_context('display.max_columns', 10, 'display.width', 20,
'display.max_rows', 20,
'display.show_dimensions', True):
with option_context('display.expand_frame_repr', True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context('display.expand_frame_repr', False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non-interactive mode, there can be no dependency on the
# result of terminal auto-size detection
df = DataFrame('hello', index=range(1000), columns=range(5))
with option_context('mode.sim_interactive', False, 'display.width', 0,
'display.max_rows', 5000):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
# see gh-21180
terminal_size = (118, 96)
monkeypatch.setattr('pandas.io.formats.format.get_terminal_size',
lambda: terminal_size)
index = range(5)
columns = pd.MultiIndex.from_tuples([
('This is a long title with > 37 chars.', 'cat'),
('This is a loooooonger title with > 43 chars.', 'dog'),
])
df = pd.DataFrame(1, index=index, columns=columns)
result = repr(df)
h1, h2 = result.split('\n')[:2]
assert 'long' in h1
assert 'loooooonger' in h1
assert 'cat' in h2
assert 'dog' in h2
# regular columns
df2 = pd.DataFrame({"A" * 41: [1, 2], 'B' * 41: [1, 2]})
result = repr(df2)
assert df2.columns[0] in result.split('\n')[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
terminal_size = (80, 24)
df = pd.DataFrame(np.random.rand(1, 7))
monkeypatch.setattr('pandas.io.formats.format.get_terminal_size',
lambda: terminal_size)
assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = pd.DataFrame({'a': [108480, 30830], 'b': [12345, 12345],
'c': [12345, 12345], 'd': [12345, 12345],
'e': ['a' * 50] * 2})
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip("terminal size too small, "
"{0} x {1}".format(term_width, term_height))
def mkframe(n):
index = ['{i:05d}'.format(i=i) for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context('mode.sim_interactive', True):
with option_context('display.width', term_width * 2):
with option_context('display.max_rows', 5,
'display.max_columns', 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context('display.max_rows', 20,
'display.max_columns', 10):
# Back within the max_columns boundary, and not expanded
# since the width is not exceeded
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context('display.max_rows', 9,
'display.max_columns', 10):
# exceeding the vertical bounds cannot result in an expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context('display.max_columns', 100, 'display.max_rows',
term_width * 20, 'display.width', None):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_str_max_colwidth(self):
# GH 7856
df = pd.DataFrame([{'a': 'foo',
'b': 'bar',
'c': 'uncomfortably long line with lots of stuff',
'd': 1}, {'a': 'foo',
'b': 'bar',
'c': 'stuff',
'd': 1}])
df.set_index(['a', 'b', 'c'])
assert str(df) == (
' a b c d\n'
'0 foo bar uncomfortably long line with lots of stuff 1\n'
'1 foo bar stuff 1')
with option_context('max_colwidth', 20):
assert str(df) == (' a b c d\n'
'0 foo bar uncomfortably lo... 1\n'
'1 foo bar stuff 1')
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context('mode.sim_interactive', True):
with option_context('max_rows', None):
with option_context('max_columns', None):
# Wrap around with None
assert has_expanded_repr(df)
with option_context('max_rows', 0):
with option_context('max_columns', 0):
# Truncate with auto detection.
assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context('max_rows', 0):
with option_context('max_columns', None):
# Wrap around with None
assert has_expanded_repr(df)
# Truncate vertically
assert has_vertically_truncated_repr(df)
with option_context('max_rows', None):
with option_context('max_columns', 0):
assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = ['\u03c3'] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({'unicode': unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(['abc', '\u03c3a', 'aegdvg'])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split('\n')
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except AttributeError:
pass
if not line.startswith('dtype:'):
assert len(line) == line_len
# it works even if sys.stdin is None
_stdin = sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_to_string_unicode_columns(self, float_frame):
df = DataFrame({'\u03c3': np.arange(10.)})
buf = StringIO()
df.to_string(buf=buf)
buf.getvalue()
buf = StringIO()
df.info(buf=buf)
buf.getvalue()
result = float_frame.to_string()
assert isinstance(result, str)
def test_to_string_utf8_columns(self):
n = "\u05d0".encode('utf-8')
with option_context('display.max_rows', 1):
df = DataFrame([1, 2], columns=[n])
repr(df)
def test_to_string_unicode_two(self):
dm = DataFrame({'c/\u03c3': []})
buf = StringIO()
dm.to_string(buf)
def test_to_string_unicode_three(self):
dm = DataFrame(['\xc2'])
buf = StringIO()
dm.to_string(buf)
def test_to_string_with_formatters(self):
df = DataFrame({'int': [1, 2, 3],
'float': [1.0, 2.0, 3.0],
'object': [(1, 2), True, False]},
columns=['int', 'float', 'object'])
formatters = [('int', lambda x: '0x{x:x}'.format(x=x)),
('float', lambda x: '[{x: 4.1f}]'.format(x=x)),
('object', lambda x: '-{x!s}-'.format(x=x))]
result = df.to_string(formatters=dict(formatters))
result2 = df.to_string(formatters=list(zip(*formatters))[1])
assert result == (' int float object\n'
'0 0x1 [ 1.0] -(1, 2)-\n'
'1 0x2 [ 2.0] -True-\n'
'2 0x3 [ 3.0] -False-')
assert result == result2
def test_to_string_with_datetime64_monthformatter(self):
months = [datetime(2016, 1, 1), datetime(2016, 2, 2)]
x = DataFrame({'months': months})
def format_func(x):
return x.strftime('%Y-%m')
result = x.to_string(formatters={'months': format_func})
expected = 'months\n0 2016-01\n1 2016-02'
assert result.strip() == expected
def test_to_string_with_datetime64_hourformatter(self):
x = DataFrame({'hod': pd.to_datetime(['10:10:10.100', '12:12:12.120'],
format='%H:%M:%S.%f')})
def format_func(x):
return x.strftime('%H:%M')
result = x.to_string(formatters={'hod': format_func})
expected = 'hod\n0 10:10\n1 12:12'
assert result.strip() == expected
def test_to_string_with_formatters_unicode(self):
df = DataFrame({'c/\u03c3': [1, 2, 3]})
result = df.to_string(
formatters={'c/\u03c3': lambda x: '{x}'.format(x=x)})
assert result == ' c/\u03c3\n' + '0 1\n1 2\n2 3'
def test_east_asian_unicode_false(self):
# not aligned properly because of east asian width
# mid col
df = DataFrame({'a': ['あ', 'いいい', 'う', 'ええええええ'],
'b': [1, 222, 33333, 4]},
index=['a', 'bb', 'c', 'ddd'])
expected = (" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4")
assert repr(df) == expected
# last col
df = DataFrame({'a': [1, 222, 33333, 4],
'b': ['あ', 'いいい', 'う', 'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ")
assert repr(df) == expected
# all col
df = DataFrame({'a': ['あああああ', 'い', 'う', 'えええ'],
'b': ['あ', 'いいい', 'う', 'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (" a b\na あああああ あ\n"
"bb い いいい\nc う う\n"
"ddd えええ ええええええ")
assert repr(df) == expected
# column name
df = DataFrame({'b': ['あ', 'いいい', 'う', 'ええええええ'],
'あああああ': [1, 222, 33333, 4]},
index=['a', 'bb', 'c', 'ddd'])
expected = (" b あああああ\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4")
assert repr(df) == expected
# index
df = DataFrame({'a': ['あああああ', 'い', 'う', 'えええ'],
'b': ['あ', 'いいい', 'う', 'ええええええ']},
index=['あああ', 'いいいいいい', 'うう', 'え'])
expected = (" a b\nあああ あああああ あ\n"
"いいいいいい い いいい\nうう う う\n"
"え えええ ええええええ")
assert repr(df) == expected
# index name
df = DataFrame({'a': ['あああああ', 'い', 'う', 'えええ'],
'b': ['あ', 'いいい', 'う', 'ええええええ']},
index=pd.Index(['あ', 'い', 'うう', 'え'],
name='おおおお'))
expected = (" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ")
assert repr(df) == expected
# all
df = DataFrame({'あああ': ['あああ', 'い', 'う', 'えええええ'],
'いいいいい': ['あ', 'いいい', 'う', 'ええ']},
index=pd.Index(['あ', 'いいい', 'うう', 'え'],
name='お'))
expected = (" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ")
assert repr(df) == expected
# MultiIndex
idx = pd.MultiIndex.from_tuples([('あ', 'いい'), ('う', 'え'), (
'おおお', 'かかかか'), ('き', 'くく')])
df = DataFrame({'a': ['あああああ', 'い', 'う', 'えええ'],
'b': ['あ', 'いいい', 'う', 'ええええええ']},
index=idx)
expected = (" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ")
assert repr(df) == expected
# truncate
with option_context('display.max_rows', 3, 'display.max_columns', 3):
df = pd.DataFrame({'a': ['あああああ', 'い', 'う', 'えええ'],
'b': ['あ', 'いいい', 'う', 'ええええええ'],
'c': ['お', 'か', 'ききき', 'くくくくくく'],
'ああああ': ['さ', 'し', 'す', 'せ']},
columns=['a', 'b', 'c', 'ああああ'])
expected = (" a ... ああああ\n0 あああああ ... さ\n"
".. ... ... ...\n3 えええ ... せ\n"
"\n[4 rows x 4 columns]")
assert repr(df) == expected
df.index = ['あああ', 'いいいい', 'う', 'aaa']
expected = (" a ... ああああ\nあああ あああああ ... さ\n"
".. ... ... ...\naaa えええ ... せ\n"
"\n[4 rows x 4 columns]")
assert repr(df) == expected
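# The cases above use the default display.unicode.east_asian_width=False,
# where wide (CJK) characters are counted as a single cell and columns can
# look misaligned; the next test enables the option so they are padded as
# double width.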
def test_east_asian_unicode_true(self):
# Enable Unicode option -----------------------------------------
with option_context('display.unicode.east_asian_width', True):
# mid col
df = DataFrame({'a': ['あ', 'いいい', 'う', 'ええええええ'],
'b': [1, 222, 33333, 4]},
index=['a', 'bb', 'c', 'ddd'])
expected = (" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4")
assert repr(df) == expected
# last col
df = DataFrame({'a': [1, 222, 33333, 4],
'b': ['あ', 'いいい', 'う', 'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ")
assert repr(df) == expected
# all col
df = DataFrame({'a': ['あああああ', 'い', 'う', 'えええ'],
'b': ['あ', 'いいい', 'う', 'ええええええ']},
index=['a', 'bb', 'c', 'ddd'])
expected = (" a b\n"
"a あああああ あ\n"
"bb い いいい\n"
"c う う\n"
"ddd えええ ええええええ")
assert repr(df) == expected
# column name
df = DataFrame({'b': ['あ', 'いいい', 'う', 'ええええええ'],
'あああああ': [1, 222, 33333, 4]},
index=['a', 'bb', 'c', 'ddd'])
expected = (" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c う 33333\n"
"ddd ええええええ 4")
assert repr(df) == expected
# index
df = DataFrame({'a': ['あああああ', 'い', 'う', 'えええ'],
'b': ['あ', 'いいい', 'う', 'ええええええ']},
index=['あああ', 'いいいいいい', 'うう', 'え'])
expected = (" a b\n"
"あああ あああああ あ\n"
"いいいいいい い いいい\n"
"うう う う\n"
"え えええ ええええええ")
assert repr(df) == expected
# index name
df = DataFrame({'a': ['あああああ', 'い', 'う', 'えええ'],
'b': ['あ', 'いいい', 'う', 'ええええええ']},
index=pd.Index(['あ', 'い', 'うう', 'え'],
name='おおおお'))
expected = (" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ")
assert repr(df) == expected
# all
df = DataFrame({'あああ': ['あああ', 'い', 'う', 'えええええ'],
'いいいいい': ['あ', 'いいい', 'う', 'ええ']},
index=pd.Index(['あ', 'いいい', 'うう', 'え'],
name='お'))
expected = (" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ")
assert repr(df) == expected
# MultiIndex
idx = pd.MultiIndex.from_tuples([('あ', 'いい'), ('う', 'え'), (
'おおお', 'かかかか'), ('き', 'くく')])
df = DataFrame({'a': ['あああああ', 'い', 'う', 'えええ'],
'b': ['あ', 'いいい', 'う', 'ええええええ']},
index=idx)
expected = (" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ")
assert repr(df) == expected
# truncate
with option_context('display.max_rows', 3, 'display.max_columns',
3):
df = pd.DataFrame({'a': ['あああああ', 'い', 'う', 'えええ'],
'b': ['あ', 'いいい', 'う', 'ええええええ'],
'c': ['お', 'か', 'ききき', 'くくくくくく'],
'ああああ': ['さ', 'し', 'す', 'せ']},
columns=['a', 'b', 'c', 'ああああ'])
expected = (" a ... ああああ\n"
"0 あああああ ... さ\n"
".. ... ... ...\n"
"3 えええ ... せ\n"
"\n[4 rows x 4 columns]")
assert repr(df) == expected
df.index = ['あああ', 'いいいい', 'う', 'aaa']
expected = (" a ... ああああ\n"
"あああ あああああ ... さ\n"
"... ... ... ...\n"
"aaa えええ ... せ\n"
"\n[4 rows x 4 columns]")
assert repr(df) == expected
# ambiguous unicode
df = DataFrame({'b': ['あ', 'いいい', '¡¡', 'ええええええ'],
'あああああ': [1, 222, 33333, 4]},
index=['a', 'bb', 'c', '¡¡¡'])
expected = (" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c ¡¡ 33333\n"
"¡¡¡ ええええええ 4")
assert repr(df) == expected
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({'c/\u03c3': Series()})
nonempty = DataFrame({'c/\u03c3': Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
assert len(with_header_row1) == len(no_header)
def test_to_string_truncate_indices(self):
for index in [tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex]:
for column in [tm.makeStringIndex]:
for h in [10, 20]:
for w in [10, 20]:
with option_context("display.expand_frame_repr",
False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(
df)
with option_context("display.max_columns", 15):
if w == 20:
assert has_horizontally_truncated_repr(df)
else:
assert not (
has_horizontally_truncated_repr(df))
with option_context("display.max_rows", 15,
"display.max_columns", 15):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(
df)
def test_to_string_truncate_multilevel(self):
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
# 11594, 12045
# when truncated the dtypes of the splits can differ
# 11594
import datetime
s = Series([datetime.datetime(2012, 1, 1)] * 10 +
[datetime.datetime(1012, 1, 2)] + [
datetime.datetime(2012, 1, 3)] * 10)
with pd.option_context('display.max_rows', 8):
result = str(s)
assert 'object' in result
# 12045
df = DataFrame({'text': ['some words'] + [None] * 9})
with pd.option_context('display.max_rows', 8,
'display.max_columns', 3):
result = str(df)
assert 'None' in result
assert 'NaN' not in result
def test_datetimelike_frame(self):
# GH 12211
df = DataFrame(
{'date': [pd.Timestamp('20130101').tz_localize('UTC')] +
[pd.NaT] * 5})
with option_context("display.max_rows", 5):
result = str(df)
assert '2013-01-01 00:00:00+00:00' in result
assert 'NaT' in result
assert '...' in result
assert '[6 rows x 1 columns]' in result
dts = [pd.Timestamp('2011-01-01', tz='US/Eastern')] * 5 + [pd.NaT] * 5
df = pd.DataFrame({"dt": dts,
"x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context('display.max_rows', 5):
expected = (' dt x\n'
'0 2011-01-01 00:00:00-05:00 1\n'
'1 2011-01-01 00:00:00-05:00 2\n'
'.. ... ..\n'
'8 NaT 9\n'
'9 NaT 10\n\n'
'[10 rows x 2 columns]')
assert repr(df) == expected
dts = [pd.NaT] * 5 + [pd.Timestamp('2011-01-01', tz='US/Eastern')] * 5
df = pd.DataFrame({"dt": dts,
"x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context('display.max_rows', 5):
expected = (' dt x\n'
'0 NaT 1\n'
'1 NaT 2\n'
'.. ... ..\n'
'8 2011-01-01 00:00:00-05:00 9\n'
'9 2011-01-01 00:00:00-05:00 10\n\n'
'[10 rows x 2 columns]')
assert repr(df) == expected
dts = ([pd.Timestamp('2011-01-01', tz='Asia/Tokyo')] * 5 +
[pd.Timestamp('2011-01-01', tz='US/Eastern')] * 5)
df = pd.DataFrame({"dt": dts,
"x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context('display.max_rows', 5):
expected = (' dt x\n'
'0 2011-01-01 00:00:00+09:00 1\n'
'1 2011-01-01 00:00:00+09:00 2\n'
'.. ... ..\n'
'8 2011-01-01 00:00:00-05:00 9\n'
'9 2011-01-01 00:00:00-05:00 10\n\n'
'[10 rows x 2 columns]')
assert repr(df) == expected
@pytest.mark.parametrize('start_date', [
'2017-01-01 23:59:59.999999999',
'2017-01-01 23:59:59.99999999',
'2017-01-01 23:59:59.9999999',
'2017-01-01 23:59:59.999999',
'2017-01-01 23:59:59.99999',
'2017-01-01 23:59:59.9999',
])
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
df = DataFrame({'A': date_range(start=start_date,
freq='D', periods=5)})
result = str(df)
assert start_date in result
dti = date_range(start=start_date,
freq='D', periods=5)
df = DataFrame({'A': range(5)}, index=dti)
result = str(df.index)
assert start_date in result
def test_nonunicode_nonascii_alignment(self):
df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = df.to_string()
lines = rep_str.split('\n')
assert len(lines[1]) == len(lines[2])
def test_unicode_problem_decoding_as_ascii(self):
dm = DataFrame({'c/\u03c3': Series({'test': np.nan})})
str(dm.to_string())
def test_string_repr_encoding(self, datapath):
filepath = datapath('io', 'parser', 'data', 'unicode_series.csv')
df = pd.read_csv(filepath, header=None, encoding='latin1')
repr(df)
repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({'foo': [-np.inf, np.inf]})
repr(df)
def test_frame_info_encoding(self):
index = ['\'Til There Was You (1997)',
'ldum klaka (Cold Fever) (1994)']
fmt.set_option('display.max_rows', 1)
df = DataFrame(columns=['a', 'b', 'c'], index=index)
repr(df)
repr(df.T)
fmt.set_option('display.max_rows', 200)
def test_wide_repr(self):
with option_context('mode.sim_interactive', True,
'display.show_dimensions', True,
'display.max_columns', 20):
max_cols = get_option('display.max_columns')
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
assert "10 rows x {c} columns".format(c=max_cols - 1) in rep_str
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context('display.width', 120):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option('display.expand_frame_repr')
def test_wide_repr_wide_columns(self):
with option_context('mode.sim_interactive', True,
'display.max_columns', 20):
df = DataFrame(np.random.randn(5, 3),
columns=['a' * 90, 'b' * 90, 'c' * 90])
rep_str = repr(df)
assert len(rep_str.splitlines()) == 20
def test_wide_repr_named(self):
with option_context('mode.sim_interactive', True,
'display.max_columns', 20):
max_cols = get_option('display.max_columns')
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
df.index.name = 'DataFrame Index'
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context('display.width', 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert 'DataFrame Index' in line
reset_option('display.expand_frame_repr')
def test_wide_repr_multiindex(self):
with option_context('mode.sim_interactive', True,
'display.max_columns', 20):
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
max_cols = get_option('display.max_columns')
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)),
index=midx)
df.index.names = ['Level 0', 'Level 1']
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context('display.width', 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert 'Level 0 Level 1' in line
reset_option('display.expand_frame_repr')
def test_wide_repr_multiindex_cols(self):
with option_context('mode.sim_interactive', True,
'display.max_columns', 20):
max_cols = get_option('display.max_columns')
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
mcols = MultiIndex.from_arrays(
tm.rands_array(3, size=(2, max_cols - 1)))
df = DataFrame(tm.rands_array(25, (10, max_cols - 1)),
index=midx, columns=mcols)
df.index.names = ['Level 0', 'Level 1']
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context('display.width', 150, 'display.max_columns', 20):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option('display.expand_frame_repr')
def test_wide_repr_unicode(self):
with option_context('mode.sim_interactive', True,
'display.max_columns', 20):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option('display.expand_frame_repr', False)
rep_str = repr(df)
set_option('display.expand_frame_repr', True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context('display.width', 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option('display.expand_frame_repr')
def test_wide_repr_wide_long_columns(self):
with option_context('mode.sim_interactive', True):
df = DataFrame({'a': ['a' * 30, 'b' * 30],
'b': ['c' * 70, 'd' * 80]})
result = repr(df)
assert 'ccccc' in result
assert 'ddddd' in result
def test_long_series(self):
n = 1000
s = Series(
np.random.randint(-50, 50, n),
index=['s{x:04d}'.format(x=x) for x in range(n)], dtype='int64')
import re
str_rep = str(s)
nmatches = len(re.findall('dtype', str_rep))
assert nmatches == 1
def test_index_with_nan(self):
# GH 2850
df = DataFrame({'id1': {0: '1a3',
1: '9h4'},
'id2': {0: np.nan,
1: 'd67'},
'id3': {0: '78d',
1: '79d'},
'value': {0: 123,
1: 64}})
# multi-index
y = df.set_index(['id1', 'id2', 'id3'])
result = y.to_string()
expected = (' value\nid1 id2 id3 \n'
'1a3 NaN 78d 123\n9h4 d67 79d 64')
assert result == expected
# index
y = df.set_index('id2')
result = y.to_string()
expected = (' id1 id3 value\nid2 \n'
'NaN 1a3 78d 123\nd67 9h4 79d 64')
assert result == expected
# with append (this failed in 0.12)
y = df.set_index(['id1', 'id2']).set_index('id3', append=True)
result = y.to_string()
expected = (' value\nid1 id2 id3 \n'
'1a3 NaN 78d 123\n9h4 d67 79d 64')
assert result == expected
# all-nan in mi
df2 = df.copy()
df2.loc[:, 'id2'] = np.nan
y = df2.set_index('id2')
result = y.to_string()
expected = (' id1 id3 value\nid2 \n'
'NaN 1a3 78d 123\nNaN 9h4 79d 64')
assert result == expected
# partial nan in mi
df2 = df.copy()
df2.loc[:, 'id2'] = np.nan
y = df2.set_index(['id2', 'id3'])
result = y.to_string()
expected = (' id1 value\nid2 id3 \n'
'NaN 78d 1a3 123\n 79d 9h4 64')
assert result == expected
df = DataFrame({'id1': {0: np.nan,
1: '9h4'},
'id2': {0: np.nan,
1: 'd67'},
'id3': {0: np.nan,
1: '79d'},
'value': {0: 123,
1: 64}})
y = df.set_index(['id1', 'id2', 'id3'])
result = y.to_string()
expected = (' value\nid1 id2 id3 \n'
'NaN NaN NaN 123\n9h4 d67 79d 64')
assert result == expected
def test_to_string(self):
# big mixed
biggie = DataFrame({'A': np.random.randn(200),
'B': tm.makeStringIndex(200)},
index=np.arange(200))
biggie.loc[:20, 'A'] = np.nan
biggie.loc[:20, 'B'] = np.nan
s = biggie.to_string()
buf = StringIO()
retval = biggie.to_string(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, str)
# print in right order
result = biggie.to_string(columns=['B', 'A'], col_space=17,
float_format='%.5f'.__mod__)
lines = result.split('\n')
header = lines[0].strip().split()
joined = '\n'.join(re.sub(r'\s+', ' ', x).strip() for x in lines[1:])
recons = read_csv(StringIO(joined), names=header,
header=None, sep=' ')
tm.assert_series_equal(recons['B'], biggie['B'])
assert recons['A'].count() == biggie['A'].count()
assert (np.abs(recons['A'].dropna() -
biggie['A'].dropna()) < 0.1).all()
# expected = ['B', 'A']
# assert header == expected
result = biggie.to_string(columns=['A'], col_space=17)
header = result.split('\n')[0].strip().split()
expected = ['A']
assert header == expected
biggie.to_string(columns=['B', 'A'],
formatters={'A': lambda x: '{x:.1f}'.format(x=x)})
biggie.to_string(columns=['B', 'A'], float_format=str)
biggie.to_string(columns=['B', 'A'], col_space=12, float_format=str)
frame = DataFrame(index=np.arange(200))
frame.to_string()
def test_to_string_no_header(self):
df = DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
df_s = df.to_string(header=False)
expected = "0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
def test_to_string_specified_header(self):
df = DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
df_s = df.to_string(header=['X', 'Y'])
expected = ' X Y\n0 1 4\n1 2 5\n2 3 6'
assert df_s == expected
with pytest.raises(ValueError):
df.to_string(header=['X'])
def test_to_string_no_index(self):
# GH 16839, GH 13032
df = DataFrame({'x': [11, 22], 'y': [33, -44], 'z': ['AAA', ' ']})
df_s = df.to_string(index=False)
# Leading space is expected for positive numbers.
expected = (" x y z\n"
" 11 33 AAA\n"
" 22 -44 ")
assert df_s == expected
df_s = df[['y', 'x', 'z']].to_string(index=False)
expected = (" y x z\n"
" 33 11 AAA\n"
"-44 22 ")
assert df_s == expected
def test_to_string_line_width_no_index(self):
# GH 13998, GH 22505
df = DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 1 \n 2 \n 3 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({'x': [11, 22, 33], 'y': [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 11 \n 22 \n 33 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({'x': [11, 22, -33], 'y': [4, 5, -6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 11 \n 22 \n-33 \n\n y \n 4 \n 5 \n-6 "
assert df_s == expected
def test_to_string_float_formatting(self):
tm.reset_display_options()
fmt.set_option('display.precision', 5, 'display.column_space', 12,
'display.notebook_repr_html', False)
df = DataFrame({'x': [0, 0.25, 3456.000, 12e+45, 1.64e+6, 1.7e+8,
1.253456, np.pi, -1e6]})
df_s = df.to_string()
if _three_digit_exp():
expected = (' x\n0 0.00000e+000\n1 2.50000e-001\n'
'2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n'
'5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n'
'8 -1.00000e+006')
else:
expected = (' x\n0 0.00000e+00\n1 2.50000e-01\n'
'2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n'
'5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n'
'8 -1.00000e+06')
assert df_s == expected
df = DataFrame({'x': [3234, 0.253]})
df_s = df.to_string()
expected = (' x\n' '0 3234.000\n' '1 0.253')
assert df_s == expected
tm.reset_display_options()
assert get_option("display.precision") == 6
df = DataFrame({'x': [1e9, 0.2512]})
df_s = df.to_string()
if _three_digit_exp():
expected = (' x\n'
'0 1.000000e+009\n'
'1 2.512000e-001')
else:
expected = (' x\n'
'0 1.000000e+09\n'
'1 2.512000e-01')
assert df_s == expected
def test_to_string_float_format_no_fixed_width(self):
# GH 21625
df = DataFrame({'x': [0.19999]})
expected = ' x\n0 0.200'
assert df.to_string(float_format='%.3f') == expected
# GH 22270
df = DataFrame({'x': [100.0]})
expected = ' x\n0 100'
assert df.to_string(float_format='%.0f') == expected
def test_to_string_small_float_values(self):
df = DataFrame({'a': [1.5, 1e-17, -5.5e-7]})
result = df.to_string()
# same three-digit exponent caveat as in test_to_string_float_formatting
if '{x:.4g}'.format(x=1.7e8) == '1.7e+008':
expected = (' a\n'
'0 1.500000e+000\n'
'1 1.000000e-017\n'
'2 -5.500000e-007')
else:
expected = (' a\n'
'0 1.500000e+00\n'
'1 1.000000e-17\n'
'2 -5.500000e-07')
assert result == expected
# but not all exactly zero
df = df * 0
result = df.to_string()
expected = (' 0\n' '0 0\n' '1 0\n' '2 -0')
def test_to_string_float_index(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.arange(5), index=index)
result = df.to_string()
expected = (' 0\n'
'1.5 0\n'
'2.0 1\n'
'3.0 2\n'
'4.0 3\n'
'5.0 4')
assert result == expected
def test_to_string_complex_float_formatting(self):
# GH #25514
with pd.option_context('display.precision', 5):
df = DataFrame({'x': [
(0.4467846931321966 + 0.0715185102060818j),
(0.2739442392974528 + 0.23515228785438969j),
(0.26974928742135185 + 0.3250604054898979j)]})
result = df.to_string()
expected = (' x\n0 0.44678+0.07152j\n'
'1 0.27394+0.23515j\n'
'2 0.26975+0.32506j')
assert result == expected
def test_to_string_ascii_error(self):
data = [('0 ', ' .gitignore ', ' 5 ',
' \xe2\x80\xa2\xe2\x80\xa2\xe2\x80'
'\xa2\xe2\x80\xa2\xe2\x80\xa2')]
df = DataFrame(data)
# it works!
repr(df)
def test_to_string_int_formatting(self):
df = DataFrame({'x': [-15, 20, 25, -35]})
assert issubclass(df['x'].dtype.type, np.integer)
output = df.to_string()
expected = (' x\n' '0 -15\n' '1 20\n' '2 25\n' '3 -35')
assert output == expected
def test_to_string_index_formatter(self):
df = DataFrame([range(5), range(5, 10), range(10, 15)])
rs = df.to_string(formatters={'__index__': lambda x: 'abc' [x]})
xp = """\
0 1 2 3 4
a 0 1 2 3 4
b 5 6 7 8 9
c 10 11 12 13 14\
"""
assert rs == xp
def test_to_string_left_justify_cols(self):
tm.reset_display_options()
df = DataFrame({'x': [3234, 0.253]})
df_s = df.to_string(justify='left')
expected = (' x \n' '0 3234.000\n' '1 0.253')
assert df_s == expected
def test_to_string_format_na(self):
tm.reset_display_options()
df = DataFrame({'A': [np.nan, -1, -2.1234, 3, 4],
'B': [np.nan, 'foo', 'foooo', 'fooooo', 'bar']})
result = df.to_string()
expected = (' A B\n'
'0 NaN NaN\n'
'1 -1.0000 foo\n'
'2 -2.1234 foooo\n'
'3 3.0000 fooooo\n'
'4 4.0000 bar')
assert result == expected
df = DataFrame({'A': [np.nan, -1., -2., 3., 4.],
'B': [np.nan, 'foo', 'foooo', 'fooooo', 'bar']})
result = df.to_string()
expected = (' A B\n'
'0 NaN NaN\n'
'1 -1.0 foo\n'
'2 -2.0 foooo\n'
'3 3.0 fooooo\n'
'4 4.0 bar')
assert result == expected
def test_to_string_format_inf(self):
# Issue #24861
tm.reset_display_options()
df = DataFrame({
'A': [-np.inf, np.inf, -1, -2.1234, 3, 4],
'B': [-np.inf, np.inf, 'foo', 'foooo', 'fooooo', 'bar']
})
result = df.to_string()
expected = (' A B\n'
'0 -inf -inf\n'
'1 inf inf\n'
'2 -1.0000 foo\n'
'3 -2.1234 foooo\n'
'4 3.0000 fooooo\n'
'5 4.0000 bar')
assert result == expected
df = DataFrame({
'A': [-np.inf, np.inf, -1., -2., 3., 4.],
'B': [-np.inf, np.inf, 'foo', 'foooo', 'fooooo', 'bar']
})
result = df.to_string()
expected = (' A B\n'
'0 -inf -inf\n'
'1 inf inf\n'
'2 -1.0 foo\n'
'3 -2.0 foooo\n'
'4 3.0 fooooo\n'
'5 4.0 bar')
assert result == expected
def test_to_string_decimal(self):
# Issue #23614
df = DataFrame({'A': [6.0, 3.1, 2.2]})
expected = ' A\n0 6,0\n1 3,1\n2 2,2'
assert df.to_string(decimal=',') == expected
def test_to_string_line_width(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
s = df.to_string(line_width=80)
assert max(len(l) for l in s.split('\n')) == 80
def test_show_dimensions(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
with option_context('display.max_rows', 10, 'display.max_columns', 40,
'display.width', 500, 'display.expand_frame_repr',
'info', 'display.show_dimensions', True):
assert '5 rows' in str(df)
assert '5 rows' in df._repr_html_()
with option_context('display.max_rows', 10, 'display.max_columns', 40,
'display.width', 500, 'display.expand_frame_repr',
'info', 'display.show_dimensions', False):
assert '5 rows' not in str(df)
assert '5 rows' not in df._repr_html_()
with option_context('display.max_rows', 2, 'display.max_columns', 2,
'display.width', 500, 'display.expand_frame_repr',
'info', 'display.show_dimensions', 'truncate'):
assert '5 rows' in str(df)
assert '5 rows' in df._repr_html_()
with option_context('display.max_rows', 10, 'display.max_columns', 40,
'display.width', 500, 'display.expand_frame_repr',
'info', 'display.show_dimensions', 'truncate'):
assert '5 rows' not in str(df)
assert '5 rows' not in df._repr_html_()
def test_repr_html(self, float_frame):
df = float_frame
df._repr_html_()
fmt.set_option('display.max_rows', 1, 'display.max_columns', 1)
df._repr_html_()
fmt.set_option('display.notebook_repr_html', False)
df._repr_html_()
tm.reset_display_options()
df = DataFrame([[1, 2], [3, 4]])
fmt.set_option('display.show_dimensions', True)
assert '2 rows' in df._repr_html_()
fmt.set_option('display.show_dimensions', False)
assert '2 rows' not in df._repr_html_()
tm.reset_display_options()
def test_repr_html_mathjax(self):
df = DataFrame([[1, 2], [3, 4]])
assert 'tex2jax_ignore' not in df._repr_html_()
with pd.option_context('display.html.use_mathjax', False):
assert 'tex2jax_ignore' in df._repr_html_()
def test_repr_html_wide(self):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
with option_context('display.max_rows', 60, 'display.max_columns', 20):
assert "..." not in df._repr_html_()
wide_df = DataFrame(tm.rands_array(25, size=(10, max_cols + 1)))
with option_context('display.max_rows', 60, 'display.max_columns', 20):
assert "..." in wide_df._repr_html_()
def test_repr_html_wide_multiindex_cols(self):
max_cols = 20
mcols = MultiIndex.from_product([np.arange(max_cols // 2),
['foo', 'bar']],
names=['first', 'second'])
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))),
columns=mcols)
reg_repr = df._repr_html_()
assert '...' not in reg_repr
mcols = MultiIndex.from_product((np.arange(1 + (max_cols // 2)),
['foo', 'bar']),
names=['first', 'second'])
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))),
columns=mcols)
with option_context('display.max_rows', 60, 'display.max_columns', 20):
assert '...' in df._repr_html_()
def test_repr_html_long(self):
with option_context('display.max_rows', 60):
max_rows = get_option('display.max_rows')
h = max_rows - 1
df = DataFrame({'A': np.arange(1, 1 + h),
'B': np.arange(41, 41 + h)})
reg_repr = df._repr_html_()
assert '..' not in reg_repr
assert str(41 + max_rows // 2) in reg_repr
h = max_rows + 1
df = DataFrame({'A': np.arange(1, 1 + h),
'B': np.arange(41, 41 + h)})
long_repr = df._repr_html_()
assert '..' in long_repr
assert str(41 + max_rows // 2) not in long_repr
assert '{h} rows '.format(h=h) in long_repr
assert '2 columns' in long_repr
def test_repr_html_float(self):
with option_context('display.max_rows', 60):
max_rows = get_option('display.max_rows')
h = max_rows - 1
df = DataFrame({'idx': np.linspace(-10, 10, h),
'A': np.arange(1, 1 + h),
'B': np.arange(41, 41 + h)}).set_index('idx')
reg_repr = df._repr_html_()
assert '..' not in reg_repr
assert '<td>{val}</td>'.format(val=str(40 + h)) in reg_repr
h = max_rows + 1
df = DataFrame({'idx': np.linspace(-10, 10, h),
'A': np.arange(1, 1 + h),
'B': np.arange(41, 41 + h)}).set_index('idx')
long_repr = df._repr_html_()
assert '..' in long_repr
assert '<td>{val}</td>'.format(val='31') not in long_repr
assert '{h} rows '.format(h=h) in long_repr
assert '2 columns' in long_repr
def test_repr_html_long_multiindex(self):
max_rows = 60
max_L1 = max_rows // 2
tuples = list(itertools.product(np.arange(max_L1), ['foo', 'bar']))
idx = MultiIndex.from_tuples(tuples, names=['first', 'second'])
df = DataFrame(np.random.randn(max_L1 * 2, 2), index=idx,
columns=['A', 'B'])
with option_context('display.max_rows', 60, 'display.max_columns', 20):
reg_repr = df._repr_html_()
assert '...' not in reg_repr
tuples = list(itertools.product(np.arange(max_L1 + 1), ['foo', 'bar']))
idx = MultiIndex.from_tuples(tuples, names=['first', 'second'])
df = DataFrame(np.random.randn((max_L1 + 1) * 2, 2), index=idx,
columns=['A', 'B'])
long_repr = df._repr_html_()
assert '...' in long_repr
def test_repr_html_long_and_wide(self):
max_cols = 20
max_rows = 60
h, w = max_rows - 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context('display.max_rows', 60, 'display.max_columns', 20):
assert '...' not in df._repr_html_()
h, w = max_rows + 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context('display.max_rows', 60, 'display.max_columns', 20):
assert '...' in df._repr_html_()
def test_info_repr(self):
# GH#21746 For tests inside a terminal (i.e. not CI) we need to detect
# the terminal size to ensure that we try to print something "too big"
term_width, term_height = get_terminal_size()
max_rows = 60
max_cols = 20 + (max(term_width, 80) - 80) // 4
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_vertically_truncated_repr(df)
with option_context('display.large_repr', 'info'):
assert has_info_repr(df)
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_horizontally_truncated_repr(df)
with option_context('display.large_repr', 'info',
'display.max_columns', max_cols):
assert has_info_repr(df)
def test_info_repr_max_cols(self):
# GH #6939
df = DataFrame(np.random.randn(10, 5))
with option_context('display.large_repr', 'info',
'display.max_columns', 1,
'display.max_info_columns', 4):
assert has_non_verbose_info_repr(df)
with option_context('display.large_repr', 'info',
'display.max_columns', 1,
'display.max_info_columns', 5):
assert not has_non_verbose_info_repr(df)
# test verbose overrides
# fmt.set_option('display.max_info_columns', 4) # exceeded
def test_info_repr_html(self):
max_rows = 60
max_cols = 20
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert r'&lt;class' not in df._repr_html_()
with option_context('display.large_repr', 'info'):
assert r'&lt;class' in df._repr_html_()
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert '<class' not in df._repr_html_()
with option_context('display.large_repr', 'info',
'display.max_columns', max_cols):
assert '&lt;class' in df._repr_html_()
def test_fake_qtconsole_repr_html(self, float_frame):
df = float_frame
def get_ipython():
return {'config': {'KernelApp':
{'parent_appname': 'ipython-qtconsole'}}}
repstr = df._repr_html_()
assert repstr is not None
fmt.set_option('display.max_rows', 5, 'display.max_columns', 2)
repstr = df._repr_html_()
assert 'class' in repstr # info fallback
tm.reset_display_options()
def test_pprint_pathological_object(self):
"""
If the test fails, it at least won't hang.
"""
class A:
def __getitem__(self, key):
return 3 # obviously simplified
df = DataFrame([A()])
repr(df) # just don't die
def test_float_trim_zeros(self):
vals = [2.08430917305e+10, 3.52205017305e+10, 2.30674817305e+10,
2.03954217305e+10, 5.59897817305e+10]
skip = True
for line in repr(DataFrame({'A': vals})).split('\n')[:-2]:
if line.startswith('dtype:'):
continue
if _three_digit_exp():
assert ('+010' in line) or skip
else:
assert ('+10' in line) or skip
skip = False
def test_dict_entries(self):
df = DataFrame({'A': [{'a': 1, 'b': 2}]})
val = df.to_string()
assert "'a': 1" in val
assert "'b': 2" in val
def test_period(self):
# GH 12615
df = pd.DataFrame({'A': pd.period_range('2013-01',
periods=4, freq='M'),
'B': [pd.Period('2011-01', freq='M'),
pd.Period('2011-02-01', freq='D'),
pd.Period('2011-03-01 09:00', freq='H'),
pd.Period('2011-04', freq='M')],
'C': list('abcd')})
exp = (" A B C\n"
"0 2013-01 2011-01 a\n"
"1 2013-02 2011-02-01 b\n"
"2 2013-03 2011-03-01 09:00 c\n"
"3 2013-04 2011-04 d")
assert str(df) == exp
def gen_series_formatting():
s1 = pd.Series(['a'] * 100)
s2 = pd.Series(['ab'] * 100)
s3 = pd.Series(['a', 'ab', 'abc', 'abcd', 'abcde', 'abcdef'])
s4 = s3[::-1]
test_sers = {'onel': s1, 'twol': s2, 'asc': s3, 'desc': s4}
return test_sers
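# gen_series_formatting() builds four string Series - constant one- and
# two-character values ('onel', 'twol') plus ascending ('asc') and descending
# ('desc') lengths - used below to check that truncated Series reprs keep a
# consistent column width.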
class TestSeriesFormatting:
def setup_method(self, method):
self.ts = tm.makeTimeSeries()
def test_repr_unicode(self):
s = Series(['\u03c3'] * 10)
repr(s)
a = Series(["\u05d0"] * 1000)
a.name = 'title1'
repr(a)
def test_to_string(self):
buf = StringIO()
s = self.ts.to_string()
retval = self.ts.to_string(buf=buf)
assert retval is None
assert buf.getvalue().strip() == s
# pass float_format
format = '%.4f'.__mod__
result = self.ts.to_string(float_format=format)
result = [x.split()[1] for x in result.split('\n')[:-1]]
expected = [format(x) for x in self.ts]
assert result == expected
# empty string
result = self.ts[:0].to_string()
assert result == 'Series([], Freq: B)'
result = self.ts[:0].to_string(length=0)
assert result == 'Series([], Freq: B)'
# name and length
cp = self.ts.copy()
cp.name = 'foo'
result = cp.to_string(length=True, name=True, dtype=True)
last_line = result.split('\n')[-1].strip()
assert last_line == ("Freq: B, Name: foo, "
"Length: {cp}, dtype: float64".format(cp=len(cp)))
def test_freq_name_separation(self):
s = Series(np.random.randn(10),
index=date_range('1/1/2000', periods=10), name=0)
result = repr(s)
assert 'Freq: D, Name: 0' in result
def test_to_string_mixed(self):
s = Series(['foo', np.nan, -1.23, 4.56])
result = s.to_string()
expected = ('0 foo\n' + '1 NaN\n' + '2 -1.23\n' +
'3 4.56')
assert result == expected
# but don't count NAs as floats
s = Series(['foo', np.nan, 'bar', 'baz'])
result = s.to_string()
expected = ('0 foo\n' + '1 NaN\n' + '2 bar\n' + '3 baz')
assert result == expected
s = Series(['foo', 5, 'bar', 'baz'])
result = s.to_string()
expected = ('0 foo\n' + '1 5\n' + '2 bar\n' + '3 baz')
assert result == expected
def test_to_string_float_na_spacing(self):
s = Series([0., 1.5678, 2., -3., 4.])
s[::2] = np.nan
result = s.to_string()
expected = ('0 NaN\n' + '1 1.5678\n' + '2 NaN\n' +
'3 -3.0000\n' + '4 NaN')
assert result == expected
def test_to_string_without_index(self):
# GH 11729 Test index=False option
s = Series([1, 2, 3, 4])
result = s.to_string(index=False)
expected = (' 1\n' + ' 2\n' + ' 3\n' + ' 4')
assert result == expected
def test_unicode_name_in_footer(self):
s = Series([1, 2], name='\u05e2\u05d1\u05e8\u05d9\u05ea')
sf = fmt.SeriesFormatter(s, name='\u05e2\u05d1\u05e8\u05d9\u05ea')
sf._get_footer() # should not raise exception
def test_east_asian_unicode_series(self):
# not aligned properly because of east asian width
# unicode index
s = Series(['a', 'bb', 'CCC', 'D'],
index=['あ', 'いい', 'ううう', 'ええええ'])
expected = ("あ a\nいい bb\nううう CCC\n"
"ええええ D\ndtype: object")
assert repr(s) == expected
# unicode values
s = Series(['あ', 'いい', 'ううう', 'ええええ'],
index=['a', 'bb', 'c', 'ddd'])
expected = ("a あ\nbb いい\nc ううう\n"
"ddd ええええ\ndtype: object")
assert repr(s) == expected
# both
s = Series(['あ', 'いい', 'ううう', 'ええええ'],
index=['ああ', 'いいいい', 'う', 'えええ'])
expected = ("ああ あ\nいいいい いい\nう ううう\n"
"えええ ええええ\ndtype: object")
assert repr(s) == expected
# unicode footer
s = Series(['あ', 'いい', 'ううう', 'ええええ'],
index=['ああ', 'いいいい', 'う', 'えええ'],
name='おおおおおおお')
expected = ("ああ あ\nいいいい いい\nう ううう\n"
"えええ ええええ\nName: おおおおおおお, dtype: object")
assert repr(s) == expected
# MultiIndex
idx = pd.MultiIndex.from_tuples([('あ', 'いい'), ('う', 'え'), (
'おおお', 'かかかか'), ('き', 'くく')])
s = Series([1, 22, 3333, 44444], index=idx)
expected = ("あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\ndtype: int64")
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, 'AB', np.nan, 'あああ'])
expected = ("1 1\nAB 22\nNaN 3333\n"
"あああ 44444\ndtype: int64")
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series([1, 22, 3333, 44444],
index=[1, 'AB', pd.Timestamp('2011-01-01'), 'あああ'])
expected = ("1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64")
assert repr(s) == expected
# truncate
with option_context('display.max_rows', 3):
s = Series(['あ', 'いい', 'ううう', 'ええええ'],
name='おおおおおおお')
expected = ("0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object")
assert repr(s) == expected
s.index = ['ああ', 'いいいい', 'う', 'えええ']
expected = ("ああ あ\n ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object")
assert repr(s) == expected
# Enable Unicode option -----------------------------------------
with option_context('display.unicode.east_asian_width', True):
# unicode index
s = Series(['a', 'bb', 'CCC', 'D'],
index=['あ', 'いい', 'ううう', 'ええええ'])
expected = ("あ a\nいい bb\nううう CCC\n"
"ええええ D\ndtype: object")
assert repr(s) == expected
# unicode values
s = Series(['あ', 'いい', 'ううう', 'ええええ'],
index=['a', 'bb', 'c', 'ddd'])
expected = ("a あ\nbb いい\nc ううう\n"
"ddd ええええ\ndtype: object")
assert repr(s) == expected
# both
s = Series(['あ', 'いい', 'ううう', 'ええええ'],
index=['ああ', 'いいいい', 'う', 'えええ'])
expected = ("ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\ndtype: object")
assert repr(s) == expected
# unicode footer
s = Series(['あ', 'いい', 'ううう', 'ええええ'],
index=['ああ', 'いいいい', 'う', 'えええ'],
name='おおおおおおお')
expected = ("ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\n"
"Name: おおおおおおお, dtype: object")
assert repr(s) == expected
# MultiIndex
idx = pd.MultiIndex.from_tuples([('あ', 'いい'), ('う', 'え'), (
'おおお', 'かかかか'), ('き', 'くく')])
s = Series([1, 22, 3333, 44444], index=idx)
expected = ("あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\n"
"dtype: int64")
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, 'AB', np.nan, 'あああ'])
expected = ("1 1\nAB 22\nNaN 3333\n"
"あああ 44444\ndtype: int64")
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series([1, 22, 3333, 44444],
index=[1, 'AB', pd.Timestamp('2011-01-01'), 'あああ'])
expected = ("1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64")
assert repr(s) == expected
# truncate
with option_context('display.max_rows', 3):
s = Series(['あ', 'いい', 'ううう', 'ええええ'],
name='おおおおおおお')
expected = ("0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object")
assert repr(s) == expected
s.index = ['ああ', 'いいいい', 'う', 'えええ']
expected = ("ああ あ\n"
" ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object")
assert repr(s) == expected
# ambiguous unicode
s = Series(['¡¡', 'い¡¡', 'ううう', 'ええええ'],
index=['ああ', '¡¡¡¡いい', '¡¡', 'えええ'])
expected = ("ああ ¡¡\n"
"¡¡¡¡いい い¡¡\n"
"¡¡ ううう\n"
"えええ ええええ\ndtype: object")
assert repr(s) == expected
def test_float_trim_zeros(self):
vals = [2.08430917305e+10, 3.52205017305e+10, 2.30674817305e+10,
2.03954217305e+10, 5.59897817305e+10]
for line in repr(Series(vals)).split('\n'):
if line.startswith('dtype:'):
continue
if _three_digit_exp():
assert '+010' in line
else:
assert '+10' in line
def test_datetimeindex(self):
index = date_range('20130102', periods=6)
s = Series(1, index=index)
result = s.to_string()
assert '2013-01-02' in result
# nat in index
s2 = Series(2, index=[Timestamp('20130111'), NaT])
s = s2.append(s)
result = s.to_string()
assert 'NaT' in result
# nat in summary
result = str(s2.index)
assert 'NaT' in result
@pytest.mark.parametrize('start_date', [
'2017-01-01 23:59:59.999999999',
'2017-01-01 23:59:59.99999999',
'2017-01-01 23:59:59.9999999',
'2017-01-01 23:59:59.999999',
'2017-01-01 23:59:59.99999',
'2017-01-01 23:59:59.9999'
])
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
s1 = Series(date_range(start=start_date, freq='D', periods=5))
result = str(s1)
assert start_date in result
dti = date_range(start=start_date, freq='D', periods=5)
s2 = Series(3, index=dti)
result = str(s2.index)
assert start_date in result
def test_timedelta64(self):
from datetime import datetime, timedelta
Series(np.array([1100, 20], dtype='timedelta64[ns]')).to_string()
s = Series(date_range('2012-1-1', periods=3, freq='D'))
# GH2146
# adding NaTs
y = s - s.shift(1)
result = y.to_string()
assert '1 days' in result
assert '00:00:00' not in result
assert 'NaT' in result
# with frac seconds
o = Series([datetime(2012, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.to_string()
assert '-1 days +23:59:59.999850' in result
# rounding?
o = Series([datetime(2012, 1, 1, 1)] * 3)
y = s - o
result = y.to_string()
assert '-1 days +23:00:00' in result
assert '1 days 23:00:00' in result
o = Series([datetime(2012, 1, 1, 1, 1)] * 3)
y = s - o
result = y.to_string()
assert '-1 days +22:59:00' in result
assert '1 days 22:59:00' in result
o = Series([datetime(2012, 1, 1, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.to_string()
assert '-1 days +22:58:59.999850' in result
assert '0 days 22:58:59.999850' in result
# neg time
td = timedelta(minutes=5, seconds=3)
s2 = Series(date_range('2012-1-1', periods=3, freq='D')) + td
y = s - s2
result = y.to_string()
assert '-1 days +23:54:57' in result
td = timedelta(microseconds=550)
s2 = Series(date_range('2012-1-1', periods=3, freq='D')) + td
y = s - td
result = y.to_string()
assert '2012-01-01 23:59:59.999450' in result
# no boxing of the actual elements
td = Series(pd.timedelta_range('1 days', periods=3))
result = td.to_string()
assert result == "0 1 days\n1 2 days\n2 3 days"
def test_mixed_datetime64(self):
df = DataFrame({'A': [1, 2], 'B': ['2012-01-01', '2012-01-02']})
df['B'] = pd.to_datetime(df.B)
result = repr(df.loc[0])
assert '2012-01-01' in result
def test_period(self):
# GH 12615
index = pd.period_range('2013-01', periods=6, freq='M')
s = Series(np.arange(6, dtype='int64'), index=index)
exp = ("2013-01 0\n"
"2013-02 1\n"
"2013-03 2\n"
"2013-04 3\n"
"2013-05 4\n"
"2013-06 5\n"
"Freq: M, dtype: int64")
assert str(s) == exp
s = Series(index)
exp = ("0 2013-01\n"
"1 2013-02\n"
"2 2013-03\n"
"3 2013-04\n"
"4 2013-05\n"
"5 2013-06\n"
"dtype: period[M]")
assert str(s) == exp
# periods with mixed freq
s = Series([pd.Period('2011-01', freq='M'),
pd.Period('2011-02-01', freq='D'),
pd.Period('2011-03-01 09:00', freq='H')])
exp = ("0 2011-01\n1 2011-02-01\n"
"2 2011-03-01 09:00\ndtype: object")
assert str(s) == exp
def test_max_multi_index_display(self):
# GH 7101
# doc example (indexing.rst)
# multi-index
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = list(zip(*arrays))
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
s = Series(np.random.randn(8), index=index)
with option_context("display.max_rows", 10):
assert len(str(s).split('\n')) == 10
with option_context("display.max_rows", 3):
assert len(str(s).split('\n')) == 5
with option_context("display.max_rows", 2):
assert len(str(s).split('\n')) == 5
with option_context("display.max_rows", 1):
assert len(str(s).split('\n')) == 4
with option_context("display.max_rows", 0):
assert len(str(s).split('\n')) == 10
# index
s = Series(np.random.randn(8), None)
with option_context("display.max_rows", 10):
assert len(str(s).split('\n')) == 9
with option_context("display.max_rows", 3):
assert len(str(s).split('\n')) == 4
with option_context("display.max_rows", 2):
assert len(str(s).split('\n')) == 4
with option_context("display.max_rows", 1):
assert len(str(s).split('\n')) == 3
with option_context("display.max_rows", 0):
assert len(str(s).split('\n')) == 9
# Make sure #8532 is fixed
def test_consistent_format(self):
s = pd.Series([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9999, 1, 1] * 10)
with option_context("display.max_rows", 10,
"display.show_dimensions", False):
res = repr(s)
exp = ('0 1.0000\n1 1.0000\n2 1.0000\n3 '
'1.0000\n4 1.0000\n ... \n125 '
'1.0000\n126 1.0000\n127 0.9999\n128 '
'1.0000\n129 1.0000\ndtype: float64')
assert res == exp
def chck_ncols(self, s):
with option_context("display.max_rows", 10):
res = repr(s)
lines = res.split('\n')
lines = [line for line in repr(s).split('\n')
if not re.match(r'[^\.]*\.+', line)][:-1]
ncolsizes = len({len(line.strip()) for line in lines})
assert ncolsizes == 1
def test_format_explicit(self):
test_sers = gen_series_formatting()
with option_context("display.max_rows", 4,
"display.show_dimensions", False):
res = repr(test_sers['onel'])
exp = '0 a\n1 a\n ..\n98 a\n99 a\ndtype: object'
assert exp == res
res = repr(test_sers['twol'])
exp = ('0 ab\n1 ab\n ..\n98 ab\n99 ab\ndtype:'
' object')
assert exp == res
res = repr(test_sers['asc'])
exp = ('0 a\n1 ab\n ... \n4 abcde\n5'
' abcdef\ndtype: object')
assert exp == res
res = repr(test_sers['desc'])
exp = ('5 abcdef\n4 abcde\n ... \n1 ab\n0'
' a\ndtype: object')
assert exp == res
def test_ncols(self):
test_sers = gen_series_formatting()
for s in test_sers.values():
self.chck_ncols(s)
def test_max_rows_eq_one(self):
s = Series(range(10), dtype='int64')
with option_context("display.max_rows", 1):
strrepr = repr(s).split('\n')
exp1 = ['0', '0']
res1 = strrepr[0].split()
assert exp1 == res1
exp2 = ['..']
res2 = strrepr[1].split()
assert exp2 == res2
def test_truncate_ndots(self):
def getndots(s):
return len(re.match(r'[^\.]*(\.*)', s).groups()[0])
s = Series([0, 2, 3, 6])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace('\n', '')
assert getndots(strrepr) == 2
s = Series([0, 100, 200, 400])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace('\n', '')
assert getndots(strrepr) == 3
def test_show_dimensions(self):
# gh-7117
s = Series(range(5))
assert 'Length' not in repr(s)
with option_context("display.max_rows", 4):
assert 'Length' in repr(s)
with option_context("display.show_dimensions", True):
assert 'Length' in repr(s)
with option_context("display.max_rows", 4,
"display.show_dimensions", False):
assert 'Length' not in repr(s)
def test_to_string_name(self):
s = Series(range(100), dtype='int64')
s.name = 'myser'
res = s.to_string(max_rows=2, name=True)
exp = '0 0\n ..\n99 99\nName: myser'
assert res == exp
res = s.to_string(max_rows=2, name=False)
exp = '0 0\n ..\n99 99'
assert res == exp
def test_to_string_dtype(self):
s = Series(range(100), dtype='int64')
res = s.to_string(max_rows=2, dtype=True)
exp = '0 0\n ..\n99 99\ndtype: int64'
assert res == exp
res = s.to_string(max_rows=2, dtype=False)
exp = '0 0\n ..\n99 99'
assert res == exp
def test_to_string_length(self):
s = Series(range(100), dtype='int64')
res = s.to_string(max_rows=2, length=True)
exp = '0 0\n ..\n99 99\nLength: 100'
assert res == exp
def test_to_string_na_rep(self):
s = pd.Series(index=range(100))
res = s.to_string(na_rep='foo', max_rows=2)
exp = '0 foo\n ..\n99 foo'
assert res == exp
def test_to_string_float_format(self):
s = pd.Series(range(10), dtype='float64')
res = s.to_string(float_format=lambda x: '{0:2.1f}'.format(x),
max_rows=2)
exp = '0 0.0\n ..\n9 9.0'
assert res == exp
def test_to_string_header(self):
s = pd.Series(range(10), dtype='int64')
s.index.name = 'foo'
res = s.to_string(header=True, max_rows=2)
exp = 'foo\n0 0\n ..\n9 9'
assert res == exp
res = s.to_string(header=False, max_rows=2)
exp = '0 0\n ..\n9 9'
assert res == exp
def test_to_string_multindex_header(self):
# GH 16718
df = (pd.DataFrame({'a': [0], 'b': [1], 'c': [2], 'd': [3]})
.set_index(['a', 'b']))
res = df.to_string(header=['r1', 'r2'])
exp = ' r1 r2\na b \n0 1 2 3'
assert res == exp
def _three_digit_exp():
return '{x:.4g}'.format(x=1.7e8) == '1.7e+008'
class TestFloatArrayFormatter:
def test_misc(self):
obj = fmt.FloatArrayFormatter(np.array([], dtype=np.float64))
result = obj.get_result()
assert len(result) == 0
def test_format(self):
obj = fmt.FloatArrayFormatter(np.array([12, 0], dtype=np.float64))
result = obj.get_result()
assert result[0] == " 12.0"
assert result[1] == " 0.0"
def test_output_significant_digits(self):
# Issue #9764
# In case default display precision changes:
with pd.option_context('display.precision', 6):
# DataFrame example from issue #9764
d = pd.DataFrame(
{'col1': [9.999e-8, 1e-7, 1.0001e-7, 2e-7, 4.999e-7, 5e-7,
5.0001e-7, 6e-7, 9.999e-7, 1e-6, 1.0001e-6, 2e-6,
4.999e-6, 5e-6, 5.0001e-6, 6e-6]})
expected_output = {
(0, 6):
' col1\n'
'0 9.999000e-08\n'
'1 1.000000e-07\n'
'2 1.000100e-07\n'
'3 2.000000e-07\n'
'4 4.999000e-07\n'
'5 5.000000e-07',
(1, 6):
' col1\n'
'1 1.000000e-07\n'
'2 1.000100e-07\n'
'3 2.000000e-07\n'
'4 4.999000e-07\n'
'5 5.000000e-07',
(1, 8):
' col1\n'
'1 1.000000e-07\n'
'2 1.000100e-07\n'
'3 2.000000e-07\n'
'4 4.999000e-07\n'
'5 5.000000e-07\n'
'6 5.000100e-07\n'
'7 6.000000e-07',
(8, 16):
' col1\n'
'8 9.999000e-07\n'
'9 1.000000e-06\n'
'10 1.000100e-06\n'
'11 2.000000e-06\n'
'12 4.999000e-06\n'
'13 5.000000e-06\n'
'14 5.000100e-06\n'
'15 6.000000e-06',
(9, 16):
' col1\n'
'9 0.000001\n'
'10 0.000001\n'
'11 0.000002\n'
'12 0.000005\n'
'13 0.000005\n'
'14 0.000005\n'
'15 0.000006'
}
for (start, stop), v in expected_output.items():
assert str(d[start:stop]) == v
def test_too_long(self):
# GH 10451
with pd.option_context('display.precision', 4):
# need both a number > 1e6 and something that normally formats to
# having length > display.precision + 6
df = pd.DataFrame(dict(x=[12345.6789]))
assert str(df) == ' x\n0 12345.6789'
df = pd.DataFrame(dict(x=[2e6]))
assert str(df) == ' x\n0 2000000.0'
df = pd.DataFrame(dict(x=[12345.6789, 2e6]))
assert str(df) == ' x\n0 1.2346e+04\n1 2.0000e+06'
class TestRepr_timedelta64:
def test_none(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1s = pd.to_timedelta(1, unit='s')
delta_500ms = pd.to_timedelta(500, unit='ms')
drepr = lambda x: x._repr_base()
assert drepr(delta_1d) == "1 days"
assert drepr(-delta_1d) == "-1 days"
assert drepr(delta_0d) == "0 days"
assert drepr(delta_1s) == "0 days 00:00:01"
assert drepr(delta_500ms) == "0 days 00:00:00.500000"
assert drepr(delta_1d + delta_1s) == "1 days 00:00:01"
assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01"
assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000"
assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000"
def test_sub_day(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1s = pd.to_timedelta(1, unit='s')
delta_500ms = pd.to_timedelta(500, unit='ms')
drepr = lambda x: x._repr_base(format='sub_day')
assert drepr(delta_1d) == "1 days"
assert drepr(-delta_1d) == "-1 days"
assert drepr(delta_0d) == "00:00:00"
assert drepr(delta_1s) == "00:00:01"
assert drepr(delta_500ms) == "00:00:00.500000"
assert drepr(delta_1d + delta_1s) == "1 days 00:00:01"
assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01"
assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000"
assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000"
def test_long(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1s = pd.to_timedelta(1, unit='s')
delta_500ms = pd.to_timedelta(500, unit='ms')
drepr = lambda x: x._repr_base(format='long')
assert drepr(delta_1d) == "1 days 00:00:00"
assert drepr(-delta_1d) == "-1 days +00:00:00"
assert drepr(delta_0d) == "0 days 00:00:00"
assert drepr(delta_1s) == "0 days 00:00:01"
assert drepr(delta_500ms) == "0 days 00:00:00.500000"
assert drepr(delta_1d + delta_1s) == "1 days 00:00:01"
assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01"
assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000"
assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000"
def test_all(self):
delta_1d = pd.to_timedelta(1, unit='D')
delta_0d = pd.to_timedelta(0, unit='D')
delta_1ns = pd.to_timedelta(1, unit='ns')
drepr = lambda x: x._repr_base(format='all')
assert drepr(delta_1d) == "1 days 00:00:00.000000000"
assert drepr(-delta_1d) == "-1 days +00:00:00.000000000"
assert drepr(delta_0d) == "0 days 00:00:00.000000000"
assert drepr(delta_1ns) == "0 days 00:00:00.000000001"
assert drepr(-delta_1d + delta_1ns) == "-1 days +00:00:00.000000001"
class TestTimedelta64Formatter:
def test_days(self):
x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
result = fmt.Timedelta64Formatter(x, box=True).get_result()
assert result[0].strip() == "'0 days'"
assert result[1].strip() == "'1 days'"
result = fmt.Timedelta64Formatter(x[1:2], box=True).get_result()
assert result[0].strip() == "'1 days'"
result = fmt.Timedelta64Formatter(x, box=False).get_result()
assert result[0].strip() == "0 days"
assert result[1].strip() == "1 days"
result = fmt.Timedelta64Formatter(x[1:2], box=False).get_result()
assert result[0].strip() == "1 days"
def test_days_neg(self):
x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='D')
result = fmt.Timedelta64Formatter(-x, box=True).get_result()
assert result[0].strip() == "'0 days'"
assert result[1].strip() == "'-1 days'"
def test_subdays(self):
y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
result = fmt.Timedelta64Formatter(y, box=True).get_result()
assert result[0].strip() == "'00:00:00'"
assert result[1].strip() == "'00:00:01'"
def test_subdays_neg(self):
y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit='s')
result = fmt.Timedelta64Formatter(-y, box=True).get_result()
assert result[0].strip() == "'00:00:00'"
assert result[1].strip() == "'-1 days +23:59:59'"
def test_zero(self):
x = pd.to_timedelta(list(range(1)) + [pd.NaT], unit='D')
result = fmt.Timedelta64Formatter(x, box=True).get_result()
assert result[0].strip() == "'0 days'"
x = pd.to_timedelta(list(range(1)), unit='D')
result = fmt.Timedelta64Formatter(x, box=True).get_result()
assert result[0].strip() == "'0 days'"
class TestDatetime64Formatter:
def test_mixed(self):
x = Series([datetime(2013, 1, 1), datetime(2013, 1, 1, 12), pd.NaT])
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 00:00:00"
assert result[1].strip() == "2013-01-01 12:00:00"
def test_dates(self):
x = Series([datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT])
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01"
assert result[1].strip() == "2013-01-02"
def test_date_nanos(self):
x = Series([Timestamp(200)])
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "1970-01-01 00:00:00.000000200"
def test_dates_display(self):
# 10170
# make sure that we are consistently display date formatting
x = Series(date_range('20130101 09:00:00', periods=5, freq='D'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-05 09:00:00"
x = Series(date_range('20130101 09:00:00', periods=5, freq='s'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:04"
x = Series(date_range('20130101 09:00:00', periods=5, freq='ms'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00.000"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:00.004"
x = Series(date_range('20130101 09:00:00', periods=5, freq='us'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00.000000"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:00.000004"
x = Series(date_range('20130101 09:00:00', periods=5, freq='N'))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00.000000000"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:00.000000004"
def test_datetime64formatter_yearmonth(self):
x = Series([datetime(2016, 1, 1), datetime(2016, 2, 2)])
def format_func(x):
return x.strftime('%Y-%m')
formatter = fmt.Datetime64Formatter(x, formatter=format_func)
result = formatter.get_result()
assert result == ['2016-01', '2016-02']
def test_datetime64formatter_hoursecond(self):
x = Series(pd.to_datetime(['10:10:10.100', '12:12:12.120'],
format='%H:%M:%S.%f'))
def format_func(x):
return x.strftime('%H:%M')
formatter = fmt.Datetime64Formatter(x, formatter=format_func)
result = formatter.get_result()
assert result == ['10:10', '12:12']
class TestNaTFormatting:
def test_repr(self):
assert repr(pd.NaT) == "NaT"
def test_str(self):
assert str(pd.NaT) == "NaT"
class TestDatetimeIndexFormat:
def test_datetime(self):
formatted = pd.to_datetime([datetime(2003, 1, 1, 12), pd.NaT]).format()
assert formatted[0] == "2003-01-01 12:00:00"
assert formatted[1] == "NaT"
def test_date(self):
formatted = pd.to_datetime([datetime(2003, 1, 1), pd.NaT]).format()
assert formatted[0] == "2003-01-01"
assert formatted[1] == "NaT"
def test_date_tz(self):
formatted = pd.to_datetime([datetime(2013, 1, 1)], utc=True).format()
assert formatted[0] == "2013-01-01 00:00:00+00:00"
formatted = pd.to_datetime(
[datetime(2013, 1, 1), pd.NaT], utc=True).format()
assert formatted[0] == "2013-01-01 00:00:00+00:00"
def test_date_explicit_date_format(self):
formatted = pd.to_datetime([datetime(2003, 2, 1), pd.NaT]).format(
date_format="%m-%d-%Y", na_rep="UT")
assert formatted[0] == "02-01-2003"
assert formatted[1] == "UT"
class TestDatetimeIndexUnicode:
def test_dates(self):
text = str(pd.to_datetime([datetime(2013, 1, 1), datetime(2014, 1, 1)
]))
assert "['2013-01-01'," in text
assert ", '2014-01-01']" in text
def test_mixed(self):
text = str(pd.to_datetime([datetime(2013, 1, 1), datetime(
2014, 1, 1, 12), datetime(2014, 1, 1)]))
assert "'2013-01-01 00:00:00'," in text
assert "'2014-01-01 00:00:00']" in text
class TestStringRepTimestamp:
def test_no_tz(self):
dt_date = datetime(2013, 1, 2)
assert str(dt_date) == str(Timestamp(dt_date))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3)
assert str(dt_datetime) == str(Timestamp(dt_datetime))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45)
assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))
ts_nanos_only = Timestamp(200)
assert str(ts_nanos_only) == "1970-01-01 00:00:00.000000200"
ts_nanos_micros = Timestamp(1200)
assert str(ts_nanos_micros) == "1970-01-01 00:00:00.000001200"
def test_tz_pytz(self):
dt_date = datetime(2013, 1, 2, tzinfo=pytz.utc)
assert str(dt_date) == str(Timestamp(dt_date))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=pytz.utc)
assert str(dt_datetime) == str(Timestamp(dt_datetime))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=pytz.utc)
assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))
def test_tz_dateutil(self):
utc = dateutil.tz.tzutc()
dt_date = datetime(2013, 1, 2, tzinfo=utc)
assert str(dt_date) == str(Timestamp(dt_date))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=utc)
assert str(dt_datetime) == str(Timestamp(dt_datetime))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=utc)
assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))
def test_nat_representations(self):
for f in (str, repr, methodcaller('isoformat')):
assert f(pd.NaT) == 'NaT'
def test_format_percentiles():
result = fmt.format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])
expected = ['1.999%', '2.001%', '50%', '66.667%', '99.99%']
assert result == expected
result = fmt.format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])
expected = ['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']
assert result == expected
msg = r"percentiles should all be in the interval \[0,1\]"
with pytest.raises(ValueError, match=msg):
fmt.format_percentiles([0.1, np.nan, 0.5])
with pytest.raises(ValueError, match=msg):
fmt.format_percentiles([-0.001, 0.1, 0.5])
with pytest.raises(ValueError, match=msg):
fmt.format_percentiles([2, 0.1, 0.5])
with pytest.raises(ValueError, match=msg):
fmt.format_percentiles([0.1, 0.5, 'a'])
def test_format_percentiles_integer_idx():
# Issue #26660
result = fmt.format_percentiles(np.linspace(0, 1, 10 + 1))
expected = ['0%', '10%', '20%', '30%', '40%', '50%',
'60%', '70%', '80%', '90%', '100%']
assert result == expected
def test_repr_html_ipython_config(ip):
code = textwrap.dedent("""\
import pandas as pd
df = pd.DataFrame({"A": [1, 2]})
df._repr_html_()
cfg = get_ipython().config
cfg['IPKernelApp']['parent_appname']
df._repr_html_()
""")
result = ip.run_cell(code)
assert not result.error_in_exec
| bsd-3-clause |
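The formatting tests above revolve around pandas' truncation options; the following is a minimal sketch of the same behaviour outside the test harness (the Series contents are arbitrary), assuming a reasonably recent pandas.
import numpy as np
import pandas as pd
s = pd.Series(np.arange(100), name="demo")
# repr() truncates to head/tail rows plus a '..' marker once the Series
# exceeds display.max_rows, as the tests above assert.
with pd.option_context("display.max_rows", 4, "display.show_dimensions", False):
    print(repr(s))
# to_string() exposes the same truncation plus name/dtype/length toggles.
print(s.to_string(max_rows=2, name=True, dtype=True, length=True))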
KayaBaber/Computational-Physics | Assignment_3_chaos_and_pendulums/Pre-GitHub-versions/Phys440_Assignment03_Prob2 (2).py | 1 | 1771 | '''
Kaya Baber
Physics 440 - Computational Physics
Assignment 3
Problem 2
'''
from scipy.integrate import odeint
import numpy as np
import matplotlib.pyplot as plt
import math
def f(thetas, t, b, gamma, omega):
#pendulum driven-damped function
theta=thetas[0]
thetaDot=thetas[1]
thetaDouble=-b*thetaDot - math.sin(theta) + gamma*math.cos(omega*t)
return thetaDot, thetaDouble
#initial conditions
theta0=-0.0
thetaDot0=0.0
thetas=[theta0,thetaDot0]
#generating loop
for i in range(7):
#constants
b=0.05
omega=0.7
gamma=0.4+(i*0.1)
#FIX YO OMEGA STUFF
#computation parameters
steps=100
periods=10
    t = np.linspace(0, periods*(math.pi*2.0/omega), int(steps*(math.pi*2.0/omega))+1)
#ODE solution
sol = odeint(f, thetas, t, args=(b, gamma, omega))
#TAKE THE STROBE
plt.plot(t, sol[:, 1], 'b', label='thetaDot(t)')
plt.xlabel('time')
    plt.ylabel('theta-dot')
plt.grid()
#plt.savefig('/Users/student/kbaber/Desktop/Phys440/Assignment 3/plots//gamma'+str(gamma)+'_thetaDot_t.png',bbox_inches='tight')
plt.savefig('\Users\Kaya\Google Drive\School\Phys 440\Assignments\Assignment 3\plots\\gamma'+str(gamma)+'_thetaDot_t.png',bbox_inches='tight')
#plt.show()
plt.clf()
plt.plot(sol[:,0], sol[:, 1], 'g', label='theta-Dot(theta)')
plt.xlabel('theta')
plt.ylabel('theta-Dot')
plt.grid()
plt.gca().set_aspect('equal', adjustable='box')
#plt.savefig('/Users/student/kbaber/Desktop/Phys440/Assignment 3/plots//gamma'+str(gamma)+'_thetaDot_theta.png',bbox_inches='tight')
plt.savefig('\Users\Kaya\Google Drive\School\Phys 440\Assignments\Assignment 3\plots\\gamma'+str(gamma)+'_thetaDot_theta.png',bbox_inches='tight')
#plt.show()
plt.clf()
print(t) | mit |
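The "#TAKE THE STROBE" note in the script above is left as a to-do; below is a hedged sketch of one way to do it, sampling the driven-pendulum solution once per drive period to build a Poincare section. The parameter values and step counts here are illustrative, not taken from the assignment.
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
def pendulum(thetas, t, b, gamma, omega):
    # Same damped, driven pendulum ODE as in the script above.
    theta, theta_dot = thetas
    return theta_dot, -b * theta_dot - math.sin(theta) + gamma * math.cos(omega * t)
b, gamma, omega = 0.05, 0.9, 0.7          # illustrative drive strength
steps_per_period, periods = 100, 200      # illustrative resolution/length
t = np.linspace(0.0, periods * 2.0 * math.pi / omega,
                steps_per_period * periods + 1)
sol = odeint(pendulum, [0.0, 0.0], t, args=(b, gamma, omega))
# Strobe: keep one sample per drive period (every steps_per_period-th point).
strobe = sol[::steps_per_period]
plt.plot(strobe[:, 0], strobe[:, 1], '.', markersize=2)
plt.xlabel('theta')
plt.ylabel('theta-dot')
plt.show()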
abhisg/scikit-learn | examples/linear_model/plot_lasso_model_selection.py | 311 | 5431 | """
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of degrees of freedom; the criteria are
derived for large samples (asymptotic results) and assume the model
is correct, i.e. that the data are actually generated by this model.
They also tend to break when the problem is badly conditioned
(more features than samples).
For cross-validation, we use 20-fold with 2 algorithms to compute the
Lasso path: coordinate descent, as implemented by the LassoCV class, and
Lars (least angle regression) as implemented by the LassoLarsCV class.
Both algorithms give roughly the same results. They differ with regard
to their execution speed and sources of numerical errors.
Lars computes a path solution only at each kink in the path. As a
result, it is very efficient when there are only a few kinks, which is
the case if there are few features or samples. It is also able to
compute the full path without setting any meta-parameter. By contrast,
coordinate descent computes the path points on a pre-specified
grid (here we use the default). Thus it is more efficient if the number
of grid points is smaller than the number of kinks in the path. Such a
strategy can be interesting if the number of features is really large
and there are enough samples to select a large number of them. In terms
of numerical errors, Lars will accumulate more errors for heavily
correlated variables, while the coordinate descent algorithm will only
sample the path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested cross-validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
##############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
alpha_ = model.alpha_
alphas_ = model.alphas_
criterion_ = model.criterion_
plt.plot(-np.log10(alphas_), criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(-np.log10(alpha_), color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel('-log(alpha)')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
##############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.alphas_)
plt.figure()
ymin, ymax = 2300, 3800
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
##############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.cv_alphas_)
plt.figure()
plt.plot(m_log_alphas, model.cv_mse_path_, ':')
plt.plot(m_log_alphas, model.cv_mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
| bsd-3-clause |
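As noted in the docstring above, the alpha chosen by cross-validation is itself a fitted quantity, so its performance should be judged with nested cross-validation. A minimal sketch on the same diabetes data follows; it assumes a scikit-learn recent enough to provide sklearn.model_selection (older releases used sklearn.cross_validation).
from sklearn import datasets
from sklearn.linear_model import LassoCV
from sklearn.model_selection import cross_val_score
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Inner loop: LassoCV picks alpha by 20-fold CV on each training split.
# Outer loop: 5-fold CV scores the whole procedure on held-out folds, so
# the reported R^2 is not biased by the alpha selection.
inner = LassoCV(cv=20)
scores = cross_val_score(inner, X, y, cv=5)
print("nested CV R^2: %.3f +/- %.3f" % (scores.mean(), scores.std()))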
ebraunkeller/kerouac-bobblehead | ProcPublicView.py | 1 | 1285 | # Process the attendance table for a public viewitems
# join attendance with demographics and
# aggregate out the individual students
import pandas as pd
import csv
# map the codes to the definitions:
frl_dict = {"00":'Not',"01":'Free',"02":'Red'}
lep_dict = {"00":'Not',"01":'LEP',"02":'LEP',"03":'LEP'}
AttFile="C:\Users\Elaine\Documents\BKL\Lowell\\2016-2017\TableauFormat\DistrictAttend.csv"
DemFile = "C:\Users\Elaine\Documents\BKL\Lowell\\2016-2017\TableauFormat\AllStudents.csv"
OutFile = "C:\Users\Elaine\Documents\BKL\Lowell\\2016-2017\TableauFormat\PublicAttend.csv"
dfA=pd.read_csv(AttFile,dtype='str')
dfS=pd.read_csv(DemFile,dtype='str')
# remove spaces on the field names
dfS.columns=['LASID','Gender','Race','Ethnicity','SchoolID','SchoolName','Grade','HRTeacher','PrimaryLanguage','HomeLang','LimitedEnglish','FRLStatus','SPEDStatus']
#Join the tables
df = pd.merge(dfA,dfS,left_on='Lasid',right_on='LASID')
# Replace the entries with meaninful values
df.FRLStatus= df.FRLStatus.replace(frl_dict)
df.LimitedEnglish= df.LimitedEnglish.replace(lep_dict)
df0=df[['Date','Status','Day','SchoolYear','Gender','Race','Ethnicity','SchoolName','Grade',
'SchoolID','LimitedEnglish','FRLStatus','SPEDStatus']]
df0.to_csv(OutFile)
| mit |
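The header comment above says the individual students are aggregated out, but the script only drops the ID column. A hedged sketch of an explicit aggregation step follows; the file name and the grouping columns are assumptions based on the fields selected above.
import pandas as pd
# Hypothetical path mirroring OutFile in the script above.
public_csv = "PublicAttend.csv"
df0 = pd.read_csv(public_csv, dtype='str')
# Collapse rows to counts per date/school/grade/status so that no output
# row corresponds to a single student.
agg = (df0.groupby(['Date', 'SchoolName', 'Grade', 'Status'])
          .size()
          .reset_index(name='StudentCount'))
agg.to_csv("PublicAttendAgg.csv", index=False)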
xyguo/scikit-learn | sklearn/decomposition/truncated_svd.py | 19 | 7884 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck
# Olivier Grisel <olivier.grisel@ensta.org>
# Michael Becker <mike@beckerfuffle.com>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional (default 5)
Number of iterations for randomized SVD solver. Not used by ARPACK.
The default is larger than the default in `randomized_svd` to handle
sparse matrices that may have large slowly decaying spectrum.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=7,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.0782... 0.0552... 0.0544... 0.0499... 0.0413...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.279...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
| bsd-3-clause |
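A short usage sketch of the estimator above in the LSA setting its docstring describes; the toy corpus is made up, and TfidfVectorizer is the companion vectorizer mentioned there.
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
docs = ["the cat sat on the mat",
        "the dog sat on the log",
        "cats and dogs are pets",
        "logs and mats are objects"]
tfidf = TfidfVectorizer()
X = tfidf.fit_transform(docs)                  # sparse term-document matrix
lsa = TruncatedSVD(n_components=2, algorithm="randomized", random_state=42)
X_lsa = lsa.fit_transform(X)                   # dense (n_docs, 2) coordinates
print(X_lsa.shape)
print(lsa.explained_variance_ratio_.sum())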
jreese/ircstat | ircstat/defaults.py | 1 | 4274 | # Copyright 2013 John Reese
# Licensed under the MIT license
######################
# Parsing options
######################
# the regex to parse data from irc log filenames.
# must contain two named matching groups:
# channel: the name of the channel
# date: the date of the conversation
filename_regex = r'(?P<channel>#?[a-z]+)_(?P<date>\d{8}).log'
# the format of the date content in the matched filename.
# must follow python's datetime.strptime() format, as defined at
# http://docs.python.org/2/library/datetime.html#strftime-strptime-behavior
filename_date_format = r'%Y%m%d'
# character encoding used by the log files
# 'latin1' is the default, but 'utf-8' is probably a good fallback
log_encoding = 'latin1'
# a regex component to match a timestamp
# only required by the default log_*_regex values
timestamp_regex = r'^\[(?P<time>\d\d:\d\d:\d\d)\]'
# a regex component to match a nick
# only required by the default log_*_regex values
nick_regex = r'(?P<nick>\S+)'
# regex to match a line containing a join action
# must contain these named matching groups:
# time: the timestamp of the action
# nick: the nick that joined
# may optionally contain these named matching groups:
# hostmask: the hostmask of the nick that joined
log_join_regex = r'%s \*\*\* Joins: %s \((?P<hostmask>[^)]+)\)'\
% (timestamp_regex, nick_regex)
# regex to match a line containing a part action
# must contain these named matching groups:
# time: the timestamp of the action
# nick: the nick that left
# may optionally contain these named matching groups:
# hostmask: the hostmask of the nick that left
# reason: the reason that the nick left
log_part_regex = r'%s \*\*\* Parts: %s \((?P<hostmask>[^)]+)\) '\
'\((?P<reason>[^)]*)\)' % (timestamp_regex, nick_regex)
# regex to match a line containing a quit action
# must contain these named matching groups:
# time: the timestamp of the action
# nick: the nick that quit
# may optionally contain these named matching groups:
# hostmask: the hostmask of the nick that quit
# reason: the reason that the nick quit
log_quit_regex = r'%s \*\*\* Quits: %s \((?P<hostmask>[^)]+)\) '\
'\((?P<reason>[^)]*)\)' % (timestamp_regex, nick_regex)
# regex to match a line containing a user /me action
# must contain these named matching groups:
# time: the timestamp of the action
# nick: the nick that sent the action
# content: the contents of the action
log_action_regex = r'%s \* %s (?P<content>.*)' % (timestamp_regex, nick_regex)
# regex to match a line containing a user message
# must contain these named matching groups:
# time: the timestamp of the message
# nick: the nick that sent the message
# content: the contents of the message
log_message_regex = r'%s <%s> (?P<content>.*)' % (timestamp_regex, nick_regex)
# the format of the time content in the matched log timestamp
# must follow python's datetime.strptime() format, as defined at
# http://docs.python.org/2/library/datetime.html#strftime-strptime-behavior
log_timestamp_format = r'%H:%M:%S'
######################
# User/nick options
######################
# list of nicks to be treated as bots rather than humans
# nicks should always be lowercase
bots = ['chanserv']
# mapping of nick aliases, for users that use multiple or alternate nicks
# keys consist of regexes, and are forced to match the entire nick
# use .* to match arbitrary prefixes or suffixes
# values should be the primary nick to use in place of the aliased nick
# note: a large number of aliases may impact time spent parsing log files
aliases = {}
# list of nicks, or regexes to match to nicks, that should be ignored
ignore = []
######################
# Graphing options
######################
# image format to use as output from matplotlib
image_format = 'png'
# enable matplotlib's XKCD mode, where graphs will look hand-drawn
xkcd_mode = True
# for time-series graphs, how many days back should the graphs show
graph_days = 180
# for graphs comparing multiple users, how many of the "top" users to show
graph_users = 10
######################
# Plugin options
######################
# plugins to blacklist from running
# must be an iterable containing strings of plugin names,
# without the 'Plugin' suffix
plugin_blacklist = []
| mit |
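A quick check of how the parsing defaults above tokenize a message line; the sample line is an assumption shaped to match log_message_regex and log_timestamp_format.
import re
from datetime import datetime
timestamp_regex = r'^\[(?P<time>\d\d:\d\d:\d\d)\]'
nick_regex = r'(?P<nick>\S+)'
log_message_regex = r'%s <%s> (?P<content>.*)' % (timestamp_regex, nick_regex)
log_timestamp_format = r'%H:%M:%S'
line = "[14:03:22] <jreese> pushed new ircstat defaults"
match = re.match(log_message_regex, line)
if match:
    when = datetime.strptime(match.group('time'), log_timestamp_format).time()
    print(when, match.group('nick'), match.group('content'))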
googleapis/python-bigquery | samples/snippets/jupyter_tutorial_test.py | 1 | 5407 | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
IPython = pytest.importorskip("IPython")
interactiveshell = pytest.importorskip("IPython.terminal.interactiveshell")
tools = pytest.importorskip("IPython.testing.tools")
matplotlib = pytest.importorskip("matplotlib")
# Ignore semicolon lint warning because semicolons are used in notebooks
# flake8: noqa E703
@pytest.fixture(scope="session")
def ipython():
config = tools.default_config()
config.TerminalInteractiveShell.simple_prompt = True
shell = interactiveshell.TerminalInteractiveShell.instance(config=config)
return shell
@pytest.fixture()
def ipython_interactive(request, ipython):
"""Activate IPython's builtin hooks
for the duration of the test scope.
"""
with ipython.builtin_trap:
yield ipython
def _strip_region_tags(sample_text):
"""Remove blank lines and region tags from sample text"""
magic_lines = [
line for line in sample_text.split("\n") if len(line) > 0 and "# [" not in line
]
return "\n".join(magic_lines)
def test_jupyter_tutorial(ipython):
matplotlib.use("agg")
ip = IPython.get_ipython()
ip.extension_manager.load_extension("google.cloud.bigquery")
sample = """
# [START bigquery_jupyter_magic_gender_by_year]
%%bigquery
SELECT
source_year AS year,
COUNT(is_male) AS birth_count
FROM `bigquery-public-data.samples.natality`
GROUP BY year
ORDER BY year DESC
LIMIT 15
# [END bigquery_jupyter_magic_gender_by_year]
"""
result = ip.run_cell(_strip_region_tags(sample))
result.raise_error() # Throws an exception if the cell failed.
sample = """
# [START bigquery_jupyter_magic_gender_by_year_var]
%%bigquery total_births
SELECT
source_year AS year,
COUNT(is_male) AS birth_count
FROM `bigquery-public-data.samples.natality`
GROUP BY year
ORDER BY year DESC
LIMIT 15
# [END bigquery_jupyter_magic_gender_by_year_var]
"""
result = ip.run_cell(_strip_region_tags(sample))
result.raise_error() # Throws an exception if the cell failed.
assert "total_births" in ip.user_ns # verify that variable exists
total_births = ip.user_ns["total_births"]
# [START bigquery_jupyter_plot_births_by_year]
total_births.plot(kind="bar", x="year", y="birth_count")
# [END bigquery_jupyter_plot_births_by_year]
sample = """
# [START bigquery_jupyter_magic_gender_by_weekday]
%%bigquery births_by_weekday
SELECT
wday,
SUM(CASE WHEN is_male THEN 1 ELSE 0 END) AS male_births,
SUM(CASE WHEN is_male THEN 0 ELSE 1 END) AS female_births
FROM `bigquery-public-data.samples.natality`
WHERE wday IS NOT NULL
GROUP BY wday
ORDER BY wday ASC
# [END bigquery_jupyter_magic_gender_by_weekday]
"""
result = ip.run_cell(_strip_region_tags(sample))
result.raise_error() # Throws an exception if the cell failed.
assert "births_by_weekday" in ip.user_ns # verify that variable exists
births_by_weekday = ip.user_ns["births_by_weekday"]
# [START bigquery_jupyter_plot_births_by_weekday]
births_by_weekday.plot(x="wday")
# [END bigquery_jupyter_plot_births_by_weekday]
# [START bigquery_jupyter_import_and_client]
from google.cloud import bigquery
client = bigquery.Client()
# [END bigquery_jupyter_import_and_client]
# [START bigquery_jupyter_query_plurality_by_year]
sql = """
SELECT
plurality,
COUNT(1) AS count,
year
FROM
`bigquery-public-data.samples.natality`
WHERE
NOT IS_NAN(plurality) AND plurality > 1
GROUP BY
plurality, year
ORDER BY
count DESC
"""
df = client.query(sql).to_dataframe()
df.head()
# [END bigquery_jupyter_query_plurality_by_year]
# [START bigquery_jupyter_plot_plurality_by_year]
pivot_table = df.pivot(index="year", columns="plurality", values="count")
pivot_table.plot(kind="bar", stacked=True, figsize=(15, 7))
# [END bigquery_jupyter_plot_plurality_by_year]
# [START bigquery_jupyter_query_births_by_gestation]
sql = """
SELECT
gestation_weeks,
COUNT(1) AS count
FROM
`bigquery-public-data.samples.natality`
WHERE
NOT IS_NAN(gestation_weeks) AND gestation_weeks <> 99
GROUP BY
gestation_weeks
ORDER BY
gestation_weeks
"""
df = client.query(sql).to_dataframe()
# [END bigquery_jupyter_query_births_by_gestation]
# [START bigquery_jupyter_plot_births_by_gestation]
ax = df.plot(kind="bar", x="gestation_weeks", y="count", figsize=(15, 7))
ax.set_title("Count of Births by Gestation Weeks")
ax.set_xlabel("Gestation Weeks")
ax.set_ylabel("Count")
# [END bigquery_jupyter_plot_births_by_gestation]
| apache-2.0 |
bikong2/scikit-learn | sklearn/utils/tests/test_fixes.py | 281 | 1829 | # Authors: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
#http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
| bsd-3-clause |
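The expit test above compares against the explicit logistic formula; a small sketch of why the library routine is preferred, assuming scipy.special.expit is available (the printed values follow from float64 overflow behaviour).
import numpy as np
from scipy.special import expit
x = np.array([-1000.0, 0.0, 1000.0])
# The naive formula evaluates exp(1000) = inf for the most negative input,
# so it reaches 0.0 only through an overflow; expit avoids the warning and
# the inf intermediate while giving the same limits.
with np.errstate(over='ignore'):
    naive = 1.0 / (1.0 + np.exp(-x))
print(naive)      # [0.  0.5 1. ]
print(expit(x))   # same values, computed stably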
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/tests/series/test_timeseries.py | 6 | 31551 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
import numpy as np
from datetime import datetime, timedelta, time
import pandas as pd
import pandas.util.testing as tm
from pandas._libs.tslib import iNaT
from pandas.compat import lrange, StringIO, product
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.tseries.offsets import BDay, BMonthEnd
from pandas import (Index, Series, date_range, NaT, concat, DataFrame,
Timestamp, to_datetime, offsets,
timedelta_range)
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal, _skip_if_has_locale)
from pandas.tests.series.common import TestData
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
def assert_range_equal(left, right):
assert (left.equals(right))
assert (left.freq == right.freq)
assert (left.tz == right.tz)
class TestTimeSeries(TestData):
def test_shift(self):
shifted = self.ts.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, self.ts.index)
tm.assert_index_equal(unshifted.index, self.ts.index)
tm.assert_numpy_array_equal(unshifted.valid().values,
self.ts.values[:-1])
offset = BDay()
shifted = self.ts.shift(1, freq=offset)
unshifted = shifted.shift(-1, freq=offset)
assert_series_equal(unshifted, self.ts)
unshifted = self.ts.shift(0, freq=offset)
assert_series_equal(unshifted, self.ts)
shifted = self.ts.shift(1, freq='B')
unshifted = shifted.shift(-1, freq='B')
assert_series_equal(unshifted, self.ts)
# corner case
unshifted = self.ts.shift(0)
assert_series_equal(unshifted, self.ts)
# Shifting with PeriodIndex
ps = tm.makePeriodSeries()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
tm.assert_numpy_array_equal(unshifted.valid().values, ps.values[:-1])
shifted2 = ps.shift(1, 'B')
shifted3 = ps.shift(1, BDay())
assert_series_equal(shifted2, shifted3)
assert_series_equal(ps, shifted2.shift(-1, 'B'))
pytest.raises(ValueError, ps.shift, freq='D')
# legacy support
shifted4 = ps.shift(1, freq='B')
assert_series_equal(shifted2, shifted4)
shifted5 = ps.shift(1, freq=BDay())
assert_series_equal(shifted5, shifted4)
# 32-bit taking
# GH 8129
index = date_range('2000-01-01', periods=5)
for dtype in ['int32', 'int64']:
s1 = Series(np.arange(5, dtype=dtype), index=index)
p = s1.iloc[1]
result = s1.shift(periods=p)
expected = Series([np.nan, 0, 1, 2, 3], index=index)
assert_series_equal(result, expected)
# xref 8260
# with tz
s = Series(date_range('2000-01-01 09:00:00', periods=5,
tz='US/Eastern'), name='foo')
result = s - s.shift()
exp = Series(TimedeltaIndex(['NaT'] + ['1 days'] * 4), name='foo')
assert_series_equal(result, exp)
# incompat tz
s2 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz='CET'), name='foo')
pytest.raises(ValueError, lambda: s - s2)
def test_shift2(self):
ts = Series(np.random.randn(5),
index=date_range('1/1/2000', periods=5, freq='H'))
result = ts.shift(1, freq='5T')
exp_index = ts.index.shift(1, freq='5T')
tm.assert_index_equal(result.index, exp_index)
# GH #1063, multiple of same base
result = ts.shift(1, freq='4H')
exp_index = ts.index + offsets.Hour(4)
tm.assert_index_equal(result.index, exp_index)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
pytest.raises(ValueError, idx.shift, 1)
def test_shift_dst(self):
# GH 13926
dates = date_range('2016-11-06', freq='H', periods=10, tz='US/Eastern')
s = Series(dates)
res = s.shift(0)
tm.assert_series_equal(res, s)
assert res.dtype == 'datetime64[ns, US/Eastern]'
res = s.shift(1)
exp_vals = [NaT] + dates.asobject.values.tolist()[:9]
exp = Series(exp_vals)
tm.assert_series_equal(res, exp)
assert res.dtype == 'datetime64[ns, US/Eastern]'
res = s.shift(-2)
exp_vals = dates.asobject.values.tolist()[2:] + [NaT, NaT]
exp = Series(exp_vals)
tm.assert_series_equal(res, exp)
assert res.dtype == 'datetime64[ns, US/Eastern]'
for ex in [10, -10, 20, -20]:
res = s.shift(ex)
exp = Series([NaT] * 10, dtype='datetime64[ns, US/Eastern]')
tm.assert_series_equal(res, exp)
assert res.dtype == 'datetime64[ns, US/Eastern]'
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodSeries()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_series_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_series_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=BDay())
assert_series_equal(shifted, shifted3)
pytest.raises(ValueError, ps.tshift, freq='M')
# DatetimeIndex
shifted = self.ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_series_equal(self.ts, unshifted)
shifted2 = self.ts.tshift(freq=self.ts.index.freq)
assert_series_equal(shifted, shifted2)
inferred_ts = Series(self.ts.values, Index(np.asarray(self.ts.index)),
name='ts')
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_series_equal(shifted, self.ts.tshift(1))
assert_series_equal(unshifted, inferred_ts)
no_freq = self.ts[[0, 5, 7]]
pytest.raises(ValueError, no_freq.tshift)
def test_truncate(self):
offset = BDay()
ts = self.ts[::3]
start, end = self.ts.index[3], self.ts.index[6]
start_missing, end_missing = self.ts.index[2], self.ts.index[7]
# neither specified
truncated = ts.truncate()
assert_series_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
assert_series_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
assert_series_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
assert_series_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
assert_series_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
assert_series_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
assert_series_equal(truncated, expected)
# corner case, empty series returned
truncated = ts.truncate(after=self.ts.index[0] - offset)
assert (len(truncated) == 0)
truncated = ts.truncate(before=self.ts.index[-1] + offset)
assert (len(truncated) == 0)
pytest.raises(ValueError, ts.truncate,
before=self.ts.index[-1] + offset,
after=self.ts.index[0] - offset)
def test_asfreq(self):
ts = Series([0., 1., 2.], index=[datetime(2009, 10, 30), datetime(
2009, 11, 30), datetime(2009, 12, 31)])
daily_ts = ts.asfreq('B')
monthly_ts = daily_ts.asfreq('BM')
tm.assert_series_equal(monthly_ts, ts)
daily_ts = ts.asfreq('B', method='pad')
monthly_ts = daily_ts.asfreq('BM')
tm.assert_series_equal(monthly_ts, ts)
daily_ts = ts.asfreq(BDay())
monthly_ts = daily_ts.asfreq(BMonthEnd())
tm.assert_series_equal(monthly_ts, ts)
result = ts[:0].asfreq('M')
assert len(result) == 0
assert result is not ts
daily_ts = ts.asfreq('D', fill_value=-1)
result = daily_ts.value_counts().sort_index()
expected = Series([60, 1, 1, 1],
index=[-1.0, 2.0, 1.0, 0.0]).sort_index()
tm.assert_series_equal(result, expected)
def test_asfreq_datetimeindex_empty_series(self):
# GH 14320
expected = Series(index=pd.DatetimeIndex(
["2016-09-29 11:00"])).asfreq('H')
result = Series(index=pd.DatetimeIndex(["2016-09-29 11:00"]),
data=[3]).asfreq('H')
tm.assert_index_equal(expected.index, result.index)
def test_diff(self):
# Just run the function
self.ts.diff()
# int dtype
a = 10000000000000000
b = a + 1
s = Series([a, b])
rs = s.diff()
assert rs[1] == 1
# neg n
rs = self.ts.diff(-1)
xp = self.ts - self.ts.shift(-1)
assert_series_equal(rs, xp)
# 0
rs = self.ts.diff(0)
xp = self.ts - self.ts
assert_series_equal(rs, xp)
# datetime diff (GH3100)
s = Series(date_range('20130102', periods=5))
rs = s - s.shift(1)
xp = s.diff()
assert_series_equal(rs, xp)
# timedelta diff
nrs = rs - rs.shift(1)
nxp = xp.diff()
assert_series_equal(nrs, nxp)
# with tz
s = Series(
date_range('2000-01-01 09:00:00', periods=5,
tz='US/Eastern'), name='foo')
result = s.diff()
assert_series_equal(result, Series(
TimedeltaIndex(['NaT'] + ['1 days'] * 4), name='foo'))
def test_pct_change(self):
rs = self.ts.pct_change(fill_method=None)
assert_series_equal(rs, self.ts / self.ts.shift(1) - 1)
rs = self.ts.pct_change(2)
filled = self.ts.fillna(method='pad')
assert_series_equal(rs, filled / filled.shift(2) - 1)
rs = self.ts.pct_change(fill_method='bfill', limit=1)
filled = self.ts.fillna(method='bfill', limit=1)
assert_series_equal(rs, filled / filled.shift(1) - 1)
rs = self.ts.pct_change(freq='5D')
filled = self.ts.fillna(method='pad')
assert_series_equal(rs, filled / filled.shift(freq='5D') - 1)
def test_pct_change_shift_over_nas(self):
s = Series([1., 1.5, np.nan, 2.5, 3.])
chg = s.pct_change()
expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2])
assert_series_equal(chg, expected)
def test_autocorr(self):
# Just run the function
corr1 = self.ts.autocorr()
# Now run it with the lag parameter
corr2 = self.ts.autocorr(lag=1)
# corr() with lag needs Series of at least length 2
if len(self.ts) <= 2:
assert np.isnan(corr1)
assert np.isnan(corr2)
else:
assert corr1 == corr2
# Choose a random lag between 1 and length of Series - 2
# and compare the result with the Series corr() function
n = 1 + np.random.randint(max(1, len(self.ts) - 2))
corr1 = self.ts.corr(self.ts.shift(n))
corr2 = self.ts.autocorr(lag=n)
# corr() with lag needs Series of at least length 2
if len(self.ts) <= 2:
assert np.isnan(corr1)
assert np.isnan(corr2)
else:
assert corr1 == corr2
def test_first_last_valid(self):
ts = self.ts.copy()
ts[:5] = np.NaN
index = ts.first_valid_index()
assert index == ts.index[5]
ts[-5:] = np.NaN
index = ts.last_valid_index()
assert index == ts.index[-6]
ts[:] = np.nan
assert ts.last_valid_index() is None
assert ts.first_valid_index() is None
ser = Series([], index=[])
assert ser.last_valid_index() is None
assert ser.first_valid_index() is None
# GH12800
empty = Series()
assert empty.last_valid_index() is None
assert empty.first_valid_index() is None
def test_mpl_compat_hack(self):
result = self.ts[:, np.newaxis]
expected = self.ts.values[:, np.newaxis]
assert_almost_equal(result, expected)
def test_timeseries_coercion(self):
idx = tm.makeDateIndex(10000)
ser = Series(np.random.randn(len(idx)), idx.astype(object))
assert ser.index.is_all_dates
assert isinstance(ser.index, DatetimeIndex)
def test_empty_series_ops(self):
# see issue #13844
a = Series(dtype='M8[ns]')
b = Series(dtype='m8[ns]')
assert_series_equal(a, a + b)
assert_series_equal(a, a - b)
assert_series_equal(a, b + a)
pytest.raises(TypeError, lambda x, y: x - y, b, a)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
assert expected.freq is not None
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
assert masked.freq is None
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([epoch + t for t in range(20)])
result = to_datetime(s, unit='s')
expected = Series([Timestamp('2013-06-09 02:42:28') + timedelta(
seconds=t) for t in range(20)])
assert_series_equal(result, expected)
s = Series([epoch + t for t in range(20)]).astype(float)
result = to_datetime(s, unit='s')
expected = Series([Timestamp('2013-06-09 02:42:28') + timedelta(
seconds=t) for t in range(20)])
assert_series_equal(result, expected)
s = Series([epoch + t for t in range(20)] + [iNaT])
result = to_datetime(s, unit='s')
expected = Series([Timestamp('2013-06-09 02:42:28') + timedelta(
seconds=t) for t in range(20)] + [NaT])
assert_series_equal(result, expected)
s = Series([epoch + t for t in range(20)] + [iNaT]).astype(float)
result = to_datetime(s, unit='s')
expected = Series([Timestamp('2013-06-09 02:42:28') + timedelta(
seconds=t) for t in range(20)] + [NaT])
assert_series_equal(result, expected)
# GH13834
s = Series([epoch + t for t in np.arange(0, 2, .25)] +
[iNaT]).astype(float)
result = to_datetime(s, unit='s')
expected = Series([Timestamp('2013-06-09 02:42:28') + timedelta(
seconds=t) for t in np.arange(0, 2, .25)] + [NaT])
assert_series_equal(result, expected)
s = concat([Series([epoch + t for t in range(20)]
).astype(float), Series([np.nan])],
ignore_index=True)
result = to_datetime(s, unit='s')
expected = Series([Timestamp('2013-06-09 02:42:28') + timedelta(
seconds=t) for t in range(20)] + [NaT])
assert_series_equal(result, expected)
result = to_datetime([1, 2, 'NaT', pd.NaT, np.nan], unit='D')
expected = DatetimeIndex([Timestamp('1970-01-02'),
Timestamp('1970-01-03')] + ['NaT'] * 3)
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError):
to_datetime([1, 2, 'foo'], unit='D')
with pytest.raises(ValueError):
to_datetime([1, 2, 111111111], unit='D')
# coerce we can process
expected = DatetimeIndex([Timestamp('1970-01-02'),
Timestamp('1970-01-03')] + ['NaT'] * 1)
result = to_datetime([1, 2, 'foo'], unit='D', errors='coerce')
tm.assert_index_equal(result, expected)
result = to_datetime([1, 2, 111111111], unit='D', errors='coerce')
tm.assert_index_equal(result, expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s')
dates = np.asarray(rng)
series = Series(dates)
assert np.issubdtype(series.dtype, np.dtype('M8[ns]'))
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00.000000\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
assert result == expected
def test_asfreq_keep_index_name(self):
# GH #9854
index_name = 'bar'
index = pd.date_range('20130101', periods=20, name=index_name)
df = pd.DataFrame([x for x in range(20)], columns=['foo'], index=index)
assert index_name == df.index.name
assert index_name == df.asfreq('10D').index.name
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
tm.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
assert len(result) == 20
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
assert len(result) == 10
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
assert len(result) == 20
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
assert len(result) == 10
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
assert (rs.index.hour == rng[1].hour).all()
assert (rs.index.minute == rng[1].minute).all()
assert (rs.index.second == rng[1].second).all()
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.loc[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.loc['1/4/2000':]
result = chunk.loc[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
assert len(rs) == 0
def test_between(self):
series = Series(date_range('1/1/2000', periods=10))
left, right = series[[2, 7]]
result = series.between(left, right)
expected = (series >= left) & (series <= right)
assert_series_equal(result, expected)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert t >= stime
else:
assert t > stime
if inc_end:
assert t <= etime
else:
assert t < etime
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert (t >= stime) or (t <= etime)
else:
assert (t > stime) or (t <= etime)
if inc_end:
assert (t <= etime) or (t >= stime)
else:
assert (t < etime) or (t >= stime)
def test_between_time_types(self):
# GH11818
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
pytest.raises(ValueError, rng.indexer_between_time,
datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5))
frame = DataFrame({'A': 0}, index=rng)
pytest.raises(ValueError, frame.between_time,
datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5))
series = Series(0, index=rng)
pytest.raises(ValueError, series.between_time,
datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5))
def test_between_time_formats(self):
# GH11818
_skip_if_has_locale()
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
strings = [("2:00", "2:30"), ("0200", "0230"), ("2:00am", "2:30am"),
("0200am", "0230am"), ("2:00:00", "2:30:00"),
("020000", "023000"), ("2:00:00am", "2:30:00am"),
("020000am", "023000am")]
expected_length = 28
for time_string in strings:
assert len(ts.between_time(*time_string)) == expected_length
def test_to_period(self):
from pandas.core.indexes.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
exp.index = exp.index.asfreq('M')
tm.assert_index_equal(pts.index, exp.index.asfreq('M'))
assert_series_equal(pts, exp)
# GH 7606 without freq
idx = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'])
exp_idx = pd.PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'], freq='D')
s = Series(np.random.randn(4), index=idx)
expected = s.copy()
expected.index = exp_idx
assert_series_equal(s.to_period(), expected)
df = DataFrame(np.random.randn(4, 4), index=idx, columns=idx)
expected = df.copy()
expected.index = exp_idx
assert_frame_equal(df.to_period(), expected)
expected = df.copy()
expected.columns = exp_idx
assert_frame_equal(df.to_period(axis=1), expected)
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
assert '2000-01-01' in result
def test_series_map_box_timedelta(self):
# GH 11349
s = Series(timedelta_range('1 day 1 s', periods=5, freq='h'))
def f(x):
return x.total_seconds()
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_asfreq_resample_set_correct_freq(self):
# GH5613
# we test if .asfreq() and .resample() set the correct value for .freq
df = pd.DataFrame({'date': ["2012-01-01", "2012-01-02", "2012-01-03"],
'col': [1, 2, 3]})
df = df.set_index(pd.to_datetime(df.date))
# testing the settings before calling .asfreq() and .resample()
assert df.index.freq is None
assert df.index.inferred_freq == 'D'
# does .asfreq() set .freq correctly?
assert df.asfreq('D').index.freq == 'D'
# does .resample() set .freq correctly?
assert df.resample('D').asfreq().index.freq == 'D'
def test_pickle(self):
# GH4606
p = tm.round_trip_pickle(NaT)
assert p is NaT
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = tm.round_trip_pickle(idx)
assert idx_p[0] == idx[0]
assert idx_p[1] is NaT
assert idx_p[2] == idx[2]
# GH11002
# don't infer freq
idx = date_range('1750-1-1', '2050-1-1', freq='7D')
idx_p = tm.round_trip_pickle(idx)
tm.assert_index_equal(idx, idx_p)
def test_setops_preserve_freq(self):
for tz in [None, 'Asia/Tokyo', 'US/Eastern']:
rng = date_range('1/1/2000', '1/1/2002', name='idx', tz=tz)
result = rng[:50].union(rng[50:100])
assert result.name == rng.name
assert result.freq == rng.freq
assert result.tz == rng.tz
result = rng[:50].union(rng[30:100])
assert result.name == rng.name
assert result.freq == rng.freq
assert result.tz == rng.tz
result = rng[:50].union(rng[60:100])
assert result.name == rng.name
assert result.freq is None
assert result.tz == rng.tz
result = rng[:50].intersection(rng[25:75])
assert result.name == rng.name
assert result.freqstr == 'D'
assert result.tz == rng.tz
nofreq = DatetimeIndex(list(rng[25:75]), name='other')
result = rng[:50].union(nofreq)
assert result.name is None
assert result.freq == rng.freq
assert result.tz == rng.tz
result = rng[:50].intersection(nofreq)
assert result.name is None
assert result.freq == rng.freq
assert result.tz == rng.tz
def test_min_max(self):
rng = date_range('1/1/2000', '12/31/2000')
rng2 = rng.take(np.random.permutation(len(rng)))
the_min = rng2.min()
the_max = rng2.max()
assert isinstance(the_min, Timestamp)
assert isinstance(the_max, Timestamp)
assert the_min == rng[0]
assert the_max == rng[-1]
assert rng.min() == rng[0]
assert rng.max() == rng[-1]
def test_min_max_series(self):
rng = date_range('1/1/2000', periods=10, freq='4h')
lvls = ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C', 'C']
df = DataFrame({'TS': rng, 'V': np.random.randn(len(rng)), 'L': lvls})
result = df.TS.max()
exp = Timestamp(df.TS.iat[-1])
assert isinstance(result, Timestamp)
assert result == exp
result = df.TS.min()
exp = Timestamp(df.TS.iat[0])
assert isinstance(result, Timestamp)
assert result == exp
def test_from_M8_structured(self):
dates = [(datetime(2012, 9, 9, 0, 0), datetime(2012, 9, 8, 15, 10))]
arr = np.array(dates,
dtype=[('Date', 'M8[us]'), ('Forecasting', 'M8[us]')])
df = DataFrame(arr)
assert df['Date'][0] == dates[0][0]
assert df['Forecasting'][0] == dates[0][1]
s = Series(arr['Date'])
        assert isinstance(s[0], Timestamp)
assert s[0] == dates[0][0]
s = Series.from_array(arr['Date'], Index([0]))
assert s[0] == dates[0][0]
def test_get_level_values_box(self):
from pandas import MultiIndex
dates = date_range('1/1/2000', periods=4)
levels = [dates, [0, 1]]
labels = [[0, 0, 1, 1, 2, 2, 3, 3], [0, 1, 0, 1, 0, 1, 0, 1]]
index = MultiIndex(levels=levels, labels=labels)
assert isinstance(index.get_level_values(0)[0], Timestamp)
| mit |
nlpub/mnogoznal | mnogoznal/wsd.py | 1 | 9278 | import abc
import csv
from collections import namedtuple, defaultdict, OrderedDict, Counter
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.metrics.pairwise import cosine_similarity as sim
from sklearn.pipeline import Pipeline
STOP_POS = {'CONJ', 'INTJ', 'PART', 'PR', 'UNKNOWN'}
Synset = namedtuple('Synset', 'id synonyms hypernyms bag')
class Inventory(object):
"""Sense inventory representation and loader."""
synsets = {}
index = defaultdict(list)
def __init__(self, inventory_path):
"""
During the construction, BaseWSD parses the given sense inventory file.
"""
def field_to_bag(field):
return {word: freq for record in field.split(', ')
for word, freq in (self.lexeme(record),)
if record}
with open(inventory_path, 'r', encoding='utf-8', newline='') as f:
reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
id = row[0]
synonyms = field_to_bag(row[2])
hypernyms = field_to_bag(row[4])
self.synsets[id] = Synset(
id=id,
synonyms=synonyms,
hypernyms=hypernyms,
bag={**synonyms, **hypernyms}
)
for word in self.synsets[id].bag:
self.index[word].append(id)
def lexeme(self, record):
"""
Parse the sense representations like 'word#sid:freq'.
Actually, we do not care about the sid field because
we use synset identifiers instead.
"""
if '#' in record:
word, tail = record.split('#', 1)
else:
word, tail = record, None
if tail:
if ':' in tail:
sid, tail = tail.split(':', 1)
else:
sid, tail = tail, None
if tail:
freq = float(tail)
else:
freq = 1
return word, freq
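# Illustrative sketch, not part of the original module: how Inventory.lexeme
# splits sense records of the form 'word#sid:freq'. The records below are made
# up; lexeme() does not touch instance state, so no inventory file is needed.
def _example_lexeme_parsing():
    inv = Inventory.__new__(Inventory)  # skip __init__, no file parsing
    assert inv.lexeme('bank#2:7.5') == ('bank', 7.5)
    assert inv.lexeme('bank#2') == ('bank', 1)
    assert inv.lexeme('bank') == ('bank', 1)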
Span = namedtuple('Span', 'token pos lemma index')
class BaseWSD(object):
"""
    Base class for word sense disambiguation routines; do not use directly.
Descendant classes must implement the disambiguate_word() method.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, inventory):
self.inventory = inventory
def lemmatize(self, sentence):
"""
This method transforms the given sentence into the dict that
maps the word indices to their lemmas. It also excludes those
        words whose part of speech is in the stop list.
"""
return {i: lemma for i, (_, lemma, pos) in enumerate(sentence)
if pos not in STOP_POS}
@abc.abstractmethod
def disambiguate_word(self, sentence, index):
"""
Return word sense identifier for the given word in the sentence.
"""
if not sentence or not isinstance(sentence, list):
raise ValueError('sentence should be a list')
if not isinstance(index, int) or index < 0 or index >= len(sentence):
raise ValueError('index should be in [0...%d]' % len(sentence))
def disambiguate(self, sentence):
"""
Return word sense identifiers corresponding to the words
in the given sentence.
"""
result = OrderedDict()
for index, span in enumerate(sentence):
# here, span is (token, pos, lemma), but we also need index
span = Span(*span, index)
result[span] = self.disambiguate_word(sentence, index)
return result
class OneBaseline(BaseWSD):
"""
    A simple baseline that treats every word as monosemous. Not thread-safe.
"""
counter = {}
def __init__(self):
super().__init__(None)
def disambiguate_word(self, sentence, index):
super().disambiguate_word(sentence, index)
word, _, _ = sentence[index]
if word not in self.counter:
self.counter[word] = len(self.counter)
return str(self.counter[word])
class SingletonsBaseline(BaseWSD):
"""
A simple baseline that puts every instance into a different cluster. Not thread-safe.
"""
counter = 0
def __init__(self):
super().__init__(None)
def disambiguate_word(self, sentence, index):
super().disambiguate_word(sentence, index)
self.counter += 1
return str(self.counter)
class SparseWSD(BaseWSD):
"""
A simple sparse word sense disambiguation.
"""
sparse = Pipeline([('dict', DictVectorizer()), ('tfidf', TfidfTransformer())])
def __init__(self, inventory):
super().__init__(inventory)
self.sparse.fit([synset.bag for synset in self.inventory.synsets.values()])
def disambiguate_word(self, sentence, index):
super().disambiguate_word(sentence, index)
lemmas = self.lemmatize(sentence)
if index not in lemmas:
return
svector = self.sparse.transform(Counter(lemmas.values())) # sentence vector
def search(query):
"""
Map synset identifiers to the cosine similarity value.
This function calls the function query(id) that retrieves
the corresponding dict of words.
"""
return Counter({id: sim(svector, self.sparse.transform(query(id))).item(0)
for id in self.inventory.index[lemmas[index]]})
candidates = search(lambda id: self.inventory.synsets[id].synonyms)
# give the hypernyms a chance if nothing is found
if not candidates:
candidates = search(lambda id: self.inventory.synsets[id].bag)
if not candidates:
return
for id, _ in candidates.most_common(1):
return id
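# Minimal sketch, not part of the original module: the DictVectorizer ->
# TfidfTransformer pipeline used by SparseWSD turns bags of words into sparse
# vectors that are then compared with cosine similarity. The toy bags are made up.
def _example_sparse_similarity():
    pipeline = Pipeline([('dict', DictVectorizer()), ('tfidf', TfidfTransformer())])
    bags = [{'bank': 2, 'river': 1}, {'bank': 1, 'money': 3}]
    pipeline.fit(bags)
    query = pipeline.transform([{'river': 1, 'water': 1}])  # unseen words are ignored
    return sim(query, pipeline.transform(bags))  # higher score for the first bag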
class DenseWSD(BaseWSD):
"""
A word sense disambiguation approach that is based on SenseGram.
"""
class densedict(dict):
"""
A handy dict that transforms a synset into its dense representation.
"""
def __init__(self, synsets, sensegram):
self.synsets = synsets
self.sensegram = sensegram
def __missing__(self, id):
value = self[id] = self.sensegram(self.synsets[id].bag.keys())
return value
def __init__(self, inventory, wv):
super().__init__(inventory)
self.wv = wv
self.dense = self.densedict(self.inventory.synsets, self.sensegram)
def sensegram(self, words):
"""
This is a simple implementation of SenseGram.
It just averages the embeddings corresponding to the given words.
"""
vectors = self.words_vec(set(words))
if not vectors:
return
return np.mean(np.vstack(vectors.values()), axis=0).reshape(1, -1)
def words_vec(self, words, use_norm=False):
"""
Return a dict that maps the given words to their embeddings.
"""
if callable(getattr(self.wv, 'words_vec', None)):
return self.wv.words_vec(words, use_norm)
return {word: self.wv.word_vec(word, use_norm) for word in words if word in self.wv}
def disambiguate_word(self, sentence, index):
super().disambiguate_word(sentence, index)
lemmas = self.lemmatize(sentence)
if index not in lemmas:
return
svector = self.sensegram(lemmas.values()) # sentence vector
if svector is None:
return
# map synset identifiers to the cosine similarity value
candidates = Counter({id: sim(svector, self.dense[id]).item(0)
for id in self.inventory.index[lemmas[index]]
if self.dense[id] is not None})
if not candidates:
return
for id, _ in candidates.most_common(1):
return id
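# Sketch, not part of the original module: the SenseGram-style averaging done
# by DenseWSD.sensegram, shown with hypothetical 3-dimensional embeddings.
def _example_sensegram_average():
    toy_vectors = {'bank': np.array([1.0, 0.0, 0.0]),
                   'money': np.array([0.0, 1.0, 0.0])}
    stacked = np.vstack(list(toy_vectors.values()))
    return np.mean(stacked, axis=0).reshape(1, -1)  # -> [[0.5, 0.5, 0.0]]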
class LeskWSD(BaseWSD):
"""
    A word sense disambiguation approach that is based on the Lesk method.
"""
def __init__(self, inventory):
super().__init__(inventory)
def disambiguate_word(self, sentence, word_index):
super().disambiguate_word(sentence, word_index)
lemmas = self.lemmatize(sentence)
if word_index not in lemmas:
return
mentions_dict = dict()
for synset_number in self.inventory.index[lemmas[word_index]]:
mentions_dict[synset_number] = 0
for context_word in lemmas.values():
if context_word != lemmas[word_index]:
if context_word in self.inventory.synsets[synset_number].synonyms:
mentions_dict[synset_number] = mentions_dict[synset_number] + 1
elif context_word in self.inventory.synsets[synset_number].hypernyms:
mentions_dict[synset_number] = mentions_dict[synset_number] + \
self.inventory.synsets[synset_number].hypernyms[context_word]
if len(mentions_dict) > 0:
return max(mentions_dict, key=mentions_dict.get)
else:
return
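# Sketch, not part of the original module: the overlap-counting idea behind
# LeskWSD, using made-up synonym bags for two hypothetical senses of 'bank'.
def _example_lesk_overlap():
    context = {'river', 'water', 'fish'}
    senses = {'bank_1': {'shore', 'river', 'water'},
              'bank_2': {'money', 'deposit', 'loan'}}
    scores = {sense: len(context & bag) for sense, bag in senses.items()}
    return max(scores, key=scores.get)  # -> 'bank_1'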
| mit |
cactusbin/nyt | matplotlib/doc/users/plotting/examples/annotate_simple04.py | 6 | 1048 | import matplotlib.pyplot as plt
plt.figure(1, figsize=(3,3))
ax = plt.subplot(111)
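# The two annotations below differ only in the sign of the arc's "rad" and in
# the relpos corner, so the arrows leave opposite corners of the "Test" box
# and curve on opposite sides while connecting the same two points.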
ann = ax.annotate("Test",
xy=(0.2, 0.2), xycoords='data',
xytext=(0.8, 0.8), textcoords='data',
size=20, va="center", ha="center",
bbox=dict(boxstyle="round4", fc="w"),
arrowprops=dict(arrowstyle="-|>",
connectionstyle="arc3,rad=0.2",
relpos=(0., 0.),
fc="w"),
)
ann = ax.annotate("Test",
xy=(0.2, 0.2), xycoords='data',
xytext=(0.8, 0.8), textcoords='data',
size=20, va="center", ha="center",
bbox=dict(boxstyle="round4", fc="w"),
arrowprops=dict(arrowstyle="-|>",
connectionstyle="arc3,rad=-0.2",
relpos=(1., 0.),
fc="w"),
)
plt.show()
| unlicense |
willhaines/scikit-rf | doc/source/conf-standard.py | 4 | 7929 | # -*- coding: utf-8 -*-
#
# skrf documentation build configuration file, created by
# sphinx-quickstart on Sun Aug 21 15:10:05 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.insert(0, os.path.abspath('../sphinxext'))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.pngmath',
'numpydoc',
#'inheritance_diagram',
'ipython_console_highlighting',
'ipython_directive',
'matplotlib.sphinxext.only_directives',
'matplotlib.sphinxext.plot_directive',
]
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'scikit-rf'
copyright = u'2013, scikit-rf development team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
setup_lines = open('../../setup.py').readlines()
version = 'vUndefined'
for l in setup_lines:
if l.startswith('VERSION'):
version = l.split("'")[1]
break
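# (The loop above expects setup.py to contain a line such as
# VERSION = '0.15.4'; the quoted value becomes `version`. The version number
# shown here is only illustrative.)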
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
#html_style = 'scipy.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_options = {
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/scikit-rf-logo-flat-docs.svg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'skrfdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'scikit-rf.tex', u'scikit-rf Documentation',
u'Alex Arsenovic', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '_static/scikit-rf-title-flat.pdf'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'\usepackage{epstopdf}'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
autosummary_generate = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'scikit-rf', u'scikit-rf Documentation',
[u'alex arsenovic'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'http://docs.python.org/': None,
'http://docs.scipy.org/doc/numpy': None,
'http://docs.scipy.org/doc/scipy/reference': None,
}
| bsd-3-clause |
toastedcornflakes/scikit-learn | sklearn/feature_extraction/tests/test_image.py | 25 | 11187 | # Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
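# Illustrative sketch, not one of the original tests: on a tiny 2x2 image,
# img_to_graph returns a sparse (n_pixels, n_pixels) matrix whose diagonal
# holds the pixel values and whose off-diagonal entries hold the gradients
# between 4-connected neighbours.
def _example_img_to_graph_tiny():
    img = np.array([[0.0, 1.0],
                    [2.0, 3.0]])
    graph = img_to_graph(img)
    assert graph.shape == (4, 4)
    return graph.toarray()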
def test_grid_to_graph():
# Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask,
dtype=np.float64)
assert_true(A.dtype == np.float64)
def test_connect_regions():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
for thr in (50, 150):
mask = face > thr
graph = img_to_graph(face, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
mask = face > 50
graph = grid_to_graph(*face.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = face > 150
graph = grid_to_graph(*face.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_face():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
face = face.astype(np.float32)
face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]
+ face[1::2, 1::2])
face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]
+ face[1::2, 1::2])
face = face.astype(np.float32)
face /= 16.0
return face
def _orange_face(face=None):
face = _downsampled_face() if face is None else face
face_color = np.zeros(face.shape + (3,))
face_color[:, :, 0] = 256 - face
face_color[:, :, 1] = 256 - face / 2
face_color[:, :, 2] = 256 - face / 4
return face_color
def _make_images(face=None):
face = _downsampled_face() if face is None else face
# make a collection of faces
images = np.zeros((3,) + face.shape)
images[0] = face
images[1] = face + 1
images[2] = face + 2
return images
downsampled_face = _downsampled_face()
orange_face = _orange_face(downsampled_face)
face_collection = _make_images(downsampled_face)
def test_extract_patches_all():
face = downsampled_face
i_h, i_w = face.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
face = orange_face
i_h, i_w = face.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
face = downsampled_face
face = face[:, 32:97]
i_h, i_w = face.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
face = downsampled_face
i_h, i_w = face.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(face, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
face = downsampled_face
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w))
face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
np.testing.assert_array_almost_equal(face, face_reconstructed)
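# Sketch, not one of the original tests: the extract/reconstruct round trip on
# a tiny synthetic image; averaging the overlapping 2x2 patches recovers the
# original (up to floating point).
def _example_patch_round_trip():
    image = np.arange(16, dtype=np.float64).reshape(4, 4)
    patches = extract_patches_2d(image, (2, 2))
    assert patches.shape == (9, 2, 2)  # (4 - 2 + 1) * (4 - 2 + 1) patches
    rebuilt = reconstruct_from_patches_2d(patches, image.shape)
    np.testing.assert_array_almost_equal(image, rebuilt)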
def test_reconstruct_patches_perfect_color():
face = orange_face
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w))
face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
np.testing.assert_array_almost_equal(face, face_reconstructed)
def test_patch_extractor_fit():
faces = face_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(faces))
def test_patch_extractor_max_patches():
faces = face_collection
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(faces) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(faces) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
faces = face_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(faces)
assert_equal(patches.shape, (len(faces) * 100, 19, 25))
def test_patch_extractor_all_patches():
faces = face_collection
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
faces = _make_images(orange_face)
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
# test same patch size for all dimensions
face = downsampled_face
i_h, i_w = face.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(face, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(ValueError, extract_patches_2d, x, (4, 1))
assert_raises(ValueError, extract_patches_2d, x, (1, 4))
| bsd-3-clause |
jbloomlab/phydms | phydmslib/weblogo.py | 1 | 56794 | """``weblogo`` module
Module for making sequence logos with the *weblogolib* package distributed with
``weblogo`` This module interfaces with the *weblogolib* API,
and so is only known to work with *weblogolib* version 3.4 and 3.5.
Written by Jesse Bloom and Mike Doud
"""
import collections
import os
import tempfile
import string
import math
import shutil
import natsort
import numpy
import matplotlib
import pylab
import PyPDF2
# the following are part of the weblogo library
import weblogolib # weblogo library
import weblogolib.colorscheme # weblogo library
import corebio.matrix # weblogo library
import corebio.utils # weblogo library
from phydmslib.constants import AA_TO_INDEX, NT_TO_INDEX
matplotlib.use('pdf')
def KyteDoolittleColorMapping(maptype='jet', reverse=True):
"""Maps amino-acid hydrophobicities to colors.
Uses the Kyte-Doolittle hydrophobicity scale defined by::
J. Kyte & R. F. Doolittle:
"A simple method for displaying the hydropathic character of a
protein." J Mol Biol, 157, 105-132
More positive values indicate higher hydrophobicity,
while more negative values indicate lower hydrophobicity.
The returned variable is the 3-tuple *(cmap, mapping_d, mapper)*:
* *cmap* is a ``pylab`` *LinearSegmentedColorMap* object.
* *mapping_d* is a dictionary keyed by the one-letter amino-acid
codes. The values are the colors in CSS2 format (e.g. #FF0000
for red) for that amino acid. The value for a stop codon
(denoted by a ``*`` character) is black (#000000).
* *mapper* is the actual *pylab.cm.ScalarMappable* object.
The optional argument *maptype* should specify a valid ``pylab`` color map.
The optional calling argument *reverse* specifies that we set up the color
map so that the most hydrophobic residue comes first (in the Kyte-Doolittle
scale the most hydrophobic comes last as it has the largest value).
This option is *True* by default as it seems more intuitive to have
charged residues red and hydrophobic ones blue.
"""
d = {'A': 1.8, 'C': 2.5, 'D': -3.5, 'E': -3.5, 'F': 2.8, 'G': -0.4,
'H': -3.2, 'I': 4.5, 'K': -3.9, 'L': 3.8, 'M': 1.9, 'N': -3.5,
'P': -1.6, 'Q': -3.5, 'R': -4.5, 'S': -0.8, 'T': -0.7, 'V': 4.2,
'W': -0.9, 'Y': -1.3}
aas = sorted(AA_TO_INDEX.keys())
hydrophobicities = [d[aa] for aa in aas]
if reverse:
hydrophobicities = [-1 * x for x in hydrophobicities]
mapper = pylab.cm.ScalarMappable(cmap=maptype)
mapper.set_clim(min(hydrophobicities), max(hydrophobicities))
mapping_d = {'*': '#000000'}
for (aa, h) in zip(aas, hydrophobicities):
tup = mapper.to_rgba(h, bytes=True)
(red, green, blue, alpha) = tup
mapping_d[aa] = '#%02x%02x%02x' % (red, green, blue)
assert len(mapping_d[aa]) == 7
cmap = mapper.get_cmap()
return (cmap, mapping_d, mapper)
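# Illustrative usage sketch, not part of the original module: pull CSS-style
# colors for a hydrophobic residue, a charged residue, and the stop character.
def _example_kd_colors():
    (cmap, mapping_d, mapper) = KyteDoolittleColorMapping(maptype='jet')
    return {aa: mapping_d[aa] for aa in ('I', 'R', '*')}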
def MWColorMapping(maptype='jet', reverse=True):
"""Maps amino-acid molecular weights to colors. Otherwise, this
function is identical to *KyteDoolittleColorMapping*
"""
d = {'A': 89, 'R': 174, 'N': 132, 'D': 133, 'C': 121, 'Q': 146, 'E': 147,
'G': 75, 'H': 155, 'I': 131, 'L': 131, 'K': 146, 'M': 149, 'F': 165,
'P': 115, 'S': 105, 'T': 119, 'W': 204, 'Y': 181, 'V': 117}
aas = sorted(AA_TO_INDEX.keys())
mws = [d[aa] for aa in aas]
if reverse:
mws = [-1 * x for x in mws]
mapper = pylab.cm.ScalarMappable(cmap=maptype)
mapper.set_clim(min(mws), max(mws))
mapping_d = {'*': '#000000'}
for (aa, h) in zip(aas, mws):
tup = mapper.to_rgba(h, bytes=True)
(red, green, blue, alpha) = tup
mapping_d[aa] = '#%02x%02x%02x' % (red, green, blue)
assert len(mapping_d[aa]) == 7
cmap = mapper.get_cmap()
return (cmap, mapping_d, mapper)
def SingleColorMapping(maptype="#999999"):
"""Maps all amino acids to the single color given by `maptype`."""
return (None, collections.defaultdict(lambda: maptype), None)
def ChargeColorMapping(maptype='jet', reverse=False):
"""Maps amino-acid charge at neutral pH to colors.
Currently does not use the keyword arguments for *maptype*
or *reverse* but accepts these arguments to be consistent
with KyteDoolittleColorMapping and MWColorMapping for now.
"""
pos_color = '#FF0000'
neg_color = '#0000FF'
neut_color = '#000000'
mapping_d = {'A': neut_color, 'R': pos_color, 'N': neut_color,
'D': neg_color, 'C': neut_color, 'Q': neut_color,
'E': neg_color, 'G': neut_color, 'H': pos_color,
'I': neut_color, 'L': neut_color, 'K': pos_color,
'M': neut_color, 'F': neut_color, 'P': neut_color,
'S': neut_color, 'T': neut_color, 'W': neut_color,
'Y': neut_color, 'V': neut_color}
return (None, mapping_d, None)
def FunctionalGroupColorMapping(maptype='jet', reverse=False):
"""Maps amino-acid functional groups to colors.
Currently does not use the keyword arguments for *maptype*
or *reverse* but accepts these arguments to be consistent
with the other mapping functions, which all get called with
these arguments.
"""
small_color = '#f76ab4'
nucleophilic_color = '#ff7f00'
hydrophobic_color = '#12ab0d'
aromatic_color = '#84380b'
acidic_color = '#e41a1c'
amide_color = '#972aa8'
basic_color = '#3c58e5'
mapping_d = {'G': small_color, 'A': small_color,
'S': nucleophilic_color, 'T': nucleophilic_color,
'C': nucleophilic_color, 'V': hydrophobic_color,
'L': hydrophobic_color, 'I': hydrophobic_color,
'M': hydrophobic_color, 'P': hydrophobic_color,
'F': aromatic_color, 'Y': aromatic_color,
'W': aromatic_color, 'D': acidic_color, 'E': acidic_color,
'H': basic_color, 'K': basic_color, 'R': basic_color,
'N': amide_color, 'Q': amide_color,
'*': '#000000'}
return (None, mapping_d, None)
def LogoPlot(sites, datatype, data, plotfile, nperline,
numberevery=10, allowunsorted=False, ydatamax=1.01,
overlay=None, fix_limits=None, # noqa: F401
fixlongname=False, overlay_cmap=None, ylimits=None,
relativestackheight=1, custom_cmap='jet', map_metric='kd',
noseparator=False, underlay=False, scalebar=False):
"""Create sequence logo showing amino-acid or nucleotide preferences.
    The height of each letter is equal to the preference of
that site for that amino acid or nucleotide.
Note that stop codons may or may not be included in the logo
depending on whether they are present in *pi_d*.
CALLING VARIABLES:
* *sites* is a list of all of the sites that are being included
in the logo, as strings. They must be in natural sort or an error
will be raised **unless** *allowunsorted* is *True*. The sites
in the plot are ordered in the same arrangement
listed in *sites*. These should be **strings**, not integers.
* *datatype* should be one of the following strings:
* 'prefs' for preferences
* 'diffprefs' for differential preferences
* 'diffsel' for differential selection
* *data* is a dictionary that has a key for every entry in
      *sites*. For every site *r* in *sites*, *data[r][x]*
is the value for character *x*.
Preferences must sum to one; differential preferences to zero.
All sites must have the same set of characters. The characters
must be the set of nucleotides or amino acids with or without
stop codons.
* *plotfile* is a string giving the name of the created PDF file
of the logo plot.
It must end in the extension ``.pdf``.
* *nperline* is the number of sites per line. Often 40 to 80 are
good values.
    * *numberevery* specifies how frequently we put labels for the sites on
      the x-axis.
* *allowunsorted* : if *True* then we allow the entries in *sites* to
**not** be sorted. This means that the logo plot will **not** have
sites in sorted order.
* *ydatamax* : meaningful only if *datatype* is 'diffprefs'. In this case,
it gives the maximum that the logo stacks extend in the positive and
negative directions. Cannot be smaller than the maximum extent of the
differential preferences.
    * *ylimits* is **mandatory** if *datatype* is 'diffsel', and meaningless
otherwise. It is *(ymin, ymax)* where *ymax > 0 > ymin*, and gives extent
of the data in the positive and negative directions. Must encompass the
actual maximum and minimum of the data.
* *overlay* : make overlay bars that indicate other properties for
      the sites. If you set it to something other than `None`, it should be
a list giving one to three properties. Each property is a tuple:
*(prop_d, shortname, longname)* where:
- *prop_d* is a dictionary keyed by site numbers that are in *sites*.
For each *r* in *sites*, *prop_d[r]* gives the value of the property,
or if there is no entry in *prop_d* for *r*, then the property
is undefined and is colored white. Properties can either be:
* continuous: in this case, all of the values should be numbers.
* discrete : in this case, all of the values should be strings.
While in practice, if you have more than a few discrete
categories (different strings), the plot will be a mess.
- *shortname* : short name for the property; will not format well
if more than 4 or 5 characters.
- *longname* : longer name for property used on axes label. Can be the
same as *shortname* if you don't need a different long name.
- In the special case where both *shortname* and *longname* are
the string `wildtype`, then rather than an overlay bar we
        write the one-character wildtype identity in `prop_d` for each
site.
* *fix_limits* is only meaningful if *overlay* is being used. In this case,
for any *shortname* in *overlay* that also keys an entry in
*fix_limits*, we use *fix_limits[shortname]* to set the limits for
*shortname*. Specifically, *fix_limits[shortname]* should be the 2-tuple
*(ticks, ticknames)*. *ticks* should be a list of tick locations
(numbers) and *ticknames* should be a list of the corresponding tick
label for that tick.
* If *fixlongname* is *True*, then we use the *longname* in *overlay*
exactly as written; otherwise we add a parenthesis indicating the
*shortname* for which this *longname* stands.
* *overlay_cmap* can be the name of a valid *matplotlib.colors.Colormap*,
such as the string *jet* or *bwr*. Otherwise, it can be *None* and a
(hopefully) good choice will be made for you.
* *custom_cmap* can be the name of a valid *matplotlib.colors.Colormap*
which will be used to color amino-acid one-letter codes in the logoplot
by the *map_metric* when either 'kd' or 'mw' is used as *map_metric*.
If *map_metric* is 'singlecolor', then should be string giving the color
to plot.
* *relativestackheight* indicates how high the letter stack is relative to
the default. The default is multiplied by this number, so make it > 1
for a higher letter stack.
* *map_metric* specifies the amino-acid property metric used to map colors
to amino-acid letters. Valid options are
'kd'(Kyte-Doolittle hydrophobicity scale, default),
'mw' (molecular weight),
'functionalgroup' (functional groups: small, nucleophilic, hydrophobic,
aromatic, basic, acidic, and amide),
'charge' (charge at neutral pH), and
'singlecolor'. If 'charge' is used, then the argument for *custom_cmap*
will no longer be meaningful, since 'charge' uses its own
blue/black/red colormapping. Similarly, 'functionalgroup' uses its own
colormapping.
* *noseparator* is only meaningful if *datatype* is 'diffsel' or
      'diffprefs'. If it is set to *True*, then we do **not** draw a black
      horizontal line to separate positive and negative values.
* *underlay* if `True` then make an underlay rather than an overlay.
* *scalebar*: show a scale bar. If `False`, no scale bar shown. Otherwise
should be a 2-tuple of `(scalebarlen, scalebarlabel)`. Currently only
works when data is `diffsel`.
"""
assert datatype in ['prefs', 'diffprefs', 'diffsel'], ("Invalid datatype "
"{0}"
.format(datatype))
# check data, and get characters
assert sites, "No sites specified"
assert set(sites) == set(data.keys()), ("Not a match between "
"sites and the keys of data")
characters = list(data[sites[0]].keys())
aas = sorted(AA_TO_INDEX.keys())
if set(characters) == set(NT_TO_INDEX.keys()):
alphabet_type = 'nt'
elif set(characters) == set(aas) or set(characters) == set(aas + ['*']):
alphabet_type = 'aa'
else:
raise ValueError("Invalid set of characters in data. Does not specify "
"either nucleotides or amino acids:\n{0}"
.format(str(characters)))
for r in sites:
if set(data[r].keys()) != set(characters):
raise ValueError("Not all sites in data have the same "
"set of characters")
firstblankchar = 'B' # char for first blank space for diffprefs / diffsel
assert firstblankchar not in characters, "firstblankchar in characters"
lastblankchar = 'b' # char for last blank space for diffprefs / diffsel
assert lastblankchar not in characters, "lastblankchar in characters"
separatorchar = '-' # separates pos and neg for diffprefs / diffsel
    assert separatorchar not in characters, "separatorchar in characters"
# height of separator as frac of total for diffprefs / diffsel
separatorheight = 0 if noseparator else 0.02
if os.path.splitext(plotfile)[1].lower() != '.pdf':
raise ValueError("plotfile must end in .pdf: %s" % plotfile)
if os.path.isfile(plotfile):
os.remove(plotfile) # remove existing plot
if not allowunsorted:
sorted_sites = natsort.natsorted(sites)
if sorted_sites != sites:
raise ValueError("sites is not properly sorted")
# Following are specifications of weblogo sizing taken from its docs
# stack width in points, set to this in weblogo call below (default 10.8)
stackwidth = 9.5
barheight = 5.5 # height of bars in points if using overlay
barspacing = 2.0 # spacing between bars in points if using overlay
# ratio of stack height:width, doesn't count part going over max value of 1
stackaspectratio = 4.4
assert relativestackheight > 0, "relativestackheight must be > 0"
stackaspectratio *= relativestackheight
if overlay:
if len(overlay) > 3:
raise ValueError("overlay cannot have more than 3 entries")
ymax = ((stackaspectratio * stackwidth + len(overlay) *
(barspacing + barheight)) /
float(stackaspectratio * stackwidth))
# effective aspect ratio for full range
aspectratio = ymax * stackaspectratio
else:
ymax = 1.0
aspectratio = stackaspectratio
rmargin = 11.5 # right margin in points, fixed by weblogo
stackheightmargin = 16 # margin between stacks in points, fixed by weblogo
showscalebar = False
try:
# write data into transfacfile (a temporary file)
(fd, transfacfile) = tempfile.mkstemp()
f = os.fdopen(fd, 'w')
# keyed by site index (0, 1, ...)
# with values ordered lists for characters from bottom to top
ordered_alphabets = {}
if datatype == 'prefs':
chars_for_string = characters
f.write('ID ID\nBF BF\nP0 %s\n' % ' '.join(chars_for_string))
for (isite, r) in enumerate(sites):
f.write('%d %s\n' % (isite, ' '.join([str(data[r][x])
for x in characters])))
pi_r = [(data[r][x], x) for x in characters]
pi_r.sort()
# order from smallest to biggest
ordered_alphabets[isite] = [tup[1] for tup in pi_r]
elif datatype == 'diffprefs':
chars_for_string = characters + [firstblankchar,
lastblankchar,
separatorchar]
            # ydatamax is the max per direction; double it for the full range
ydatamax *= 2.0
f.write('ID ID\nBF BF\nP0 %s\n' % ' '.join(chars_for_string))
for (isite, r) in enumerate(sites):
positivesum = sum((data[r][x] for x in characters
if data[r][x] > 0)) + separatorheight / 2.0
negativesum = sum((data[r][x] for x in characters
if data[r][x] < 0)) - separatorheight / 2.0
if abs(positivesum + negativesum) > 1.0e-3:
raise ValueError("Differential preferences sum of %s is "
"not close to zero for site %s"
% (positivesum + negativesum, r))
if 2.0 * positivesum > ydatamax:
raise ValueError("You need to increase ydatamax: the "
"total differential preferences sum to "
"more than the y-axis limits. Right now, "
"ydatamax is %.3f while the total "
"differential preferences are %.3f"
% (ydatamax, 2.0 * positivesum))
f.write('%d' % isite)
deltapi_r = []
for x in characters:
deltapi_r.append((data[r][x], x))
f.write(' %s' % (abs(data[r][x]) / float(ydatamax)))
deltapi_r.sort()
firstpositiveindex = 0
while deltapi_r[firstpositiveindex][0] < 0:
firstpositiveindex += 1
# order from most neg to most pos w/ blank characters and seps
ordered_alphabets[isite] = ([firstblankchar] +
[tup[1] for tup in
deltapi_r[:firstpositiveindex]] +
[separatorchar] +
[tup[1] for tup in
deltapi_r[firstpositiveindex:]] +
[lastblankchar])
f.write(' %g %g %g\n'
% (0.5 * (ydatamax + 2.0 * negativesum) / ydatamax,
0.5 * (ydatamax + 2.0 * negativesum) / ydatamax,
separatorheight)) # heights for blank chars & seps
elif datatype == 'diffsel':
assert ylimits, "You must specify ylimits if using diffsel"
(dataymin, dataymax) = ylimits
assert dataymax > 0 > dataymin, ("Invalid ylimits of {0}"
.format(ylimits))
yextent = float(dataymax - dataymin)
separatorheight *= yextent
chars_for_string = characters + [firstblankchar,
lastblankchar,
separatorchar]
f.write('ID ID\nBF BF\nP0 {0}\n'
.format(' '.join(chars_for_string)))
for (isite, r) in enumerate(sites):
positivesum = sum((data[r][x] for x in characters
if data[r][x] > 0)) + separatorheight / 2.0
negativesum = sum((data[r][x] for x in characters
if data[r][x] < 0)) - separatorheight / 2.0
assert positivesum <= dataymax, ("Data exceeds ylimits in "
"positive direction")
assert negativesum >= dataymin, ("Data exceeds ylimits in "
"negative direction")
f.write('{0}'.format(isite))
diffsel_r = []
for x in characters:
diffsel_r.append((data[r][x], x))
f.write(' {0}'.format(abs(data[r][x]) / yextent))
diffsel_r.sort()
firstpositiveindex = 0
while diffsel_r[firstpositiveindex][0] < 0:
firstpositiveindex += 1
# order from most neg to most pos with blank chars and seps
ordered_alphabets[isite] = ([firstblankchar] +
[tup[1] for tup in
diffsel_r[:firstpositiveindex]] +
[separatorchar] +
[tup[1] for tup in
diffsel_r[firstpositiveindex:]] +
[lastblankchar])
# heights for blank charactors and separators
f.write(' %g %g %g\n'
% ((negativesum - dataymin) / yextent,
(dataymax - positivesum) / yextent,
separatorheight / yextent))
# height of one unit on y-axis in points
heightofone = stackwidth * stackaspectratio / yextent
assert heightofone > 0
if scalebar:
showscalebar = (heightofone * scalebar[0], scalebar[1])
else:
raise ValueError("Invalid datatype of %s" % datatype)
f.close()
# create web logo
charstring = ''.join(chars_for_string)
assert len(charstring) == len(chars_for_string),\
("Length of charstring doesn't match length of "
"chars_for_string. Do you have unallowable multi-letter "
"characters?\n%s"
% (str(chars_for_string)))
logoprior = weblogolib.parse_prior('equiprobable', charstring, 0)
motif = _my_Motif.read_transfac(open(transfacfile), charstring)
logodata = weblogolib.LogoData.from_counts(motif.alphabet,
motif, logoprior)
logo_options = weblogolib.LogoOptions()
logo_options.fineprint = None
logo_options.stacks_per_line = nperline
logo_options.stack_aspect_ratio = aspectratio
logo_options.stack_width = stackwidth
logo_options.unit_name = 'probability'
logo_options.show_yaxis = False
logo_options.yaxis_scale = ymax
if alphabet_type == 'aa':
map_functions = {'kd': KyteDoolittleColorMapping,
'mw': MWColorMapping,
'charge': ChargeColorMapping,
'functionalgroup': FunctionalGroupColorMapping,
'singlecolor': SingleColorMapping}
map_fcn = map_functions[map_metric]
(cmap, colormapping, mapper) = map_fcn(maptype=custom_cmap)
elif alphabet_type == 'nt':
colormapping = {}
colormapping['A'] = '#008000'
colormapping['T'] = '#FF0000'
colormapping['C'] = '#0000FF'
colormapping['G'] = '#FFA500'
else:
raise ValueError("Invalid alphabet_type %s" % alphabet_type)
# black but doesn't matter. modified weblogo code replaces w/ empty ' '
colormapping[firstblankchar] = colormapping[lastblankchar] = '#000000'
colormapping[separatorchar] = '#000000' # black
color_scheme = weblogolib.colorscheme.ColorScheme()
for x in chars_for_string:
if hasattr(color_scheme, 'rules'):
color_scheme.rules.append((weblogolib.colorscheme.SymbolColor(
x, colormapping[x], "'%s'" % x)))
else:
# this part is needed for weblogo 3.4
color_scheme.groups.append((weblogolib.colorscheme.ColorGroup(
x, colormapping[x], "'%s'" % x)))
logo_options.color_scheme = color_scheme
logo_options.annotate = [{True: r, False: ''}[0 == isite % numberevery]
for (isite, r) in enumerate(sites)]
logoformat = weblogolib.LogoFormat(logodata, logo_options)
# _my_pdf_formatter is modified from weblogo version 3.4 source code
# to allow custom ordering of the symbols.
pdf = _my_pdf_formatter(logodata, logoformat, ordered_alphabets)
with open(plotfile, 'wb') as f:
f.write(pdf)
assert os.path.isfile(plotfile), ("Failed to find expected plotfile %s"
% plotfile)
finally:
# close if still open
try:
f.close()
except Exception:
pass
# remove temporary file
if os.path.isfile(transfacfile):
os.remove(transfacfile)
# now build the overlay
if overlay or showscalebar:
try:
(fdoverlay, overlayfile) = tempfile.mkstemp(suffix='.pdf')
(fdmerged, mergedfile) = tempfile.mkstemp(suffix='.pdf')
foverlay = os.fdopen(fdoverlay, 'wb')
foverlay.close() # close, but we still have the path overlayfile
fmerged = os.fdopen(fdmerged, 'wb')
logoheight = stackwidth * stackaspectratio + stackheightmargin
LogoOverlay(sites, overlayfile, overlay, nperline,
sitewidth=stackwidth, rmargin=rmargin,
logoheight=logoheight, barheight=barheight,
barspacing=barspacing, fix_limits=fix_limits,
fixlongname=fixlongname, overlay_cmap=overlay_cmap,
underlay=underlay, scalebar=showscalebar)
plotfile_f = open(plotfile, 'rb')
plot = PyPDF2.PdfFileReader(plotfile_f).getPage(0)
overlayfile_f = open(overlayfile, 'rb')
overlaypdf = PyPDF2.PdfFileReader(overlayfile_f).getPage(0)
xshift = overlaypdf.artBox[2] - plot.artBox[2]
yshift = (barheight + barspacing) * len(overlay) - 0.5 * barspacing
overlaypdf.mergeTranslatedPage(plot, xshift,
yshift * int(underlay), expand=True)
overlaypdf.compressContentStreams()
output = PyPDF2.PdfFileWriter()
output.addPage(overlaypdf)
output.write(fmerged)
fmerged.close()
shutil.move(mergedfile, plotfile)
finally:
try:
plotfile_f.close()
except Exception:
pass
try:
overlayfile_f.close()
except Exception:
pass
try:
foverlay.close()
except Exception:
pass
try:
fmerged.close()
except Exception:
pass
for fname in [overlayfile, mergedfile]:
if os.path.isfile(fname):
os.remove(fname)
#########################################################################
# The following code is modified from weblogo (version 3.4), which
# comes with the following license:
#
# -------------------------------- WebLogo --------------------------------
# Copyright (c) 2003-2004 The Regents of the University of California.
# Copyright (c) 2005 Gavin E. Crooks
# Copyright (c) 2006-2011, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory (subject to receipt of any
# required approvals from the U.S. Dept. of Energy). All rights reserved.
# This software is distributed under the new BSD Open Source License.
# <http://www.opensource.org/licenses/bsd-license.html>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# (1) Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# (3) Neither the name of the University of California, Lawrence Berkeley
# National Laboratory, U.S. Dept. of Energy nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Replicates README.txt
def _my_pdf_formatter(data, pdfformat, ordered_alphabets):
"""Generate a logo in PDF format.
Modified from weblogo version 3.4 source code.
"""
eps = _my_eps_formatter(data, pdfformat, ordered_alphabets).decode()
gs = weblogolib.GhostscriptAPI()
return gs.convert('pdf', eps, pdfformat.logo_width, pdfformat.logo_height)
def _my_eps_formatter(logodata, format, ordered_alphabets): # noqa: F401
"""Generate a logo in Encapsulated Postscript (EPS)
Modified from weblogo version 3.4 source code.
*ordered_alphabets* is a dictionary keyed by zero-indexed
consecutive sites, with values giving order of characters
from bottom to top.
"""
substitutions = {}
from_format = [
"creation_date", "logo_width", "logo_height",
"lines_per_logo", "line_width", "line_height",
"line_margin_right", "line_margin_left", "line_margin_bottom",
"line_margin_top", "title_height", "xaxis_label_height",
"creator_text", "logo_title", "logo_margin",
"stroke_width", "tic_length",
"stacks_per_line", "stack_margin",
"yaxis_label", "yaxis_tic_interval", "yaxis_minor_tic_interval",
"xaxis_label", "xaxis_tic_interval", "number_interval",
"fineprint", "shrink_fraction", "errorbar_fraction",
"errorbar_width_fraction",
"errorbar_gray", "small_fontsize", "fontsize",
"title_fontsize", "number_fontsize", "text_font",
"logo_font", "title_font",
"logo_label", "yaxis_scale", "end_type",
"debug", "show_title", "show_xaxis",
"show_xaxis_label", "show_yaxis", "show_yaxis_label",
"show_boxes", "show_errorbars", "show_fineprint",
"rotate_numbers", "show_ends", "stack_height",
"stack_width"]
for s in from_format:
substitutions[s] = getattr(format, s)
substitutions["shrink"] = str(format.show_boxes).lower()
# --------- COLORS --------------
def format_color(color):
return " ".join(("[", str(color.red), str(color.green),
str(color.blue), "]"))
substitutions["default_color"] = format_color(format.default_color)
colors = []
if hasattr(format.color_scheme, 'rules'):
grouplist = format.color_scheme.rules
else:
# this line needed for weblogo 3.4
grouplist = format.color_scheme.groups
for group in grouplist:
cf = format_color(group.color)
for s in group.symbols:
colors.append(" ("+s+") " + cf)
substitutions["color_dict"] = "\n".join(colors)
data = []
# Unit conversion. 'None' for probability units
conv_factor = None # JDB
# JDB conv_factor = std_units[format.unit_name]
data.append("StartLine")
seq_from = format.logo_start - format.first_index
seq_to = format.logo_end - format.first_index + 1
# seq_index : zero based index into sequence data
# stack_index : zero based index of visible stacks
for seq_index in range(seq_from, seq_to):
stack_index = seq_index - seq_from
if stack_index != 0 and (stack_index % format.stacks_per_line) == 0:
data.append("")
data.append("EndLine")
data.append("StartLine")
data.append("")
data.append("(%s) StartStack" % format.annotate[seq_index])
if conv_factor:
raise ValueError("Can only scale stack heights by probability.")
# stack_height = (logodata.entropy[seq_index] *
# std_units[format.unit_name])
else:
stack_height = 1.0 # Probability
# The following code modified by JDB to use ordered_alphabets
# and also to replace the "blank" characters 'b' and 'B'
# by spaces.
s_d = dict(zip(logodata.alphabet, logodata.counts[seq_index]))
s = []
for aa in ordered_alphabets[seq_index]:
if aa not in ['B', 'b']:
s.append((s_d[aa], aa))
else:
s.append((s_d[aa], ' '))
# s = [(s_d[aa], aa) for aa in ordered_alphabets[seq_index]]
# Sort by frequency. If equal frequency then reverse alphabetic
        # (So sort reverse alphabetic first, then frequency)
        # TODO: double-check that this actually works
# s = list(zip(logodata.counts[seq_index], logodata.alphabet))
# s.sort(key= lambda x: x[1])
# s.reverse()
# s.sort(key= lambda x: x[0])
# if not format.reverse_stacks: s.reverse()
C = float(sum(logodata.counts[seq_index]))
if C > 0.0:
fraction_width = 1.0
if format.scale_width:
fraction_width = logodata.weight[seq_index]
# print(fraction_width, file=sys.stderr)
for c in s:
data.append(" %f %f (%s) ShowSymbol"
% (fraction_width, c[0]*stack_height/C, c[1]))
# Draw error bar on top of logo. Replaced by DrawErrorbarFirst above.
if logodata.entropy_interval is not None and conv_factor and C > 0.0:
low, high = logodata.entropy_interval[seq_index]
center = logodata.entropy[seq_index]
low *= conv_factor
high *= conv_factor
center *= conv_factor
if high > format.yaxis_scale:
high = format.yaxis_scale
down = (center - low)
up = (high - center)
data.append(" %f %f DrawErrorbar" % (down, up))
data.append("EndStack")
data.append("")
data.append("EndLine")
substitutions["logo_data"] = "\n".join(data)
# Create and output logo
template = corebio.utils.resource_string(__name__,
'_weblogo_template.eps',
__file__).decode()
logo = string.Template(template).substitute(substitutions)
return logo.encode()
#
# End of code modified from weblogo
#########################################################################
#########################################################################
# More code modified from weblogo version 3.4 by Jesse Bloom to allow non-
# alphabetic characters in motifs.
# Copyright (c) 2005 Gavin E. Crooks
# Copyright (c) 2006 John Gilman
# This software is distributed under the MIT Open Source License.
# <http://www.opensource.org/licenses/mit-license.html>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class _my_Motif(corebio.matrix.AlphabeticArray):
"""A two dimensional array where the second dimension is indexed by an
Alphabet. Used to represent sequence motifs and similar information.
Attr:
- alphabet -- An Alphabet
- array -- A numpy array
- name -- The name of this motif (if any) as a string.
- description -- The description, if any.
"""
def __init__(self, alphabet, array=None, dtype=None, name=None,
description=None, scale=None):
corebio.matrix.AlphabeticArray.__init__(self,
(None, alphabet), array, dtype)
self.name = name
self.description = description
self.scale = scale
@property
def alphabet(self):
return self.alphabets[1]
def reindex(self, alphabet):
return _my_Motif(alphabet,
corebio.matrix.AlphabeticArray.reindex(self,
(None,
alphabet)))
# These methods alter self, and therefore do not return a value.
# (Compare to Seq objects, where the data is immutable and
# therefore methods return a new Seq.)
# TODO: Should reindex (above) also act on self?
def reverse(self):
"""Reverse sequence data"""
        self.array = self.array[::-1]  # view into the original numpy array.
@staticmethod # TODO: should be classmethod?
def read_transfac(fin, alphabet=None):
"""Parse a sequence matrix from a file.
        Returns a _my_Motif built from the parsed alphabet and matrix.
"""
items = []
start = True
for line in fin:
if line.isspace() or line[0] == '#':
continue
stuff = line.split()
if start and stuff[0] != 'PO' and stuff[0] != 'P0':
continue
if stuff[0] == 'XX' or stuff[0] == '//':
break
start = False
items.append(stuff)
if len(items) < 2:
raise ValueError("Vacuous file.")
# Is the first line a header line?
header = items.pop(0)
hcols = len(header)
rows = len(items)
cols = len(items[0])
if not(header[0] == 'PO' or header[0] == 'P0' or
hcols == cols-1 or hcols == cols-2):
raise ValueError("Missing header line!")
# Do all lines (except the first) contain the same number of items?
cols = len(items[0])
for i in range(1, len(items)):
if cols != len(items[i]):
raise ValueError("Inconsistant length, row %d: " % i)
# Vertical or horizontal arrangement?
if header[0] == 'PO' or header[0] == 'P0':
header.pop(0)
position_header = True
alphabet_header = True
for h in header:
if not corebio.utils.isint(h):
position_header = False
                # allow non-alphabetic characters; the original check was:
                #     if not str.isalpha(h): alphabet_header = False
if not position_header and not alphabet_header:
raise ValueError("Can't parse header: %s" % str(header))
if position_header and alphabet_header:
raise ValueError("Can't parse header")
# Check row headers
if alphabet_header:
for i, r in enumerate(items):
if not corebio.utils.isint(r[0]) and r[0][0] != 'P':
raise ValueError(
"Expected position as first item on line %d" % i)
r.pop(0)
defacto_alphabet = ''.join(header)
else:
raise ValueError("Can only use alphabet header.")
# a = []
# for i, r in enumerate(items):
# if not ischar(r[0]) and r[0][0] != 'P':
# raise ValueError(
# "Expected position as first item on line %d" % i)
# a.append(r.pop(0))
# defacto_alphabet = ''.join(a)
# Check defacto_alphabet
defacto_alphabet = corebio.seq.Alphabet(defacto_alphabet)
if alphabet:
if not defacto_alphabet.alphabetic(alphabet):
raise ValueError("Incompatible alphabets: %s , %s (defacto)"
% (alphabet, defacto_alphabet))
else:
raise ValueError('Incompatible alphabet')
# alphabets = (unambiguous_rna_alphabet,
# unambiguous_dna_alphabet,
# unambiguous_protein_alphabet,
# )
# for a in alphabets:
# if defacto_alphabet.alphabetic(a):
# alphabet = a
# break
# if not alphabet:
# alphabet = defacto_alphabet
# The last item of each row may be extra cruft. Remove
if len(items[0]) == len(header) + 1:
for r in items:
r.pop()
# items should now be a list of lists of numbers (as strings)
rows = len(items)
cols = len(items[0])
matrix = numpy.zeros((rows, cols), dtype=numpy.float64)
for r in range(rows):
for c in range(cols):
matrix[r, c] = float(items[r][c])
if position_header:
            matrix = matrix.transpose()
return _my_Motif(defacto_alphabet, matrix).reindex(alphabet)
# End of code modified from weblogo version 3.4
# ==============================================================
def LogoOverlay(sites, overlayfile, overlay, nperline, sitewidth, rmargin,
logoheight, barheight, barspacing, fix_limits=None,
fixlongname=False, overlay_cmap=None, underlay=False,
scalebar=False):
"""Makes overlay for *LogoPlot*.
    This function creates colored overlay bars showing up to two
    properties.
    The trick is to create the bars at exactly the right size so that
    they align with the logo plot when overlaid.
CALLING VARIABLES:
* *sites* : same as the variable of this name used by *LogoPlot*.
* *overlayfile* is a string giving the name of created PDF file containing
the overlay. It must end in the extension ``.pdf``.
* *overlay* : same as the variable of this name used by *LogoPlot*.
* *nperline* : same as the variable of this name used by *LogoPlot*.
* *sitewidth* is the width of each site in points.
* *rmargin* is the right margin in points.
* *logoheight* is the total height of each logo row in points.
* *barheight* is the total height of each bar in points.
* *barspacing* is the vertical spacing between bars in points.
* *fix_limits* has the same meaning as in *LogoPlot*.
* *fixlongname* has the same meaning as in *LogoPlot*.
* *overlay_cmap* has the same meaning as in *LogoPlot*.
* *underlay* is a bool. If `True`, make an underlay rather than an overlay.
* *scalebar*: if not `False`, is 2-tuple `(scalebarheight, scalebarlabel)`
where `scalebarheight` is in points.
"""
if fix_limits is None:
fix_limits = {}
if os.path.splitext(overlayfile)[1] != '.pdf':
raise ValueError("overlayfile must end in .pdf: %s" % overlayfile)
if not overlay_cmap:
(cmap, mapping_d, mapper) = KyteDoolittleColorMapping()
else:
mapper = pylab.cm.ScalarMappable(cmap=overlay_cmap)
cmap = mapper.get_cmap()
pts_per_inch = 72.0 # to convert between points and inches
# some general properties of the plot
matplotlib.rc('text', usetex=False) # now set to false, version 2.4.0
matplotlib.rc('xtick', labelsize=8)
matplotlib.rc('xtick', direction='out')
matplotlib.rc('ytick', direction='out')
matplotlib.rc('axes', linewidth=0.5)
matplotlib.rc('ytick.major', size=3)
matplotlib.rc('xtick.major', size=2.5)
# define sizes (still in points)
colorbar_bmargin = 20 # margin below color bars in points
colorbar_tmargin = 15 # margin above color bars in points
nlines = int(math.ceil(len(sites) / float(nperline)))
lmargin = 25 # left margin in points
barwidth = nperline * sitewidth
figwidth = lmargin + rmargin + barwidth
figheight = (nlines *
(logoheight + len(overlay) * (barheight + barspacing)) +
(barheight + colorbar_bmargin + colorbar_tmargin) +
(int(underlay) * len(overlay) * (barheight + barspacing)))
# set up the figure and axes
pylab.figure(figsize=(figwidth / pts_per_inch,
figheight / pts_per_inch))
# determine property types
prop_types = {}
for (prop_d, shortname, longname) in overlay:
if shortname == longname == 'wildtype':
assert all(((isinstance(prop, str) and len(prop) == 1) for
prop in prop_d.values())),\
'prop_d does not give letters'
proptype = 'wildtype'
(vmin, vmax) = (0, 1) # not used, but need to be assigned
propcategories = None # not used, but needs to be assigned
elif all((isinstance(prop, str) for prop in prop_d.values())):
proptype = 'discrete'
propcategories = list(set(prop_d.values()))
propcategories.sort()
(vmin, vmax) = (0, len(propcategories) - 1)
elif all((isinstance(prop, (int, float)) for prop in prop_d.values())):
proptype = 'continuous'
propcategories = None
(vmin, vmax) = (min(prop_d.values()), max(prop_d.values()))
# If vmin is slightly greater than zero, set it to zero.
# This helps for RSA properties.
if vmin >= 0 and vmin / float(vmax - vmin) < 0.05:
vmin = 0.0
            # And if vmax is just a bit less than one, set it to one.
if 0.9 <= vmax <= 1.0:
vmax = 1.0
else:
raise ValueError("Property %s is neither continuous or discrete. "
"Values are:\n%s"
% (shortname, str(prop_d.items())))
if shortname in fix_limits:
(vmin, vmax) = (min(fix_limits[shortname][0]),
max(fix_limits[shortname][0]))
assert vmin < vmax, ("vmin >= vmax, did you incorrectly use "
"fix_vmin and fix_vmax?")
prop_types[shortname] = (proptype, vmin, vmax, propcategories)
assert len(prop_types) == len(overlay), ("Not as many property types as "
"overlays. Did you give the same "
"name (shortname) to multiple "
"properties in the overlay?")
# loop over each line of the multi-lined plot
prop_image = {}
for iline in range(nlines):
isites = sites[iline * nperline: min(len(sites),
(iline + 1) * nperline)]
xlength = len(isites) * sitewidth
logo_ax = pylab.axes([lmargin / figwidth,
(((nlines - iline - 1) *
(logoheight + len(overlay) *
(barspacing + barheight))) /
figheight),
xlength / figwidth,
logoheight / figheight], frameon=False)
logo_ax.yaxis.set_ticks_position('none')
logo_ax.xaxis.set_ticks_position('none')
pylab.yticks([])
pylab.xlim(0.5, len(isites) + 0.5)
pylab.xticks([])
for (iprop, (prop_d, shortname, _longname)) in enumerate(overlay):
(proptype, vmin, vmax, propcategories) = prop_types[shortname]
prop_ax = pylab.axes([lmargin / figwidth,
(((nlines - iline - 1) *
(logoheight + len(overlay) *
(barspacing + barheight)) +
(1 - int(underlay)) *
logoheight +
int(underlay) *
barspacing + iprop *
(barspacing + barheight)) /
figheight),
xlength / figwidth, barheight / figheight],
frameon=(proptype != 'wildtype'))
prop_ax.xaxis.set_ticks_position('none')
pylab.xticks([])
pylab.xlim((0, len(isites)))
pylab.ylim(-0.5, 0.5)
if proptype == 'wildtype':
pylab.yticks([])
prop_ax.yaxis.set_ticks_position('none')
for (isite, site) in enumerate(isites):
pylab.text(isite + 0.5, -0.5, prop_d[site], size=9,
horizontalalignment='center',
family='monospace')
continue
pylab.yticks([0], [shortname], size=8)
prop_ax.yaxis.set_ticks_position('left')
propdata = pylab.zeros(shape=(1, len(isites)))
propdata[:] = pylab.nan # set to nan for all entries
for (isite, site) in enumerate(isites):
if site in prop_d:
if proptype == 'continuous':
propdata[(0, isite)] = prop_d[site]
elif proptype == 'discrete':
propdata[(0, isite)] = (propcategories
.index(prop_d[site]))
else:
raise ValueError('neither continuous nor discrete')
prop_image[shortname] = pylab.imshow(propdata,
interpolation='nearest',
aspect='auto',
extent=[0, len(isites),
0.5, -0.5],
cmap=cmap,
vmin=vmin, vmax=vmax)
pylab.yticks([0], [shortname], size=8)
# set up colorbar axes, then color bars
ncolorbars = len([p for p in prop_types.values() if p[0] != 'wildtype'])
if scalebar:
ncolorbars += 1
if ncolorbars == 1:
colorbarwidth = 0.4
colorbarspacingwidth = 1.0 - colorbarwidth
elif ncolorbars:
# space between color bars is this fraction of bar width
colorbarspacingfrac = 0.5
# width of color bars in fraction of figure width
colorbarwidth = 1.0 / (ncolorbars * (1.0 + colorbarspacingfrac))
# width of color bar spacing in fraction of figure width
colorbarspacingwidth = colorbarwidth * colorbarspacingfrac
# bottom of color bars
ybottom = 1.0 - (colorbar_tmargin + barheight) / figheight
icolorbar = -1
icolorbarshift = 0
while icolorbar < len(overlay):
if icolorbar == -1:
# show scale bar if being used
icolorbar += 1
if scalebar:
(scalebarheight, scalebarlabel) = scalebar
xleft = (colorbarspacingwidth * 0.5 + icolorbar *
(colorbarwidth + colorbarspacingwidth))
ytop = 1 - colorbar_tmargin / figheight
scalebarheightfrac = scalebarheight / figheight
# follow here for fig axes: https://stackoverflow.com/a/5022412
fullfigax = pylab.axes([0, 0, 1, 1], facecolor=(1, 1, 1, 0))
fullfigax.axvline(x=xleft, ymin=ytop - scalebarheightfrac,
ymax=ytop, color='black', linewidth=1.5)
pylab.text(xleft + 0.005, ytop - scalebarheightfrac / 2.0,
scalebarlabel, verticalalignment='center',
horizontalalignment='left',
transform=fullfigax.transAxes)
continue
(prop_d, shortname, longname) = overlay[icolorbar]
icolorbar += 1
(proptype, vmin, vmax, propcategories) = prop_types[shortname]
if proptype == 'wildtype':
icolorbarshift += 1
continue
if shortname == longname or not longname:
propname = shortname
elif fixlongname:
propname = longname
else:
propname = "%s (%s)" % (longname, shortname)
colorbar_ax = pylab.axes([colorbarspacingwidth * 0.5 +
(icolorbar - icolorbarshift -
int(not bool(scalebar))) *
(colorbarwidth + colorbarspacingwidth),
ybottom,
colorbarwidth,
barheight / figheight],
frameon=True)
colorbar_ax.xaxis.set_ticks_position('bottom')
colorbar_ax.yaxis.set_ticks_position('none')
pylab.xticks([])
pylab.yticks([])
pylab.title(propname, size=9)
if proptype == 'continuous':
cb = pylab.colorbar(prop_image[shortname],
cax=colorbar_ax,
orientation='horizontal')
            # if the range is close to zero-to-one, manually set ticks to 0, 0.5, 1.
# This helps for RSA
if -0.1 <= vmin <= 0 and 1.0 <= vmax <= 1.15:
cb.set_ticks([0, 0.5, 1])
cb.set_ticklabels(['0', '0.5', '1'])
# if it seems plausible, set integer ticks
if 4 < (vmax - vmin) <= 11:
fixedticks = list(range(int(vmin), int(vmax) + 1))
cb.set_ticks(fixedticks)
cb.set_ticklabels([str(itick) for itick in fixedticks])
elif proptype == 'discrete':
cb = pylab.colorbar(prop_image[shortname],
cax=colorbar_ax,
orientation='horizontal',
boundaries=list(range(len(propcategories) + 1)
),
values=list(range(len(propcategories))))
cb.set_ticks([i + 0.5 for i in range(len(propcategories))])
cb.set_ticklabels(propcategories)
else:
raise ValueError("Invalid proptype")
if shortname in fix_limits:
(ticklocs, ticknames) = fix_limits[shortname]
cb.set_ticks(ticklocs)
cb.set_ticklabels(ticknames)
# save the plot
pylab.savefig(overlayfile, transparent=True)
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-3.0 |
gpfreitas/bokeh | bokeh/tests/test_sources.py | 26 | 3245 | from __future__ import absolute_import
import unittest
from unittest import skipIf
import warnings
try:
import pandas as pd
is_pandas = True
except ImportError as e:
is_pandas = False
from bokeh.models.sources import DataSource, ColumnDataSource, ServerDataSource
class TestColumnDataSources(unittest.TestCase):
def test_basic(self):
ds = ColumnDataSource()
self.assertTrue(isinstance(ds, DataSource))
def test_init_dict_arg(self):
data = dict(a=[1], b=[2])
ds = ColumnDataSource(data)
self.assertEquals(ds.data, data)
self.assertEquals(set(ds.column_names), set(data.keys()))
def test_init_dict_data_kwarg(self):
data = dict(a=[1], b=[2])
ds = ColumnDataSource(data=data)
self.assertEquals(ds.data, data)
self.assertEquals(set(ds.column_names), set(data.keys()))
@skipIf(not is_pandas, "pandas not installed")
def test_init_pandas_arg(self):
data = dict(a=[1, 2], b=[2, 3])
df = pd.DataFrame(data)
ds = ColumnDataSource(df)
self.assertTrue(set(df.columns).issubset(set(ds.column_names)))
for key in data.keys():
self.assertEquals(list(df[key]), data[key])
self.assertEqual(set(ds.column_names) - set(df.columns), set(["index"]))
@skipIf(not is_pandas, "pandas not installed")
def test_init_pandas_data_kwarg(self):
data = dict(a=[1, 2], b=[2, 3])
df = pd.DataFrame(data)
ds = ColumnDataSource(data=df)
self.assertTrue(set(df.columns).issubset(set(ds.column_names)))
for key in data.keys():
self.assertEquals(list(df[key]), data[key])
self.assertEqual(set(ds.column_names) - set(df.columns), set(["index"]))
def test_add_with_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], name="foo")
self.assertEquals(name, "foo")
name = ds.add([4,5,6], name="bar")
self.assertEquals(name, "bar")
def test_add_without_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3])
self.assertEquals(name, "Series 0")
name = ds.add([4,5,6])
self.assertEquals(name, "Series 1")
def test_add_with_and_without_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], "foo")
self.assertEquals(name, "foo")
name = ds.add([4,5,6])
self.assertEquals(name, "Series 1")
def test_remove_exists(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], "foo")
assert name
ds.remove("foo")
self.assertEquals(ds.column_names, [])
def test_remove_exists2(self):
with warnings.catch_warnings(record=True) as w:
ds = ColumnDataSource()
ds.remove("foo")
self.assertEquals(ds.column_names, [])
self.assertEquals(len(w), 1)
self.assertEquals(w[0].category, UserWarning)
self.assertEquals(str(w[0].message), "Unable to find column 'foo' in data source")
class TestServerDataSources(unittest.TestCase):
def test_basic(self):
ds = ServerDataSource()
self.assertTrue(isinstance(ds, DataSource))
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
Myasuka/scikit-learn | sklearn/utils/extmath.py | 142 | 21102 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
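# Illustrative sketch added for this dump (not part of the original
# scikit-learn module): norm and squared_norm on a simple 3-4 vector.
def _norm_example():
    x = np.array([3.0, 4.0])
    assert np.allclose(norm(x), 5.0)          # Euclidean length
    assert np.allclose(squared_norm(x), 25.0)  # norm(x) ** 2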
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
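# Illustrative sketch (not part of the original module): row_norms matches
# np.sqrt((X * X).sum(axis=1)) without allocating the X * X temporary.
def _row_norms_example():
    X = np.array([[3.0, 4.0], [6.0, 8.0]])
    assert np.allclose(row_norms(X), [5.0, 10.0])
    assert np.allclose(row_norms(X, squared=True), [25.0, 100.0])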
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
Equivalent to : np.log(nl.det(A)) but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
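# Illustrative sketch (not part of the original module): for a diagonal
# symmetric positive-definite matrix the log-determinant is the sum of the
# logs of the diagonal entries, and a non-positive determinant gives -Inf.
def _fast_logdet_example():
    A = np.diag([1.0, 2.0, 4.0])
    assert np.allclose(fast_logdet(A), np.log(8.0))
    assert fast_logdet(np.diag([1.0, -1.0])) == -np.inf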
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while warranting Fortran contiguity.
This helps avoiding extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
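# Illustrative sketch (not part of the original module): density is the
# fraction of non-zero entries of a dense or sparse vector.
def _density_example():
    w = np.array([0.0, 1.0, 0.0, 3.0])
    assert density(w) == 0.5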
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
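# Illustrative sketch (not part of the original module): multiplying a sparse
# identity by a dense matrix returns the dense matrix itself.
def _safe_sparse_dot_example():
    from scipy.sparse import csr_matrix
    b = np.array([[1.0, 2.0], [3.0, 4.0]])
    a = csr_matrix(np.eye(2))
    assert np.allclose(safe_sparse_dot(a, b, dense_output=True), b)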
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
        An (A.shape[0] x size) matrix with orthonormal columns, the range
        of which approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
    # sampling the range of A by linear projection of r
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
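# Illustrative sketch (not part of the original module): the returned Q has
# orthonormal columns whose span approximates the range of A.
def _randomized_range_finder_example():
    rng = np.random.RandomState(0)
    A = rng.normal(size=(20, 5))
    Q = randomized_range_finder(A, size=5, n_iter=2, random_state=0)
    assert Q.shape == (20, 5)
    assert np.allclose(np.dot(Q.T, Q), np.eye(5))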
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tends to be a little faster in that
        case.
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
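# Illustrative sketch (not part of the original module): the truncated factors
# reconstruct a low-rank matrix almost exactly when n_components matches the
# true rank.
def _randomized_svd_example():
    rng = np.random.RandomState(0)
    M = np.dot(rng.normal(size=(30, 3)), rng.normal(size=(3, 20)))  # rank 3
    U, s, V = randomized_svd(M, n_components=3, n_iter=3, random_state=0)
    assert U.shape == (30, 3) and s.shape == (3,) and V.shape == (3, 20)
    assert np.allclose(M, np.dot(U * s, V))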
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
    # the fewest errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
        Real symmetric or complex Hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping. Otherwise,
use the rows of v. The choice of which variable to base the decision on
is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
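# Illustrative sketch (not part of the original module): flipping makes the
# largest-magnitude entry of each column of u positive without changing the
# product np.dot(u * s, v).
def _svd_flip_example():
    rng = np.random.RandomState(0)
    X = rng.normal(size=(6, 4))
    U, s, V = linalg.svd(X, full_matrices=False)
    before = np.dot(U * s, V)
    U2, V2 = svd_flip(U.copy(), V.copy())
    cols = np.argmax(np.abs(U2), axis=0)
    assert np.all(U2[cols, range(U2.shape[1])] > 0)
    assert np.allclose(np.dot(U2 * s, V2), before)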
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
out: array-like, shape: (M, N), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N)
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = check_array(X, dtype=np.float)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
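# Illustrative sketch (not part of the original module): log_logistic matches
# the naive formula for moderate inputs and stays finite where the naive
# formula would underflow.
def _log_logistic_example():
    x = np.array([[-3.0, 0.0, 3.0]])
    assert np.allclose(log_logistic(x), np.log(1.0 / (1.0 + np.exp(-x))))
    assert np.all(np.isfinite(log_logistic(np.array([[-1000.0]]))))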
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
    Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
    T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
    variance: analysis and recommendations, The American Statistician,
    Vol. 37, No. 3, pp. 242-247
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
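# Illustrative sketch (not part of the original module): feeding a second
# batch into the update reproduces the mean and variance of the concatenated
# data.
def _batch_mean_variance_example():
    rng = np.random.RandomState(0)
    X1, X2 = rng.normal(size=(50, 3)), rng.normal(size=(70, 3))
    mean, var, count = _batch_mean_variance_update(
        X2, X1.mean(axis=0), X1.var(axis=0), X1.shape[0])
    X = np.vstack((X1, X2))
    assert count == X.shape[0]
    assert np.allclose(mean, X.mean(axis=0))
    assert np.allclose(var, X.var(axis=0))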
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
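# Illustrative sketch (not part of the original module): each row is flipped
# so that its largest-magnitude entry becomes positive.
def _sign_flip_example():
    u = np.array([[1.0, -5.0], [2.0, 1.0]])
    assert np.allclose(_deterministic_vector_sign_flip(u.copy()),
                       [[-1.0, 5.0], [2.0, 1.0]])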
| bsd-3-clause |
kod3r/sklearn-pmml | sklearn_pmml/convert/utils.py | 3 | 6372 | from functools import partial
from functools import reduce  # not a builtin under Python 3; functools.reduce also exists on Python 2.6+
from sklearn_pmml import pmml
from pyxb.utils.domutils import BindingDOMSupport as bds
import numpy as np
estimator_to_converter = {}
def find_converter(estimator):
# TODO: do the search here
return estimator_to_converter.get(estimator.__class__, None)
def pmml_row(**columns):
"""
Creates pmml.row element with columns
:param columns: key-value pairs to be inserted into the row
:return: pmml.row element
"""
r = pmml.row()
for name, value in columns.items():
el = bds().createChildElement(name)
bds().appendTextChild(value, el)
r.append(el)
return r
class DerivedFeatureTransformations(object):
"""
A helper for building Derived Feature transformations. Creates both transformation and the DerivedFeature content.
Typical usage of the methods:
DerivedFeature(
RealNumericFeature('my_derived_feature'),
**DerivedFeatureTransformations.field_in_list('input_feature', ['A', 'B', 'C'])
)
"""
TRANSFORMATION = 'transformation'
FUNCTION = 'function'
@staticmethod
def field_in_list(field, values):
mv = pmml.MapValues(outputColumn='output', defaultValue=0)
mv.append(pmml.FieldColumnPair(field=field, column='input'))
it = pmml.InlineTable()
for v in values:
it.append(pmml_row(input=v, output=1))
mv.append(it)
return {
DerivedFeatureTransformations.TRANSFORMATION: mv,
DerivedFeatureTransformations.FUNCTION: lambda df: reduce(np.logical_or, [df[field] == _ for _ in values])
}
@staticmethod
def field_not_in_list(field, values):
mv = pmml.MapValues(outputColumn='output', defaultValue=1)
mv.append(pmml.FieldColumnPair(field=field, column='input'))
it = pmml.InlineTable()
for v in values:
it.append(pmml_row(input=v, output=0))
mv.append(it)
return {
DerivedFeatureTransformations.TRANSFORMATION: mv,
DerivedFeatureTransformations.FUNCTION: lambda df: reduce(np.logical_and, [df[field] != _ for _ in values])
}
@staticmethod
def map_values(field, value_map, default_value):
        mv = pmml.MapValues(outputColumn='output', defaultValue=default_value)
mv.append(pmml.FieldColumnPair(field=field, column='input'))
it = pmml.InlineTable()
for k, v in value_map.items():
it.append(pmml_row(input=k, output=v))
mv.append(it)
return {
DerivedFeatureTransformations.TRANSFORMATION: mv,
DerivedFeatureTransformations.FUNCTION:
lambda df: np.vectorize(partial(value_map.get, default_value))(df[field])
}
@staticmethod
def arithmetics(tree):
"""
Takes an arithmetic operations tree (Lisp-styled) as an input
"""
def basic_function(func_name, args):
expr = pmml.Apply(function=func_name)
for a in args:
expr.append(a)
return expr
def mod_function(args):
expr = pmml.Apply(function='-')
expr.append(args[0])
mul = pmml.Apply(function='*')
mul.append(args[1])
floor = pmml.Apply(function='floor')
mul.append(floor)
div = pmml.Apply(function='/')
floor.append(div)
div.append(args[0])
div.append(args[1])
return expr
# TODO: test me
def greedy_evaluation(node):
if isinstance(node, str):
# field reference
return (lambda df: df[node]), pmml.FieldRef(field=node)
elif isinstance(node, (tuple, list)):
# eval arguments
args = map(greedy_evaluation, node[1:])
functions = {
'*': lambda df: np.multiply(*[_[0](df) for _ in args]),
'-': lambda df: np.subtract(*[_[0](df) for _ in args]),
'+': lambda df: np.add(*[_[0](df) for _ in args]),
'/': lambda df: np.divide(*[_[0](df) for _ in args]),
'%': lambda df: np.mod(*[_[0](df) for _ in args]),
}
assert isinstance(node[0], str), 'First element should be a code of operation'
assert node[0] in functions, 'Unknown function code {}. Supported codes: {}'.format(node[0], functions.keys())
expr = {
'*': partial(basic_function, '*'),
'-': partial(basic_function, '-'),
'+': partial(basic_function, '+'),
'/': partial(basic_function, '/'),
'%': mod_function
}.get(node[0])([a[1] for a in args])
func = functions[node[0]]
return func, expr
else:
# numeric terminal
return lambda df: node, pmml.Constant(node, dataType='double')
function, transformation = greedy_evaluation(tree)
return {
DerivedFeatureTransformations.TRANSFORMATION: transformation,
DerivedFeatureTransformations.FUNCTION: function
}
@staticmethod
def replace_value(field, original, replacement):
if original is not None:
transformation = pmml.Apply(function='if')
cond = pmml.Apply(function='equals')
cond.append(pmml.FieldRef(field=field))
cond.append(pmml.Constant(original))
transformation.append(pmml.Constant(replacement))
transformation.append(pmml.FieldRef(field=field))
return {
DerivedFeatureTransformations.TRANSFORMATION: transformation,
DerivedFeatureTransformations.FUNCTION: lambda df: np.where(df[field] == original, replacement, df[field])
}
else:
transformation = pmml.Apply(function='+', mapMissingTo=replacement)
transformation.append(pmml.Constant(0))
transformation.append(pmml.FieldRef(field=field))
return {
DerivedFeatureTransformations.TRANSFORMATION: transformation,
DerivedFeatureTransformations.FUNCTION: lambda df: np.where(df[field].isnull(), replacement, df[field])
}
| mit |
ndingwall/scikit-learn | sklearn/cluster/_dbscan.py | 8 | 16138 | # -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# Lars Buitinck
#
# License: BSD 3 clause
import numpy as np
import warnings
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..utils.validation import _check_sample_weight, _deprecate_positional_args
from ..neighbors import NearestNeighbors
from ._dbscan_inner import dbscan_inner
@_deprecate_positional_args
def dbscan(X, eps=0.5, *, min_samples=5, metric='minkowski',
metric_params=None, algorithm='auto', leaf_size=30, p=2,
sample_weight=None, n_jobs=None):
"""Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : {array-like, sparse (CSR) matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
eps : float, default=0.5
The maximum distance between two samples for one to be considered
as in the neighborhood of the other. This is not a maximum bound
on the distances of points within a cluster. This is the most
important DBSCAN parameter to choose appropriately for your data set
and distance function.
min_samples : int, default=5
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : str or callable, default='minkowski'
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by :func:`sklearn.metrics.pairwise_distances` for
its metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square during fit.
X may be a :term:`sparse graph <sparse graph>`,
in which case only "nonzero" elements may be considered neighbors.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
.. versionadded:: 0.19
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, default=30
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, default=2
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array-like of shape (n_samples,), default=None
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search. ``None`` means
1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means
using all processors. See :term:`Glossary <n_jobs>` for more details.
If precomputed distance are used, parallel execution is not available
and thus n_jobs will have no effect.
Returns
-------
core_samples : ndarray of shape (n_core_samples,)
Indices of core samples.
labels : ndarray of shape (n_samples,)
Cluster labels for each point. Noisy samples are given the label -1.
See Also
--------
DBSCAN : An estimator interface for this clustering algorithm.
OPTICS : A similar estimator interface clustering at multiple values of
eps. Our implementation is optimized for memory usage.
Notes
-----
For an example, see :ref:`examples/cluster/plot_dbscan.py
<sphx_glr_auto_examples_cluster_plot_dbscan.py>`.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n). It may attract a higher
memory complexity when querying these nearest neighborhoods, depending
on the ``algorithm``.
One way to avoid the query complexity is to pre-compute sparse
neighborhoods in chunks using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with
``mode='distance'``, then using ``metric='precomputed'`` here.
Another way to reduce memory and computation time is to remove
(near-)duplicate points and use ``sample_weight`` instead.
:func:`cluster.optics <sklearn.cluster.optics>` provides a similar
clustering with lower memory usage.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017).
DBSCAN revisited, revisited: why and how you should (still) use DBSCAN.
ACM Transactions on Database Systems (TODS), 42(3), 19.
"""
est = DBSCAN(eps=eps, min_samples=min_samples, metric=metric,
metric_params=metric_params, algorithm=algorithm,
leaf_size=leaf_size, p=p, n_jobs=n_jobs)
est.fit(X, sample_weight=sample_weight)
return est.core_sample_indices_, est.labels_
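# Illustrative sketch added for this dump (not part of the scikit-learn API):
# two dense groups and one outlier, which is labelled -1 (noise).
def _dbscan_function_example():
    X = np.array([[1.0, 2.0], [2.0, 2.0], [2.0, 3.0],
                  [8.0, 7.0], [8.0, 8.0], [25.0, 80.0]])
    core_samples, labels = dbscan(X, eps=3, min_samples=2)
    assert list(labels) == [0, 0, 0, 1, 1, -1]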
class DBSCAN(ClusterMixin, BaseEstimator):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, default=0.5
The maximum distance between two samples for one to be considered
as in the neighborhood of the other. This is not a maximum bound
on the distances of points within a cluster. This is the most
important DBSCAN parameter to choose appropriately for your data set
and distance function.
min_samples : int, default=5
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by :func:`sklearn.metrics.pairwise_distances` for
its metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a :term:`Glossary <sparse graph>`, in which
case only "nonzero" elements may be considered neighbors for DBSCAN.
.. versionadded:: 0.17
metric *precomputed* to accept precomputed sparse matrix.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
.. versionadded:: 0.19
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, default=30
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, default=None
The power of the Minkowski metric to be used to calculate distance
between points. If None, then ``p=2`` (equivalent to the Euclidean
distance).
n_jobs : int, default=None
The number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
core_sample_indices_ : ndarray of shape (n_core_samples,)
Indices of core samples.
components_ : ndarray of shape (n_core_samples, n_features)
Copy of each core sample found by training.
labels_ : ndarray of shape (n_samples)
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Examples
--------
>>> from sklearn.cluster import DBSCAN
>>> import numpy as np
>>> X = np.array([[1, 2], [2, 2], [2, 3],
... [8, 7], [8, 8], [25, 80]])
>>> clustering = DBSCAN(eps=3, min_samples=2).fit(X)
>>> clustering.labels_
array([ 0, 0, 0, 1, 1, -1])
>>> clustering
DBSCAN(eps=3, min_samples=2)
See Also
--------
OPTICS : A similar clustering at multiple values of eps. Our implementation
is optimized for memory usage.
Notes
-----
For an example, see :ref:`examples/cluster/plot_dbscan.py
<sphx_glr_auto_examples_cluster_plot_dbscan.py>`.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
    while original DBSCAN had memory complexity O(n). It may also incur a
    higher memory cost when querying these nearest neighborhoods, depending
    on the ``algorithm``.
One way to avoid the query complexity is to pre-compute sparse
neighborhoods in chunks using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with
``mode='distance'``, then using ``metric='precomputed'`` here.
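    A minimal sketch of that pre-computation (illustrative only; ``X`` and
    ``eps`` stand in for your data and chosen radius)::
        from sklearn.neighbors import NearestNeighbors
        neighbors = NearestNeighbors(radius=eps).fit(X)
        D = neighbors.radius_neighbors_graph(X, mode='distance')
        labels = DBSCAN(eps=eps, metric='precomputed').fit_predict(D)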
Another way to reduce memory and computation time is to remove
(near-)duplicate points and use ``sample_weight`` instead.
:class:`cluster.OPTICS` provides a similar clustering with lower memory
usage.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017).
DBSCAN revisited, revisited: why and how you should (still) use DBSCAN.
ACM Transactions on Database Systems (TODS), 42(3), 19.
"""
@_deprecate_positional_args
def __init__(self, eps=0.5, *, min_samples=5, metric='euclidean',
metric_params=None, algorithm='auto', leaf_size=30, p=None,
n_jobs=None):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.metric_params = metric_params
self.algorithm = algorithm
self.leaf_size = leaf_size
self.p = p
self.n_jobs = n_jobs
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features, or distance matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
(n_samples, n_samples)
Training instances to cluster, or distances between instances if
``metric='precomputed'``. If a sparse matrix is provided, it will
be converted into a sparse ``csr_matrix``.
sample_weight : array-like of shape (n_samples,), default=None
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with a
negative weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
"""
X = self._validate_data(X, accept_sparse='csr')
if not self.eps > 0.0:
raise ValueError("eps must be positive.")
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
        # Calculate neighborhood for all samples. This leaves the original
        # point in, which needs to be considered later (i.e. point i is in the
        # neighborhood of point i. While true, it is useless information)
if self.metric == 'precomputed' and sparse.issparse(X):
# set the diagonal to explicit values, as a point is its own
# neighbor
with warnings.catch_warnings():
warnings.simplefilter('ignore', sparse.SparseEfficiencyWarning)
X.setdiag(X.diagonal()) # XXX: modifies X's internals in-place
neighbors_model = NearestNeighbors(
radius=self.eps, algorithm=self.algorithm,
leaf_size=self.leaf_size, metric=self.metric,
metric_params=self.metric_params, p=self.p, n_jobs=self.n_jobs)
neighbors_model.fit(X)
# This has worst case O(n^2) memory complexity
neighborhoods = neighbors_model.radius_neighbors(X,
return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors)
for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors])
for neighbors in neighborhoods])
# Initially, all samples are noise.
labels = np.full(X.shape[0], -1, dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= self.min_samples,
dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
self.core_sample_indices_ = np.where(core_samples)[0]
self.labels_ = labels
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
def fit_predict(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features or distance matrix,
and return cluster labels.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
(n_samples, n_samples)
Training instances to cluster, or distances between instances if
``metric='precomputed'``. If a sparse matrix is provided, it will
be converted into a sparse ``csr_matrix``.
sample_weight : array-like of shape (n_samples,), default=None
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with a
negative weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
labels : ndarray of shape (n_samples,)
Cluster labels. Noisy samples are given the label -1.
"""
self.fit(X, sample_weight=sample_weight)
return self.labels_
| bsd-3-clause |
crichardson17/starburst_atlas | Low_resolution_sims/Dusty_LowRes/Geneva_inst_Rot/Geneva_inst_Rot_6/fullgrid/UV1.py | 31 | 9315 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith("1.grd"):
gridfile1 = file
for file in os.listdir('.'):
if file.endswith("2.grd"):
gridfile2 = file
for file in os.listdir('.'):
if file.endswith("3.grd"):
gridfile3 = file
# ------------------------
for file in os.listdir('.'):
if file.endswith("1.txt"):
Elines1 = file
for file in os.listdir('.'):
if file.endswith("2.txt"):
Elines2 = file
for file in os.listdir('.'):
if file.endswith("3.txt"):
Elines3 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers2 = csvReader.next()
for row in csvReader:
dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers3 = csvReader.next()
for row in csvReader:
dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# ---------------------------------------------------
#for concatenating Emission lines data
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
#for lines
headers = headers[1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
		else:
			concatenated_data[i,j] = 0  # non-positive log ratios stay at zero
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
#change desired lines here!
line = [0, #977
1, #991
2, #1026
5, #1216
91, #1218
6, #1239
7, #1240
8, #1243
9, #1263
10, #1304
11,#1308
12, #1397
13, #1402
14, #1406
16, #1486
17] #1531
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Dusty UV Lines", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Dusty_UV_Lines.pdf')
plt.clf()
print "figure saved"
| gpl-2.0 |
saiwing-yeung/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
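# (This follows from the decision boundary w[0]*x + w[1]*y + intercept = 0,
# rewritten as y = a*x - intercept / w[1].)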
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
rosswhitfield/mantid | qt/applications/workbench/workbench/plotting/plotscriptgenerator/legend.py | 3 | 6968 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2021 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
from matplotlib import rcParams
from matplotlib.font_manager import FontProperties
from mantid.plots.legend import LegendProperties, convert_color_to_hex
from workbench.plotting.plotscriptgenerator.utils import convert_args_to_string
# Default values of all options that are accessible via the legend tab in the plot settings.
mpl_default_kwargs = {
'visible': True,
'title': '',
'background_color': convert_color_to_hex(rcParams['axes.facecolor']), # inherits from axes by default
'edge_color': convert_color_to_hex(rcParams['legend.edgecolor']),
'transparency': rcParams['legend.framealpha'],
'entries_font': 'DejaVu Sans',
'entries_size': rcParams['legend.fontsize'],
'entries_color': '#000000',
'title_font': 'DejaVu Sans',
'title_size': rcParams['axes.labelsize'], # Uses axes size by default
'title_color': '#000000',
'marker_size': rcParams['legend.handlelength'],
'box_visible': rcParams['legend.frameon'],
'shadow': rcParams['legend.shadow'],
'round_edges': rcParams['legend.fancybox'],
'columns': 1,
'column_spacing': rcParams['legend.columnspacing'],
'label_spacing': rcParams['legend.labelspacing'],
'marker_position': "Left of Entries",
'markers': rcParams['legend.numpoints'],
'border_padding': rcParams['legend.borderpad'],
'marker_label_padding': rcParams['legend.handletextpad']
}
# Dictionary to convert from the mantid legend interface to matplotlib legend argument names.
MANTID_TO_MPL = {
'background_color': 'facecolor',
'edge_color': 'edgecolor',
'transparency': 'framealpha',
'entries_size': 'fontsize',
'columns': 'ncol',
'markers': 'numpoints',
'marker_position': 'markerfirst',
'box_visible': 'frameon',
'round_edges': 'fancybox',
'shadow': 'shadow',
'title': 'title',
'border_padding': 'borderpad',
'label_spacing': 'labelspacing',
'marker_size': 'handlelength',
'marker_label_padding': 'handletextpad',
'column_spacing': 'columnspacing'
}
def generate_legend_commands(legend):
"""
Generates a string containing a comma separated list of kwargs to set legend properties.
"""
kwargs = get_legend_command_kwargs(legend)
return convert_args_to_string([], kwargs)
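# A hypothetical usage sketch (names are illustrative, not from this module):
# for a legend attached to a Mantid-generated plot, something like
#     commands = generate_legend_commands(ax.get_legend())
# is expected to yield a string such as "title='Example', ncol=2" containing
# only the settings that differ from the defaults above.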
def generate_title_font_commands(legend, legend_object_var):
"""
Generate commands for setting properties for the legend title font.
"""
title_commands = []
kwargs = LegendProperties.from_legend(legend)
_remove_kwargs_if_default(kwargs)
if 'title_font' in kwargs:
title_commands.append(legend_object_var + ".get_title().set_fontname('" + kwargs['title_font'] + "')")
if 'title_color' in kwargs:
title_commands.append(legend_object_var + ".get_title().set_color('" + kwargs['title_color'] + "')")
if 'title_size' in kwargs:
title_commands.append(legend_object_var + ".get_title().set_fontsize('" + str(kwargs['title_size']) + "')")
return title_commands
def generate_label_font_commands(legend, legend_object_var):
"""
Generate python commands for setting the legend text label properties. The size is not present here because it is
already included in the list of legend properties.
"""
label_commands = []
kwargs = LegendProperties.from_legend(legend)
_remove_kwargs_if_default(kwargs)
if 'entries_font' in kwargs:
label_commands.append("[label.set_fontname('" + kwargs['entries_font']
+ "') for label in " + legend_object_var + ".get_texts()]")
if 'entries_color' in kwargs:
label_commands.append("[label.set_color('" + kwargs['entries_color']
+ "') for label in " + legend_object_var + ".get_texts()]")
return label_commands
def generate_visible_command(legend, legend_object_var):
"""
Returns a command to set the visibility of the legend if it's different to the default value.
It's returned as a list for convenience, so it can be added to the end of a list without checking if it's empty.
"""
visible_command = []
kwargs = LegendProperties.from_legend(legend)
_remove_kwargs_if_default(kwargs)
if 'visible' in kwargs:
visible_command.append(legend_object_var + ".set_visible(" + str(kwargs['visible']) + ")")
return visible_command
def get_legend_command_kwargs(legend):
"""
Returns a list of matplotlib legend kwargs, removing any that are default values.
"""
kwargs = LegendProperties.from_legend(legend)
_remove_kwargs_if_default(kwargs)
# Convert the kwargs to the matplotlib ones.
return get_mpl_kwargs(kwargs)
def get_mpl_kwargs(kwargs):
"""
Keep only matplotlib legend kwargs, and convert the keys to matplotlib compatible ones.
"""
mpl_kwargs = {}
for key, value in kwargs.items():
if key in MANTID_TO_MPL:
mpl_kwargs[MANTID_TO_MPL[key]] = value
# The markerfirst kwarg is a boolean in matplotlib, so need to convert it.
if 'markerfirst' in mpl_kwargs:
mpl_kwargs['markerfirst'] = mpl_kwargs['markerfirst'] == "Left of Entries"
return mpl_kwargs
def _remove_kwargs_if_default(kwargs):
"""
Remove kwargs from the given dict if they're the default values
"""
for kwarg, default_value in mpl_default_kwargs.items():
if kwargs[kwarg] == default_value:
kwargs.pop(kwarg)
# Font size defaults are string values (e.g. 'medium', 'large', 'x-large'), so we need to convert the defaults to
# point sizes before comparing.
if 'title_size' in kwargs:
if convert_to_point_size(kwargs['title_size']) == convert_to_point_size(mpl_default_kwargs['title_size']):
kwargs.pop('title_size')
if 'entries_size' in kwargs:
if convert_to_point_size(kwargs['entries_size']) == convert_to_point_size(mpl_default_kwargs['entries_size']):
kwargs.pop('entries_size')
# Hex values of colours may not be the same case, so convert to lower before comparing.
if 'background_color' in kwargs:
if kwargs['background_color'].lower() == mpl_default_kwargs['background_color'].lower():
kwargs.pop('background_color')
if 'edge_color' in kwargs:
if kwargs['edge_color'].lower() == mpl_default_kwargs['edge_color'].lower():
kwargs.pop('edge_color')
def convert_to_point_size(font_size):
"""
Convert font size (may be int or string, e.g. 'medium', 'large', ...) to point size.
"""
font = FontProperties()
font.set_size(font_size)
return font.get_size_in_points()
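# Rough reference values, assuming matplotlib's default rcParams (the exact
# numbers scale with the configured base font size):
#   convert_to_point_size('medium')  -> 10.0
#   convert_to_point_size('x-large') -> 14.4
#   convert_to_point_size(12)        -> 12.0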
| gpl-3.0 |
zaxtax/scikit-learn | sklearn/datasets/tests/test_kddcup99.py | 59 | 1336 | """Test kddcup99 loader. Only 'percent10' mode is tested, as the full data
is too big to use in unit-testing.
The test is skipped if the data wasn't previously fetched and saved to
scikit-learn data folder.
"""
import errno
from sklearn.datasets import fetch_kddcup99
from sklearn.utils.testing import assert_equal, SkipTest
def test_percent10():
try:
data = fetch_kddcup99(download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("kddcup99 dataset can not be loaded.")
assert_equal(data.data.shape, (494021, 41))
assert_equal(data.target.shape, (494021,))
data_shuffled = fetch_kddcup99(shuffle=True, random_state=0)
assert_equal(data.data.shape, data_shuffled.data.shape)
assert_equal(data.target.shape, data_shuffled.target.shape)
data = fetch_kddcup99('SA')
assert_equal(data.data.shape, (100655, 41))
assert_equal(data.target.shape, (100655,))
data = fetch_kddcup99('SF')
assert_equal(data.data.shape, (73237, 4))
assert_equal(data.target.shape, (73237,))
data = fetch_kddcup99('http')
assert_equal(data.data.shape, (58725, 3))
assert_equal(data.target.shape, (58725,))
data = fetch_kddcup99('smtp')
assert_equal(data.data.shape, (9571, 3))
assert_equal(data.target.shape, (9571,))
| bsd-3-clause |
witcxc/scipy | scipy/signal/signaltools.py | 2 | 81684 | # Author: Travis Oliphant
# 1999 -- 2002
from __future__ import division, print_function, absolute_import
import warnings
import threading
from . import sigtools
from scipy._lib.six import callable
from scipy._lib._version import NumpyVersion
from scipy import linalg
from scipy.fftpack import (fft, ifft, ifftshift, fft2, ifft2, fftn,
ifftn, fftfreq)
from numpy.fft import rfftn, irfftn
from numpy import (allclose, angle, arange, argsort, array, asarray,
atleast_1d, atleast_2d, cast, dot, exp, expand_dims,
iscomplexobj, isscalar, mean, ndarray, newaxis, ones, pi,
poly, polyadd, polyder, polydiv, polymul, polysub, polyval,
prod, product, r_, ravel, real_if_close, reshape,
roots, sort, sum, take, transpose, unique, where, zeros,
zeros_like)
import numpy as np
from scipy.special import factorial
from .windows import get_window
from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext
__all__ = ['correlate', 'fftconvolve', 'convolve', 'convolve2d', 'correlate2d',
'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',
'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',
'residuez', 'resample', 'detrend', 'lfilter_zi', 'sosfilt_zi',
'filtfilt', 'decimate', 'vectorstrength']
_modedict = {'valid': 0, 'same': 1, 'full': 2}
_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,
'symmetric': 1, 'reflect': 4}
_rfft_mt_safe = (NumpyVersion(np.__version__) >= '1.9.0.dev-e24486e')
_rfft_lock = threading.Lock()
def _valfrommode(mode):
try:
val = _modedict[mode]
except KeyError:
if mode not in [0, 1, 2]:
raise ValueError("Acceptable mode flags are 'valid' (0),"
" 'same' (1), or 'full' (2).")
val = mode
return val
def _bvalfromboundary(boundary):
    try:
        val = _boundarydict[boundary] << 2
    except KeyError:
        # `boundary` may also be passed numerically (0, 1 or 2).
        if boundary not in [0, 1, 2]:
            raise ValueError("Acceptable boundary flags are 'fill', 'wrap'"
                             " (or 'circular'), \n and 'symm'"
                             " (or 'symmetric').")
        val = boundary << 2
    return val
def _check_valid_mode_shapes(shape1, shape2):
for d1, d2 in zip(shape1, shape2):
if not d1 >= d2:
raise ValueError(
"in1 should have at least as many items as in2 in "
"every dimension for 'valid' mode.")
def correlate(in1, in2, mode='full'):
"""
Cross-correlate two N-dimensional arrays.
Cross-correlate `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
correlate : array
An N-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Notes
-----
The correlation z of two d-dimensional arrays x and y is defined as:
z[...,k,...] = sum[..., i_l, ...]
x[..., i_l,...] * conj(y[..., i_l + k,...])
Examples
--------
Implement a matched filter using cross-correlation, to recover a signal
that has passed through a noisy channel.
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)
>>> sig_noise = sig + np.random.randn(len(sig))
>>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128
>>> import matplotlib.pyplot as plt
>>> clock = np.arange(64, len(sig), 128)
>>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.plot(clock, sig[clock], 'ro')
>>> ax_orig.set_title('Original signal')
>>> ax_noise.plot(sig_noise)
>>> ax_noise.set_title('Signal with noise')
>>> ax_corr.plot(corr)
>>> ax_corr.plot(clock, corr[clock], 'ro')
>>> ax_corr.axhline(0.5, ls=':')
>>> ax_corr.set_title('Cross-correlated with rectangular pulse')
>>> ax_orig.margins(0, 0.1)
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
# Don't use _valfrommode, since correlate should not accept numeric modes
try:
val = _modedict[mode]
except KeyError:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
if in1.ndim == in2.ndim == 0:
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
out = np.empty(ps, in1.dtype)
z = sigtools._correlateND(in1, in2, out, val)
else:
# _correlateND is far slower when in2.size > in1.size, so swap them
# and then undo the effect afterward
swapped_inputs = (mode == 'full') and (in2.size > in1.size)
if swapped_inputs:
in1, in2 = in2, in1
ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]
# zero pad input
in1zpadded = np.zeros(ps, in1.dtype)
sc = [slice(0, i) for i in in1.shape]
in1zpadded[sc] = in1.copy()
if mode == 'full':
out = np.empty(ps, in1.dtype)
elif mode == 'same':
out = np.empty(in1.shape, in1.dtype)
z = sigtools._correlateND(in1zpadded, in2, out, val)
# Reverse and conjugate to undo the effect of swapping inputs
if swapped_inputs:
slice_obj = [slice(None, None, -1)] * len(z.shape)
z = z[slice_obj].conj()
return z
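# Note: correlate(x, y, mode) is equivalent to convolve(x, y[::-1].conj(), mode);
# `convolve` below is implemented in exactly that way.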
def _centered(arr, newsize):
# Return the center newsize portion of the array.
newsize = asarray(newsize)
currsize = array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
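# For example, _centered(np.arange(5), 3) keeps the middle portion [1, 2, 3].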
def _next_regular(target):
"""
Find the next regular number greater than or equal to target.
Regular numbers are composites of the prime factors 2, 3, and 5.
Also known as 5-smooth numbers or Hamming numbers, these are the optimal
size for inputs to FFTPACK.
Target must be a positive integer.
"""
if target <= 6:
return target
# Quickly check if it's already a power of 2
if not (target & (target-1)):
return target
match = float('inf') # Anything found will be smaller
p5 = 1
while p5 < target:
p35 = p5
while p35 < target:
# Ceiling integer division, avoiding conversion to float
# (quotient = ceil(target / p35))
quotient = -(-target // p35)
# Quickly find next power of 2 >= quotient
try:
p2 = 2**((quotient - 1).bit_length())
except AttributeError:
# Fallback for Python <2.7
p2 = 2**(len(bin(quotient - 1)) - 2)
N = p2 * p35
if N == target:
return N
elif N < match:
match = N
p35 *= 3
if p35 == target:
return p35
if p35 < match:
match = p35
p5 *= 5
if p5 == target:
return p5
if p5 < match:
match = p5
return match
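# Illustrative values (not from the original source):
#   _next_regular(13)  -> 15    (3 * 5)
#   _next_regular(511) -> 512   (2 ** 9)
#   _next_regular(768) -> 768   (already 2 ** 8 * 3)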
def fftconvolve(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT.
Convolve `in1` and `in2` using the fast Fourier transform method, with
the output size determined by the `mode` argument.
This is generally much faster than `convolve` for large arrays (n > ~500),
but can be slower when only a few output values are needed, and can only
output float arrays (int or object array inputs will be cast to float).
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
out : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Autocorrelation of white noise is an impulse. (This is at least 100 times
as fast as `convolve`.)
>>> from scipy import signal
>>> sig = np.random.randn(1000)
>>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('White noise')
>>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
>>> ax_mag.set_title('Autocorrelation')
>>> fig.show()
Gaussian blur implemented using FFT convolution. Notice the dark borders
around the image, due to the zero-padding beyond its boundaries.
The `convolve2d` function allows for other types of image boundaries,
but is far slower.
>>> from scipy import misc
>>> lena = misc.lena()
>>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8))
>>> blurred = signal.fftconvolve(lena, kernel, mode='same')
>>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(1, 3)
>>> ax_orig.imshow(lena, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_kernel.imshow(kernel, cmap='gray')
>>> ax_kernel.set_title('Gaussian kernel')
>>> ax_kernel.set_axis_off()
>>> ax_blurred.imshow(blurred, cmap='gray')
>>> ax_blurred.set_title('Blurred')
>>> ax_blurred.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if in1.ndim == in2.ndim == 0: # scalar inputs
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
elif in1.size == 0 or in2.size == 0: # empty arrays
return array([])
s1 = array(in1.shape)
s2 = array(in2.shape)
complex_result = (np.issubdtype(in1.dtype, np.complex) or
np.issubdtype(in2.dtype, np.complex))
shape = s1 + s2 - 1
if mode == "valid":
_check_valid_mode_shapes(s1, s2)
# Speed up FFT by padding to optimal size for FFTPACK
fshape = [_next_regular(int(d)) for d in shape]
fslice = tuple([slice(0, int(sz)) for sz in shape])
# Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make
# sure we only call rfftn/irfftn from one thread at a time.
if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)):
try:
ret = irfftn(rfftn(in1, fshape) *
rfftn(in2, fshape), fshape)[fslice].copy()
finally:
if not _rfft_mt_safe:
_rfft_lock.release()
else:
# If we're here, it's either because we need a complex result, or we
# failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
# is already in use by another thread). In either case, use the
# (threadsafe but slower) SciPy complex-FFT routines instead.
ret = ifftn(fftn(in1, fshape) * fftn(in2, fshape))[fslice].copy()
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
return _centered(ret, s1)
elif mode == "valid":
return _centered(ret, s1 - s2 + 1)
else:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
def convolve(in1, in2, mode='full'):
"""
Convolve two N-dimensional arrays.
Convolve `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
convolve : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
See also
--------
numpy.polymul : performs polynomial multiplication (same operation, but
also accepts poly1d objects)
"""
volume = asarray(in1)
kernel = asarray(in2)
if volume.ndim == kernel.ndim == 0:
return volume * kernel
slice_obj = [slice(None, None, -1)] * len(kernel.shape)
if np.iscomplexobj(kernel):
return correlate(volume, kernel[slice_obj].conj(), mode)
else:
return correlate(volume, kernel[slice_obj], mode)
def order_filter(a, domain, rank):
"""
Perform an order filter on an N-dimensional array.
Perform an order filter on the array in. The domain argument acts as a
mask centered over each pixel. The non-zero elements of domain are
used to select elements surrounding each input pixel which are placed
in a list. The list is sorted, and the output for that pixel is the
element corresponding to rank in the sorted list.
Parameters
----------
a : ndarray
The N-dimensional input array.
domain : array_like
        A mask array with the same number of dimensions as `a`.
Each dimension should have an odd number of elements.
rank : int
A non-negative integer which selects the element from the
sorted list (0 corresponds to the smallest element, 1 is the
next smallest element, etc.).
Returns
-------
out : ndarray
The results of the order filter in an array with the same
        shape as `a`.
Examples
--------
>>> from scipy import signal
>>> x = np.arange(25).reshape(5, 5)
>>> domain = np.identity(3)
>>> x
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> signal.order_filter(x, domain, 0)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 2., 0.],
[ 0., 5., 6., 7., 0.],
[ 0., 10., 11., 12., 0.],
[ 0., 0., 0., 0., 0.]])
>>> signal.order_filter(x, domain, 2)
array([[ 6., 7., 8., 9., 4.],
[ 11., 12., 13., 14., 9.],
[ 16., 17., 18., 19., 14.],
[ 21., 22., 23., 24., 19.],
[ 20., 21., 22., 23., 24.]])
"""
domain = asarray(domain)
size = domain.shape
for k in range(len(size)):
if (size[k] % 2) != 1:
raise ValueError("Each dimension of domain argument "
" should have an odd number of elements.")
return sigtools._order_filterND(a, domain, rank)
def medfilt(volume, kernel_size=None):
"""
Perform a median filter on an N-dimensional array.
Apply a median filter to the input array using a local window-size
given by `kernel_size`.
Parameters
----------
volume : array_like
An N-dimensional input array.
kernel_size : array_like, optional
A scalar or an N-length list giving the size of the median filter
window in each dimension. Elements of `kernel_size` should be odd.
If `kernel_size` is a scalar, then this scalar is used as the size in
each dimension. Default size is 3 for each dimension.
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
"""
volume = atleast_1d(volume)
if kernel_size is None:
kernel_size = [3] * len(volume.shape)
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), volume.ndim)
for k in range(len(volume.shape)):
if (kernel_size[k] % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
domain = ones(kernel_size)
numels = product(kernel_size, axis=0)
order = numels // 2
return sigtools._order_filterND(volume, domain, order)
def wiener(im, mysize=None, noise=None):
"""
Perform a Wiener filter on an N-dimensional array.
Apply a Wiener filter to the N-dimensional array `im`.
Parameters
----------
im : ndarray
An N-dimensional array.
    mysize : int or array_like, optional
A scalar or an N-length list giving the size of the Wiener filter
window in each dimension. Elements of mysize should be odd.
If mysize is a scalar, then this scalar is used as the size
in each dimension.
noise : float, optional
The noise-power to use. If None, then noise is estimated as the
average of the local variance of the input.
Returns
-------
out : ndarray
Wiener filtered result with the same shape as `im`.
"""
im = asarray(im)
if mysize is None:
mysize = [3] * len(im.shape)
mysize = asarray(mysize)
if mysize.shape == ():
mysize = np.repeat(mysize.item(), im.ndim)
# Estimate the local mean
lMean = correlate(im, ones(mysize), 'same') / product(mysize, axis=0)
# Estimate the local variance
lVar = (correlate(im ** 2, ones(mysize), 'same') / product(mysize, axis=0)
- lMean ** 2)
# Estimate the noise power if needed.
if noise is None:
noise = mean(ravel(lVar), axis=0)
res = (im - lMean)
res *= (1 - noise / lVar)
res += lMean
out = where(lVar < noise, lMean, res)
return out
def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Convolve two 2-dimensional arrays.
Convolve `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1, in2 : array_like
Two-dimensional input arrays to be convolved.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
out : ndarray
A 2-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Compute the gradient of an image by 2D convolution with a complex Scharr
operator. (Horizontal operator is real, vertical is imaginary.) Use
symmetric boundary condition to avoid creating edges at the image
boundaries.
>>> from scipy import signal
>>> from scipy import misc
>>> lena = misc.lena()
>>> scharr = np.array([[ -3-3j, 0-10j, +3 -3j],
... [-10+0j, 0+ 0j, +10 +0j],
... [ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy
>>> grad = signal.convolve2d(lena, scharr, boundary='symm', mode='same')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(1, 3)
>>> ax_orig.imshow(lena, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_mag.imshow(np.absolute(grad), cmap='gray')
>>> ax_mag.set_title('Gradient magnitude')
>>> ax_mag.set_axis_off()
>>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles
>>> ax_ang.set_title('Gradient orientation')
>>> ax_ang.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# FIXME: some cast generates a warning here
out = sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)
return out
def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Cross-correlate two 2-dimensional arrays.
Cross correlate `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1, in2 : array_like
Two-dimensional input arrays to be convolved.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
correlate2d : ndarray
A 2-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Examples
--------
Use 2D cross-correlation to find the location of a template in a noisy
image:
>>> from scipy import signal
>>> from scipy import misc
>>> lena = misc.lena() - misc.lena().mean()
>>> template = np.copy(lena[235:295, 310:370]) # right eye
>>> template -= template.mean()
>>> lena = lena + np.random.randn(*lena.shape) * 50 # add noise
>>> corr = signal.correlate2d(lena, template, boundary='symm', mode='same')
>>> y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(1, 3)
>>> ax_orig.imshow(lena, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_template.imshow(template, cmap='gray')
>>> ax_template.set_title('Template')
>>> ax_template.set_axis_off()
>>> ax_corr.imshow(corr, cmap='gray')
>>> ax_corr.set_title('Cross-correlation')
>>> ax_corr.set_axis_off()
>>> ax_orig.plot(x, y, 'ro')
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# FIXME: some cast generates a warning here
out = sigtools._convolve2d(in1, in2, 0, val, bval, fillvalue)
return out
def medfilt2d(input, kernel_size=3):
"""
Median filter a 2-dimensional array.
Apply a median filter to the `input` array using a local window-size
given by `kernel_size` (must be odd).
Parameters
----------
input : array_like
A 2-dimensional input array.
kernel_size : array_like, optional
A scalar or a list of length 2, giving the size of the
median filter window in each dimension. Elements of
`kernel_size` should be odd. If `kernel_size` is a scalar,
then this scalar is used as the size in each dimension.
Default is a kernel of size (3, 3).
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
"""
image = asarray(input)
if kernel_size is None:
kernel_size = [3] * 2
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), 2)
for size in kernel_size:
if (size % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
return sigtools._medfilt2d(image, kernel_size)
def lfilter(b, a, x, axis=-1, zi=None):
"""
Filter data along one-dimension with an IIR or FIR filter.
Filter a data sequence, `x`, using a digital filter. This works for many
fundamental data types (including Object type). The filter is a direct
form II transposed implementation of the standard difference equation
(see Notes).
Parameters
----------
b : array_like
The numerator coefficient vector in a 1-D sequence.
a : array_like
The denominator coefficient vector in a 1-D sequence. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
An N-dimensional input array.
axis : int
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the filter delays. It is a vector
(or array of vectors for an N-dimensional input) of length
``max(len(a),len(b))-1``. If `zi` is None or is not given then
initial rest is assumed. See `lfiltic` for more information.
Returns
-------
y : array
The output of the digital filter.
zf : array, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
Notes
-----
The filter function is implemented as a direct II transposed structure.
This means that the filter implements::
a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[nb]*x[n-nb]
- a[1]*y[n-1] - ... - a[na]*y[n-na]
using the following difference equations::
y[m] = b[0]*x[m] + z[0,m-1]
z[0,m] = b[1]*x[m] + z[1,m-1] - a[1]*y[m]
...
z[n-3,m] = b[n-2]*x[m] + z[n-2,m-1] - a[n-2]*y[m]
z[n-2,m] = b[n-1]*x[m] - a[n-1]*y[m]
where m is the output sample number and n=max(len(a),len(b)) is the
model order.
The rational transfer function describing this filter in the
z-transform domain is::
                 b[0] + b[1] z**(-1) + ... + b[nb] z**(-nb)
        Y(z) = ---------------------------------------------- X(z)
                 a[0] + a[1] z**(-1) + ... + a[na] z**(-na)
"""
if isscalar(a):
a = [a]
if zi is None:
return sigtools._linear_filter(b, a, x, axis)
else:
return sigtools._linear_filter(b, a, x, axis, zi)
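# A small usage sketch (variable names are illustrative): a causal 3-point
# moving average is the FIR case b = [1/3, 1/3, 1/3], a = [1.0], e.g.
#     smoothed = lfilter(np.ones(3) / 3, [1.0], noisy_signal)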
def lfiltic(b, a, y, x=None):
"""
Construct initial conditions for lfilter.
Given a linear filter (b, a) and initial conditions on the output `y`
and the input `x`, return the initial conditions on the state vector zi
which is used by `lfilter` to generate the output given the input.
Parameters
----------
b : array_like
Linear filter term.
a : array_like
Linear filter term.
y : array_like
Initial conditions.
If ``N=len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.
If `y` is too short, it is padded with zeros.
x : array_like, optional
Initial conditions.
If ``M=len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.
If `x` is not given, its initial conditions are assumed zero.
If `x` is too short, it is padded with zeros.
Returns
-------
zi : ndarray
The state vector ``zi``.
``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``, where ``K = max(M,N)``.
See Also
--------
lfilter
"""
N = np.size(a) - 1
M = np.size(b) - 1
K = max(M, N)
y = asarray(y)
if y.dtype.kind in 'bui':
# ensure calculations are floating point
y = y.astype(np.float64)
zi = zeros(K, y.dtype)
if x is None:
x = zeros(M, y.dtype)
else:
x = asarray(x)
L = np.size(x)
if L < M:
x = r_[x, zeros(M - L)]
L = np.size(y)
if L < N:
y = r_[y, zeros(N - L)]
for m in range(M):
zi[m] = sum(b[m + 1:] * x[:M - m], axis=0)
for m in range(N):
zi[m] -= sum(a[m + 1:] * y[:N - m], axis=0)
return zi
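# Typical pairing with `lfilter` (a sketch; array names are illustrative and
# assume chronological order, hence the reversal to "most recent first"):
#     zi = lfiltic(b, a, y_prev[::-1], x_prev[::-1])
#     y_block, zf = lfilter(b, a, x_block, zi=zi)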
def deconvolve(signal, divisor):
"""Deconvolves ``divisor`` out of ``signal``.
Returns the quotient and remainder such that
``signal = convolve(divisor, quotient) + remainder``
Parameters
----------
signal : array_like
Signal data, typically a recorded signal
divisor : array_like
Divisor data, typically an impulse response or filter that was
applied to the original signal
Returns
-------
quotient : ndarray
Quotient, typically the recovered original signal
remainder : ndarray
Remainder
Examples
--------
Deconvolve a signal that's been filtered:
>>> from scipy import signal
>>> original = [0, 1, 0, 0, 1, 1, 0, 0]
>>> impulse_response = [2, 1]
>>> recorded = signal.convolve(impulse_response, original)
>>> recorded
array([0, 2, 1, 0, 2, 3, 1, 0, 0])
>>> recovered, remainder = signal.deconvolve(recorded, impulse_response)
>>> recovered
array([ 0., 1., 0., 0., 1., 1., 0., 0.])
See also
--------
numpy.polydiv : performs polynomial division (same operation, but
also accepts poly1d objects)
"""
num = atleast_1d(signal)
den = atleast_1d(divisor)
N = len(num)
D = len(den)
if D > N:
quot = []
rem = num
else:
input = ones(N - D + 1, float)
input[1:] = 0
quot = lfilter(num, den, input)
rem = num - convolve(den, quot, mode='full')
return quot, rem
def hilbert(x, N=None, axis=-1):
"""
Compute the analytic signal, using the Hilbert transform.
The transformation is done along the last axis by default.
Parameters
----------
x : array_like
Signal data. Must be real.
N : int, optional
Number of Fourier components. Default: ``x.shape[axis]``
axis : int, optional
Axis along which to do the transformation. Default: -1.
Returns
-------
xa : ndarray
Analytic signal of `x`, of each 1-D array along `axis`
Notes
-----
The analytic signal ``x_a(t)`` of signal ``x(t)`` is:
.. math:: x_a = F^{-1}(F(x) 2U) = x + i y
where `F` is the Fourier transform, `U` the unit step function,
and `y` the Hilbert transform of `x`. [1]_
In other words, the negative half of the frequency spectrum is zeroed
out, turning the real-valued signal into a complex signal. The Hilbert
transformed signal can be obtained from ``np.imag(hilbert(x))``, and the
original signal from ``np.real(hilbert(x))``.
References
----------
.. [1] Wikipedia, "Analytic signal".
http://en.wikipedia.org/wiki/Analytic_signal
"""
x = asarray(x)
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape[axis]
if N <= 0:
raise ValueError("N must be positive.")
Xf = fft(x, N, axis=axis)
h = zeros(N)
if N % 2 == 0:
h[0] = h[N // 2] = 1
h[1:N // 2] = 2
else:
h[0] = 1
h[1:(N + 1) // 2] = 2
if len(x.shape) > 1:
ind = [newaxis] * x.ndim
ind[axis] = slice(None)
h = h[ind]
x = ifft(Xf * h, axis=axis)
return x
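# A common usage sketch (names are illustrative): the amplitude envelope and
# instantaneous phase of a real signal follow from its analytic signal:
#     analytic = hilbert(sig)
#     envelope = np.abs(analytic)
#     phase = np.unwrap(np.angle(analytic))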
def hilbert2(x, N=None):
"""
Compute the '2-D' analytic signal of `x`
Parameters
----------
x : array_like
2-D signal data.
N : int or tuple of two ints, optional
Number of Fourier components. Default is ``x.shape``
Returns
-------
xa : ndarray
Analytic signal of `x` taken along axes (0,1).
References
----------
.. [1] Wikipedia, "Analytic signal",
http://en.wikipedia.org/wiki/Analytic_signal
"""
x = atleast_2d(x)
if len(x.shape) > 2:
raise ValueError("x must be 2-D.")
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape
elif isinstance(N, int):
if N <= 0:
raise ValueError("N must be positive.")
N = (N, N)
elif len(N) != 2 or np.any(np.asarray(N) <= 0):
raise ValueError("When given as a tuple, N must hold exactly "
"two positive integers")
Xf = fft2(x, N, axes=(0, 1))
    h1 = zeros(N[0], 'd')
    h2 = zeros(N[1], 'd')
    # Build the per-axis step-function weights in place (the arrays above are
    # modified directly, so no eval/exec indirection is needed).
    for h, N1 in zip((h1, h2), N):
        if N1 % 2 == 0:
            h[0] = h[N1 // 2] = 1
            h[1:N1 // 2] = 2
        else:
            h[0] = 1
            h[1:(N1 + 1) // 2] = 2
h = h1[:, newaxis] * h2[newaxis, :]
k = len(x.shape)
while k > 2:
h = h[:, newaxis]
k -= 1
x = ifft2(Xf * h, axes=(0, 1))
return x
def cmplx_sort(p):
"""Sort roots based on magnitude.
Parameters
----------
p : array_like
The roots to sort, as a 1-D array.
Returns
-------
p_sorted : ndarray
Sorted roots.
indx : ndarray
Array of indices needed to sort the input `p`.
"""
p = asarray(p)
if iscomplexobj(p):
indx = argsort(abs(p))
else:
indx = argsort(p)
return take(p, indx, 0), indx
def unique_roots(p, tol=1e-3, rtype='min'):
"""
Determine unique roots and their multiplicities from a list of roots.
Parameters
----------
p : array_like
The list of roots.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
pout : ndarray
The list of unique roots, sorted from low to high.
mult : ndarray
The multiplicity of each root.
Notes
-----
This utility function is not specific to roots but can be used for any
sequence of values for which uniqueness and multiplicity has to be
determined. For a more general routine, see `numpy.unique`.
Examples
--------
>>> from scipy import signal
>>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
>>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')
Check which roots have multiplicity larger than 1:
>>> uniq[mult > 1]
array([ 1.305])
"""
if rtype in ['max', 'maximum']:
comproot = np.max
elif rtype in ['min', 'minimum']:
comproot = np.min
elif rtype in ['avg', 'mean']:
comproot = np.mean
else:
raise ValueError("`rtype` must be one of "
"{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
p = asarray(p) * 1.0
tol = abs(tol)
p, indx = cmplx_sort(p)
pout = []
mult = []
indx = -1
curp = p[0] + 5 * tol
sameroots = []
for k in range(len(p)):
tr = p[k]
if abs(tr - curp) < tol:
sameroots.append(tr)
curp = comproot(sameroots)
pout[indx] = curp
mult[indx] += 1
else:
pout.append(tr)
curp = tr
sameroots = [tr]
indx += 1
mult.append(1)
return array(pout), array(mult)
def invres(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(s) and a(s) from partial fraction expansion.
If ``M = len(b)`` and ``N = len(a)``::
               b(s)     b[0] s**(M-1) + b[1] s**(M-2) + ... + b[M-1]
        H(s) = ------ = ----------------------------------------------
               a(s)     a[0] s**(N-1) + a[1] s**(N-2) + ... + a[N-1]
                 r[0]       r[1]             r[-1]
             = -------- + -------- + ... + --------- + k(s)
               (s-p[0])   (s-p[1])         (s-p[-1])
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
Parameters
----------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
'max': pick the maximum of those roots.
'min': pick the minimum of those roots.
'avg': take the average of those roots.
See Also
--------
residue, unique_roots
"""
extra = k
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
for k in range(len(pout)):
temp = []
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
b = polyadd(b, r[indx] * poly(t2))
indx += 1
b = real_if_close(b)
while allclose(b[0], 0, rtol=1e-14) and (b.shape[-1] > 1):
b = b[1:]
return b, a
def residue(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(s) / a(s).
If ``M = len(b)`` and ``N = len(a)``, then the partial-fraction
expansion H(s) is defined as::
b(s) b[0] s**(M-1) + b[1] s**(M-2) + ... + b[M-1]
H(s) = ------ = ----------------------------------------------
a(s) a[0] s**(N-1) + a[1] s**(N-2) + ... + a[N-1]
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invres, numpy.poly, unique_roots
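Examples
--------
A minimal illustrative sketch with toy coefficients, assuming the
standard `scipy.signal` namespace (outputs not shown):
>>> from scipy import signal
>>> b, a = [1.0], [1.0, 3.0, 2.0]
>>> r, p, k = signal.residue(b, a)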
"""
b, a = map(asarray, (b, a))
rscale = a[0]
k, b = polydiv(b, a)
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula
indx = 0
for n in range(len(pout)):
bn = b.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))
# bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is
# multiplicity of pole at po[n]
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, pout[n]) / polyval(an, pout[n])
/ factorial(sig - m))
indx += sig
return r / rscale, p, k
def residuez(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(z) / a(z).
If ``M = len(b)`` and ``N = len(a)``::
b(z) b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
H(z) = ------ = ----------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
See also
--------
invresz, unique_roots
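Examples
--------
A minimal illustrative sketch for a first-order discrete-time system,
assuming the standard `scipy.signal` namespace (outputs not shown):
>>> from scipy import signal
>>> r, p, k = signal.residuez([1.0], [1.0, -0.5])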
"""
b, a = map(asarray, (b, a))
gain = a[0]
brev, arev = b[::-1], a[::-1]
krev, brev = polydiv(brev, arev)
if krev == []:
k = []
else:
k = krev[::-1]
b = brev[::-1]
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula (for discrete-time)
# the polynomial is in z**(-1) and the multiplication is by terms
# like this (1-p[i] z**(-1))**mult[i]. After differentiation,
# we must divide by (-p[i])**(m-k) as well as (m-k)!
indx = 0
for n in range(len(pout)):
bn = brev.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))[::-1]
# bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is
# multiplicity of pole at po[n] and b(z) and a(z) are polynomials.
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, 1.0 / pout[n]) /
polyval(an, 1.0 / pout[n]) /
factorial(sig - m) / (-pout[n]) ** (sig - m))
indx += sig
return r / gain, p, k
def invresz(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(z) and a(z) from partial fraction expansion.
If ``M = len(b)`` and ``N = len(a)``::
b(z) b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
H(z) = ------ = ----------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1)...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
See Also
--------
residuez, unique_roots, invres
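Examples
--------
A minimal illustrative sketch, assuming the standard `scipy.signal`
namespace: rebuild ``b`` and ``a`` from the expansion returned by
`residuez` (outputs not shown).
>>> from scipy import signal
>>> r, p, k = signal.residuez([1.0], [1.0, -0.5])
>>> b, a = signal.invresz(r, p, k)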
"""
extra = asarray(k)
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
brev = asarray(b)[::-1]
for k in range(len(pout)):
temp = []
# Construct polynomial which does not include any of this root
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
brev = polyadd(brev, (r[indx] * poly(t2))[::-1])
indx += 1
b = real_if_close(brev[::-1])
return b, a
def resample(x, num, t=None, axis=0, window=None):
"""
Resample `x` to `num` samples using Fourier method along the given axis.
The resampled signal starts at the same value as `x` but is sampled
with a spacing of ``len(x) / num * (spacing of x)``. Because a
Fourier method is used, the signal is assumed to be periodic.
Parameters
----------
x : array_like
The data to be resampled.
num : int
The number of samples in the resampled signal.
t : array_like, optional
If `t` is given, it is assumed to be the sample positions
associated with the signal data in `x`.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : array_like, callable, string, float, or tuple, optional
Specifies the window applied to the signal in the Fourier
domain. See below for details.
Returns
-------
resampled_x or (resampled_x, resampled_t)
Either the resampled array, or, if `t` was given, a tuple
containing the resampled array and the corresponding resampled
positions.
Notes
-----
The argument `window` controls a Fourier-domain window that tapers
the Fourier spectrum before zero-padding to alleviate ringing in
the resampled values for sampled signals you didn't intend to be
interpreted as band-limited.
If `window` is a function, then it is called with a vector of inputs
indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ).
If `window` is an array of the same length as `x.shape[axis]` it is
assumed to be the window to be applied directly in the Fourier
domain (with dc and low-frequency first).
For any other type of `window`, the function `scipy.signal.get_window`
is called to generate the window.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * len(x) / num``.
If `t` is not None, then it represents the old sample positions,
and the new sample positions will be returned as well as the new
samples.
As noted, `resample` uses FFT transformations, which can be very
slow if the number of input samples is large and prime, see
`scipy.fftpack.fft`.
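Examples
--------
A minimal illustrative sketch, assuming the standard `scipy.signal`
namespace: downsample 100 points of a sine wave to 25 points.
>>> import numpy as np
>>> from scipy import signal
>>> t = np.linspace(0, 1, 100, endpoint=False)
>>> x = np.sin(2 * np.pi * 5 * t)
>>> y = signal.resample(x, 25)
>>> y.shape
(25,)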
"""
x = asarray(x)
X = fft(x, axis=axis)
Nx = x.shape[axis]
if window is not None:
if callable(window):
W = window(fftfreq(Nx))
elif isinstance(window, ndarray) and window.shape == (Nx,):
W = window
else:
W = ifftshift(get_window(window, Nx))
newshape = [1] * x.ndim
newshape[axis] = len(W)
W.shape = newshape
X = X * W
sl = [slice(None)] * len(x.shape)
newshape = list(x.shape)
newshape[axis] = num
N = int(np.minimum(num, Nx))
Y = zeros(newshape, 'D')
sl[axis] = slice(0, (N + 1) // 2)
Y[sl] = X[sl]
sl[axis] = slice(-(N - 1) // 2, None)
Y[sl] = X[sl]
y = ifft(Y, axis=axis) * (float(num) / float(Nx))
if x.dtype.char not in ['F', 'D']:
y = y.real
if t is None:
return y
else:
new_t = arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]
return y, new_t
def vectorstrength(events, period):
'''
Determine the vector strength of the events corresponding to the given
period.
The vector strength is a measure of phase synchrony, how well the
timing of the events is synchronized to a single period of a periodic
signal.
If multiple periods are used, calculate the vector strength of each.
This is called the "resonating vector strength".
Parameters
----------
events : 1D array_like
An array of time points containing the timing of the events.
period : float or array_like
The period of the signal that the events should synchronize to.
The period is in the same units as `events`. It can also be an array
of periods, in which case the outputs are arrays of the same length.
Returns
-------
strength : float or 1D array
The strength of the synchronization. 1.0 is perfect synchronization
and 0.0 is no synchronization. If `period` is an array, this is also
an array with each element containing the vector strength at the
corresponding period.
phase : float or array
The phase that the events are most strongly synchronized to in radians.
If `period` is an array, this is also an array with each element
containing the phase for the corresponding period.
References
----------
van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector
strength: Auditory system, electric fish, and noise.
Chaos 21, 047508 (2011);
doi: 10.1063/1.3670512
van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises:
biological and mathematical perspectives. Biol Cybern.
2013 Aug;107(4):385-96. doi: 10.1007/s00422-013-0561-7.
van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens
when we vary the "probing" frequency while keeping the spike times
fixed. Biol Cybern. 2013 Aug;107(4):491-94.
doi: 10.1007/s00422-013-0560-8
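Examples
--------
A minimal illustrative sketch with toy event times, assuming the
standard `scipy.signal` namespace; events that fall exactly once per
period are perfectly synchronized, so the returned strength is 1.0.
>>> from scipy import signal
>>> strength, phase = signal.vectorstrength([0.0, 1.0, 2.0, 3.0], 1.0)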
'''
events = asarray(events)
period = asarray(period)
if events.ndim > 1:
raise ValueError('events cannot have dimensions more than 1')
if period.ndim > 1:
raise ValueError('period cannot have dimensions more than 1')
# we need to know later if period was originally a scalar
scalarperiod = not period.ndim
events = atleast_2d(events)
period = atleast_2d(period)
if (period <= 0).any():
raise ValueError('periods must be positive')
# this converts the times to vectors
vectors = exp(dot(2j*pi/period.T, events))
# the vector strength is just the magnitude of the mean of the vectors
# the vector phase is the angle of the mean of the vectors
vectormean = mean(vectors, axis=1)
strength = abs(vectormean)
phase = angle(vectormean)
# if the original period was a scalar, return scalars
if scalarperiod:
strength = strength[0]
phase = phase[0]
return strength, phase
def detrend(data, axis=-1, type='linear', bp=0):
"""
Remove linear trend along axis from data.
Parameters
----------
data : array_like
The input data.
axis : int, optional
The axis along which to detrend the data. By default this is the
last axis (-1).
type : {'linear', 'constant'}, optional
The type of detrending. If ``type == 'linear'`` (default),
the result of a linear least-squares fit to `data` is subtracted
from `data`.
If ``type == 'constant'``, only the mean of `data` is subtracted.
bp : array_like of ints, optional
A sequence of break points. If given, an individual linear fit is
performed for each part of `data` between two break points.
Break points are specified as indices into `data`.
Returns
-------
ret : ndarray
The detrended input data.
Examples
--------
>>> from scipy import signal
>>> randgen = np.random.RandomState(9)
>>> npoints = 1000
>>> noise = randgen.randn(npoints)
>>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
>>> (signal.detrend(x) - noise).max() < 0.01
True
"""
if type not in ['linear', 'l', 'constant', 'c']:
raise ValueError("Trend type must be 'linear' or 'constant'.")
data = asarray(data)
dtype = data.dtype.char
if dtype not in 'dfDF':
dtype = 'd'
if type in ['constant', 'c']:
ret = data - expand_dims(mean(data, axis), axis)
return ret
else:
dshape = data.shape
N = dshape[axis]
bp = sort(unique(r_[0, bp, N]))
if np.any(bp > N):
raise ValueError("Breakpoints must be less than length "
"of data along given axis.")
Nreg = len(bp) - 1
# Restructure data so that axis is along first dimension and
# all other dimensions are collapsed into second dimension
rnk = len(dshape)
if axis < 0:
axis = axis + rnk
newdims = r_[axis, 0:axis, axis + 1:rnk]
newdata = reshape(transpose(data, tuple(newdims)),
(N, prod(dshape, axis=0) // N))
newdata = newdata.copy() # make sure we have a copy
if newdata.dtype.char not in 'dfDF':
newdata = newdata.astype(dtype)
# Find leastsq fit and remove it for each piece
for m in range(Nreg):
Npts = bp[m + 1] - bp[m]
A = ones((Npts, 2), dtype)
A[:, 0] = cast[dtype](arange(1, Npts + 1) * 1.0 / Npts)
sl = slice(bp[m], bp[m + 1])
coef, resids, rank, s = linalg.lstsq(A, newdata[sl])
newdata[sl] = newdata[sl] - dot(A, coef)
# Put data back in original shape.
tdshape = take(dshape, newdims, 0)
ret = reshape(newdata, tuple(tdshape))
vals = list(range(1, rnk))
olddims = vals[:axis] + [0] + vals[axis:]
ret = transpose(ret, tuple(olddims))
return ret
def lfilter_zi(b, a):
"""
Compute an initial state `zi` for the lfilter function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
b, a : array_like (1-D)
The IIR filter coefficients. See `lfilter` for more
information.
Returns
-------
zi : 1-D ndarray
The initial state for the filter.
Notes
-----
A linear filter with order m has a state space representation (A, B, C, D),
for which the output y of the filter can be expressed as::
z(n+1) = A*z(n) + B*x(n)
y(n) = C*z(n) + D*x(n)
where z(n) is a vector of length m, A has shape (m, m), B has shape
(m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is
a scalar). lfilter_zi solves::
zi = A*zi + B
In other words, it finds the initial condition for which the response
to an input of all ones is a constant.
Given the filter coefficients `a` and `b`, the state space matrices
for the transposed direct form II implementation of the linear filter,
which is the implementation used by scipy.signal.lfilter, are::
A = scipy.linalg.companion(a).T
B = b[1:] - a[1:]*b[0]
assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first
divided by a[0].
Examples
--------
The following code creates a lowpass Butterworth filter. Then it
applies that filter to an array whose values are all 1.0; the
output is also all 1.0, as expected for a lowpass filter. If the
`zi` argument of `lfilter` had not been given, the output would have
shown the transient signal.
>>> from numpy import array, ones
>>> from scipy.signal import lfilter, lfilter_zi, butter
>>> b, a = butter(5, 0.25)
>>> zi = lfilter_zi(b, a)
>>> y, zo = lfilter(b, a, ones(10), zi=zi)
>>> y
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
Another example:
>>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])
>>> y, zf = lfilter(b, a, x, zi=zi*x[0])
>>> y
array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528,
0.44399389, 0.35505241])
Note that the `zi` argument to `lfilter` was computed using
`lfilter_zi` and scaled by `x[0]`. Then the output `y` has no
transient until the input drops from 0.5 to 0.0.
"""
# FIXME: Can this function be replaced with an appropriate
# use of lfiltic? For example, when b,a = butter(N,Wn),
# lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).
#
# We could use scipy.signal.normalize, but it uses warnings in
# cases where a ValueError is more appropriate, and it allows
# b to be 2D.
b = np.atleast_1d(b)
if b.ndim != 1:
raise ValueError("Numerator b must be 1-D.")
a = np.atleast_1d(a)
if a.ndim != 1:
raise ValueError("Denominator a must be 1-D.")
while len(a) > 1 and a[0] == 0.0:
a = a[1:]
if a.size < 1:
raise ValueError("There must be at least one nonzero `a` coefficient.")
if a[0] != 1.0:
# Normalize the coefficients so a[0] == 1.
b = b / a[0]
a = a / a[0]
n = max(len(a), len(b))
# Pad a or b with zeros so they are the same length.
if len(a) < n:
a = np.r_[a, np.zeros(n - len(a))]
elif len(b) < n:
b = np.r_[b, np.zeros(n - len(b))]
IminusA = np.eye(n - 1) - linalg.companion(a).T
B = b[1:] - a[1:] * b[0]
# Solve zi = A*zi + B
zi = np.linalg.solve(IminusA, B)
# For future reference: we could also use the following
# explicit formulas to solve the linear system:
#
# zi = np.zeros(n - 1)
# zi[0] = B.sum() / IminusA[:,0].sum()
# asum = 1.0
# csum = 0.0
# for k in range(1,n-1):
# asum += a[k]
# csum += b[k] - a[k]*b[0]
# zi[k] = asum*zi[0] - csum
return zi
def sosfilt_zi(sos):
"""
Compute an initial state `zi` for the sosfilt function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
zi : ndarray
Initial conditions suitable for use with ``sosfilt``, shape
``(n_sections, 2)``.
See Also
--------
sosfilt, zpk2sos
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
Filter a rectangular pulse that begins at time 0, with and without
the use of the `zi` argument of `scipy.signal.sosfilt`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sos = signal.butter(9, 0.125, output='sos')
>>> zi = signal.sosfilt_zi(sos)
>>> x = (np.arange(250) < 100).astype(int)
>>> f1 = signal.sosfilt(sos, x)
>>> f2, zo = signal.sosfilt(sos, x, zi=zi)
>>> plt.plot(x, 'k--', label='x')
>>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered')
>>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi')
>>> plt.legend(loc='best')
>>> plt.show()
"""
sos = np.asarray(sos)
if sos.ndim != 2 or sos.shape[1] != 6:
raise ValueError('sos must be shape (n_sections, 6)')
n_sections = sos.shape[0]
zi = np.empty((n_sections, 2))
scale = 1.0
for section in range(n_sections):
b = sos[section, :3]
a = sos[section, 3:]
zi[section] = scale * lfilter_zi(b, a)
# If H(z) = B(z)/A(z) is this section's transfer function, then
# b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady
# state value of this section's step response.
scale *= b.sum() / a.sum()
return zi
def _filtfilt_gust(b, a, x, axis=-1, irlen=None):
"""Forward-backward IIR filter that uses Gustafsson's method.
Apply the IIR filter defined by `(b,a)` to `x` twice, first forward
then backward, using Gustafsson's initial conditions [1]_.
Let ``y_fb`` be the result of filtering first forward and then backward,
and let ``y_bf`` be the result of filtering first backward then forward.
Gustafsson's method is to compute initial conditions for the forward
pass and the backward pass such that ``y_fb == y_bf``.
Parameters
----------
b : scalar or 1-D ndarray
Numerator coefficients of the filter.
a : scalar or 1-D ndarray
Denominator coefficients of the filter.
x : ndarray
Data to be filtered.
axis : int, optional
Axis of `x` to be filtered. Default is -1.
irlen : int or None, optional
The length of the nonnegligible part of the impulse response.
If `irlen` is None, or if the length of the signal is less than
``2 * irlen``, then no part of the impulse response is ignored.
Returns
-------
y : ndarray
The filtered data.
x0 : ndarray
Initial condition for the forward filter.
x1 : ndarray
Initial condition for the backward filter.
Notes
-----
Typically the return values `x0` and `x1` are not needed by the
caller. The intended use of these return values is in unit tests.
References
----------
.. [1] F. Gustafsson. Determining the initial states in forward-backward
filtering. Transactions on Signal Processing, 46(4):988-992, 1996.
"""
# In the comments, "Gustafsson's paper" and [1] refer to the
# paper referenced in the docstring.
b = np.atleast_1d(b)
a = np.atleast_1d(a)
order = max(len(b), len(a)) - 1
if order == 0:
# The filter is just scalar multiplication, with no state.
scale = (b[0] / a[0])**2
y = scale * x
return y, np.array([]), np.array([])
if axis != -1 and axis != x.ndim - 1:
# Move the axis containing the data to the end.
x = np.swapaxes(x, axis, x.ndim - 1)
# n is the number of samples in the data to be filtered.
n = x.shape[-1]
if irlen is None or n <= 2*irlen:
m = n
else:
m = irlen
# Create Obs, the observability matrix (called O in the paper).
# This matrix can be interpreted as the operator that propagates
# an arbitrary initial state to the output, assuming the input is
# zero.
# In Gustafsson's paper, the forward and backward filters are not
# necessarily the same, so he has both O_f and O_b. We use the same
# filter in both directions, so we only need O. The same comment
# applies to S below.
Obs = np.zeros((m, order))
zi = np.zeros(order)
zi[0] = 1
Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0]
for k in range(1, order):
Obs[k:, k] = Obs[:-k, 0]
# Obsr is O^R (Gustafsson's notation for row-reversed O)
Obsr = Obs[::-1]
# Create S. S is the matrix that applies the filter to the reversed
# propagated initial conditions. That is,
# out = S.dot(zi)
# is the same as
# tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs.
# out = lfilter(b, a, tmp[::-1]) # Reverse and filter.
# Equations (5) & (6) of [1]
S = lfilter(b, a, Obs[::-1], axis=0)
# Sr is S^R (row-reversed S)
Sr = S[::-1]
# M is [(S^R - O), (O^R - S)]
if m == n:
M = np.hstack((Sr - Obs, Obsr - S))
else:
# Matrix described in section IV of [1].
M = np.zeros((2*m, 2*order))
M[:m, :order] = Sr - Obs
M[m:, order:] = Obsr - S
# Naive forward-backward and backward-forward filters.
# These have large transients because the filters use zero initial
# conditions.
y_f = lfilter(b, a, x)
y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1]
y_b = lfilter(b, a, x[..., ::-1])[..., ::-1]
y_bf = lfilter(b, a, y_b)
delta_y_bf_fb = y_bf - y_fb
if m == n:
delta = delta_y_bf_fb
else:
start_m = delta_y_bf_fb[..., :m]
end_m = delta_y_bf_fb[..., -m:]
delta = np.concatenate((start_m, end_m), axis=-1)
# ic_opt holds the "optimal" initial conditions.
# The following code computes the result shown in the formula
# of the paper between equations (6) and (7).
if delta.ndim == 1:
ic_opt = linalg.lstsq(M, delta)[0]
else:
# Reshape delta so it can be used as an array of multiple
# right-hand-sides in linalg.lstsq.
delta2d = delta.reshape(-1, delta.shape[-1]).T
ic_opt0 = linalg.lstsq(M, delta2d)[0].T
ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],))
# Now compute the filtered signal using equation (7) of [1].
# First, form [S^R, O^R] and call it W.
if m == n:
W = np.hstack((Sr, Obsr))
else:
W = np.zeros((2*m, 2*order))
W[:m, :order] = Sr
W[m:, order:] = Obsr
# Equation (7) of [1] says
# Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt]
# `wic` is (almost) the product on the right.
# W has shape (m, 2*order), and ic_opt has shape (..., 2*order),
# so we can't use W.dot(ic_opt). Instead, we dot ic_opt with W.T,
# so wic has shape (..., m).
wic = ic_opt.dot(W.T)
# `wic` is "almost" the product of W and the optimal ICs in equation
# (7)--if we're using a truncated impulse response (m < n), `wic`
# contains only the adjustments required for the ends of the signal.
# Here we form y_opt, taking this into account if necessary.
y_opt = y_fb
if m == n:
y_opt += wic
else:
y_opt[..., :m] += wic[..., :m]
y_opt[..., -m:] += wic[..., -m:]
x0 = ic_opt[..., :order]
x1 = ic_opt[..., -order:]
if axis != -1 and axis != x.ndim - 1:
# Restore the data axis to its original position.
x0 = np.swapaxes(x0, axis, x.ndim - 1)
x1 = np.swapaxes(x1, axis, x.ndim - 1)
y_opt = np.swapaxes(y_opt, axis, x.ndim - 1)
return y_opt, x0, x1
def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad',
irlen=None):
"""
A forward-backward filter.
This function applies a linear filter twice, once forward and once
backwards. The combined filter has linear phase.
The function provides options for handling the edges of the signal.
When `method` is "pad", the function pads the data along the given axis
in one of three ways: odd, even or constant. The odd and even extensions
have the corresponding symmetry about the end point of the data. The
constant extension extends the data with the values at the end points. On
both the forward and backward passes, the initial condition of the
filter is found by using `lfilter_zi` and scaling it by the end point of
the extended data.
When `method` is "gust", Gustafsson's method [1]_ is used. Initial
conditions are chosen for the forward and backward passes so that the
forward-backward filter gives the same result as the backward-forward
filter.
Parameters
----------
b : (N,) array_like
The numerator coefficient vector of the filter.
a : (N,) array_like
The denominator coefficient vector of the filter. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is ``3 * max(len(a), len(b))``.
method : str, optional
Determines the method for handling the edges of the signal, either
"pad" or "gust". When `method` is "pad", the signal is padded; the
type of padding is determined by `padtype` and `padlen`, and `irlen`
is ignored. When `method` is "gust", Gustafsson's method is used,
and `padtype` and `padlen` are ignored.
irlen : int or None, optional
When `method` is "gust", `irlen` specifies the length of the
impulse response of the filter. If `irlen` is None, no part
of the impulse response is ignored. For a long signal, specifying
`irlen` can significantly improve the performance of the filter.
Returns
-------
y : ndarray
The filtered output, an array of type numpy.float64 with the same
shape as `x`.
See Also
--------
lfilter_zi, lfilter
Notes
-----
The option to use Gustafsson's method was added in scipy version 0.16.0.
References
----------
.. [1] F. Gustafsson, "Determining the initial states in forward-backward
filtering", Transactions on Signal Processing, Vol. 46, pp. 988-992,
1996.
Examples
--------
The examples will use several functions from `scipy.signal`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
First we create a one second signal that is the sum of two pure sine
waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.
>>> t = np.linspace(0, 1.0, 2001)
>>> xlow = np.sin(2 * np.pi * 5 * t)
>>> xhigh = np.sin(2 * np.pi * 250 * t)
>>> x = xlow + xhigh
Now create a lowpass Butterworth filter with a cutoff of 0.125 times
the Nyquist rate, or 125 Hz, and apply it to ``x`` with `filtfilt`.
The result should be approximately ``xlow``, with no phase shift.
>>> b, a = signal.butter(8, 0.125)
>>> y = signal.filtfilt(b, a, x, padlen=150)
>>> np.abs(y - xlow).max()
9.1086182074789912e-06
We get a fairly clean result for this artificial example because
the odd extension is exact, and with the moderately long padding,
the filter's transients have dissipated by the time the actual data
is reached. In general, transient effects at the edges are
unavoidable.
The following example demonstrates the option ``method="gust"``.
First, create a filter.
>>> b, a = signal.ellip(4, 0.01, 120, 0.125) # Filter to be applied.
>>> np.random.seed(123456)
`sig` is a random input signal to be filtered.
>>> n = 60
>>> sig = np.random.randn(n)**3 + 3*np.random.randn(n).cumsum()
Apply `filtfilt` to `sig`, once using the Gustafsson method, and
once using padding, and plot the results for comparison.
>>> fgust = signal.filtfilt(b, a, sig, method="gust")
>>> fpad = signal.filtfilt(b, a, sig, padlen=50)
>>> plt.plot(sig, 'k-', label='input')
>>> plt.plot(fgust, 'b-', linewidth=4, label='gust')
>>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad')
>>> plt.legend(loc='best')
>>> plt.show()
The `irlen` argument can be used to improve the performance
of Gustafsson's method.
Estimate the impulse response length of the filter.
>>> z, p, k = signal.tf2zpk(b, a)
>>> eps = 1e-9
>>> r = np.max(np.abs(p))
>>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
>>> approx_impulse_len
137
Apply the filter to a longer signal, with and without the `irlen`
argument. The difference between `y1` and `y2` is small. For long
signals, using `irlen` gives a significant performance improvement.
>>> x = np.random.randn(5000)
>>> y1 = signal.filtfilt(b, a, x, method='gust')
>>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len)
>>> print(np.max(np.abs(y1 - y2)))
1.80056858312e-10
"""
b = np.atleast_1d(b)
a = np.atleast_1d(a)
x = np.asarray(x)
if method not in ["pad", "gust"]:
raise ValueError("method must be 'pad' or 'gust'.")
if method == "gust":
y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
return y
# `method` is "pad"...
ntaps = max(len(a), len(b))
if padtype not in ['even', 'odd', 'constant', None]:
raise ValueError(("Unknown value '%s' given to padtype. padtype "
"must be 'even', 'odd', 'constant', or None.") %
padtype)
if padtype is None:
padlen = 0
if padlen is None:
# Original padding; preserved for backwards compatibility.
edge = ntaps * 3
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be at least "
"padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
# Get the steady state of the filter's step response.
zi = lfilter_zi(b, a)
# Reshape zi and create x0 so that zi*x0 broadcasts
# to the correct value for the 'zi' keyword argument
# to lfilter.
zi_shape = [1] * x.ndim
zi_shape[axis] = zi.size
zi = np.reshape(zi, zi_shape)
x0 = axis_slice(ext, stop=1, axis=axis)
# Forward filter.
(y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0)
# Backward filter.
# Create y0 so zi*y0 broadcasts appropriately.
y0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)
# Reverse y.
y = axis_reverse(y, axis=axis)
if edge > 0:
# Slice the actual signal from the extended signal.
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def sosfilt(sos, x, axis=-1, zi=None):
"""
Filter data along one dimension using cascaded second-order sections
Filter a data sequence, `x`, using a digital IIR filter defined by
`sos`. This is implemented by performing `lfilter` for each
second-order section. See `lfilter` for details.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
An N-dimensional input array.
axis : int
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the cascaded filter delays. It is a (at
least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where
``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]``
replaced by 2. If `zi` is None or is not given then initial rest
(i.e. all zeros) is assumed.
Note that these initial conditions are *not* the same as the initial
conditions given by `lfiltic` or `lfilter_zi`.
Returns
-------
y : ndarray
The output of the digital filter.
zf : ndarray, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
zpk2sos, sos2zpk, sosfilt_zi
Notes
-----
The filter function is implemented as a series of second-order filters
with direct-form II transposed structure. It is designed to minimize
numerical precision errors for high-order filters.
.. versionadded:: 0.16.0
Examples
--------
Plot a 13th-order filter's impulse response using both `lfilter` and
`sosfilt`, showing the instability that results from trying to do a
13th-order filter in a single stage (the numerical error pushes some poles
outside of the unit circle):
>>> import matplotlib.pyplot as plt
>>> from scipy import signal
>>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba')
>>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos')
>>> x = np.zeros(700)
>>> x[0] = 1.
>>> y_tf = signal.lfilter(b, a, x)
>>> y_sos = signal.sosfilt(sos, x)
>>> plt.plot(y_tf, 'r', label='TF')
>>> plt.plot(y_sos, 'k', label='SOS')
>>> plt.legend(loc='best')
>>> plt.show()
"""
x = np.asarray(x)
sos = atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
use_zi = zi is not None
if use_zi:
zi = np.asarray(zi)
x_zi_shape = list(x.shape)
x_zi_shape[axis] = 2
x_zi_shape = tuple([n_sections] + x_zi_shape)
if zi.shape != x_zi_shape:
raise ValueError('Invalid zi shape. With axis=%r, an input with '
'shape %r, and an sos array with %d sections, zi '
'must have shape %r.' %
(axis, x.shape, n_sections, x_zi_shape))
zf = zeros_like(zi)
for section in range(n_sections):
if use_zi:
x, zf[section] = lfilter(sos[section, :3], sos[section, 3:],
x, axis, zi=zi[section])
else:
x = lfilter(sos[section, :3], sos[section, 3:], x, axis)
out = (x, zf) if use_zi else x
return out
from scipy.signal.filter_design import cheby1
from scipy.signal.fir_filter_design import firwin
def decimate(x, q, n=None, ftype='iir', axis=-1):
"""
Downsample the signal by using a filter.
By default, an order 8 Chebyshev type I filter is used. A 30 point FIR
filter with hamming window is used if `ftype` is 'fir'.
Parameters
----------
x : ndarray
The signal to be downsampled, as an N-dimensional array.
q : int
The downsampling factor.
n : int, optional
The order of the filter (1 less than the length for 'fir').
ftype : str {'iir', 'fir'}, optional
The type of the lowpass filter.
axis : int, optional
The axis along which to decimate.
Returns
-------
y : ndarray
The down-sampled signal.
See also
--------
resample
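Examples
--------
A minimal illustrative sketch, assuming the standard `scipy.signal`
namespace: decimate a 1000-sample signal by a factor of 4 using the
default Chebyshev type I IIR filter.
>>> import numpy as np
>>> from scipy import signal
>>> t = np.linspace(0, 1, 1000, endpoint=False)
>>> x = np.cos(2 * np.pi * 3 * t)
>>> y = signal.decimate(x, 4)
>>> y.shape
(250,)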
"""
if not isinstance(q, int):
raise TypeError("q must be an integer")
if n is None:
if ftype == 'fir':
n = 30
else:
n = 8
if ftype == 'fir':
b = firwin(n + 1, 1. / q, window='hamming')
a = 1.
else:
b, a = cheby1(n, 0.05, 0.8 / q)
y = lfilter(b, a, x, axis=axis)
sl = [slice(None)] * y.ndim
sl[axis] = slice(None, None, q)
return y[sl]
| bsd-3-clause |
cainiaocome/scikit-learn | sklearn/linear_model/coordinate_descent.py | 42 | 73973 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Gael Varoquaux <gael.varoquaux@inria.fr>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For
l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
normalize)
mean_dot = X_mean * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_std[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
faster to implement this functionality. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
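Examples
--------
A minimal illustrative sketch on toy data, assuming the public
``sklearn.linear_model.enet_path`` interface documented above (outputs
not shown):
>>> import numpy as np
>>> from sklearn.linear_model import enet_path
>>> X = np.array([[1.0, 2.0, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1.0, 2.0, 3.1])
>>> alphas, coefs, dual_gaps = enet_path(X, y, l1_ratio=0.5, n_alphas=5)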
"""
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
if Xy is not None:
Xy = check_array(Xy, 'csc', dtype=np.float64, order='F', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.zeros(n_features)
# X should be normalized and fit already.
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False, fit_intercept=False,
copy=False)
if alphas is None:
# No need to normalize or fit_intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=np.float64)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
else:
coef_ = np.asfortranarray(coef_init)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
precompute = check_array(precompute, 'csc', dtype=np.float64, order='F')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like")
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
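Examples
--------
A minimal usage sketch with toy data (the result of ``fit`` is assigned
so that no estimator repr needs to be shown):
>>> from sklearn.linear_model import ElasticNet
>>> clf = ElasticNet(alpha=0.1, l1_ratio=0.7)
>>> model = clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])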
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if self.precompute == 'auto':
warnings.warn("Setting precompute to 'auto', was found to be "
"slower even when n_samples > n_features. Hence "
"it will be removed in 0.18.",
DeprecationWarning, stacklevel=2)
X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
order='F', copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_mean = np.atleast_1d(y_mean)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
# Workaround for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
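# Editorial note: _path_residuals returns the mean squared errors as an array
# of shape (n_alphas,) for a single (l1_ratio, fold) combination; the fit()
# method of LinearModelCV below stacks these results over folds and l1_ratios
# and averages across folds to select the best (alpha, l1_ratio) pair.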
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was"
" passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if not np.may_share_memory(reference_to_old_X.data, X.data):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
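# Editorial sketch (not part of the original source): typical use of the CV
# estimators defined below, assuming X and y are user-supplied arrays:
#
#     from sklearn.linear_model import LassoCV
#     model = LassoCV(cv=5).fit(X, y)
#     best_alpha = model.alpha_   # penalty strength chosen by cross-validation
#     y_pred = model.predict(X)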
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula),
intercept_ : float | array, shape (n_targets,)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
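For example, individual penalty weights a = 0.1 (L1) and b = 0.3 (L2)
correspond to ``alpha = 0.4`` and ``l1_ratio = 0.25``.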
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||_Fro^2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_mean, y_mean, X_std)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
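# Editorial note: the mixed-norm (L21) term used above couples the tasks, so a
# feature is either selected for all outputs (non-zero row in coef_) or for
# none of them; this joint feature selection is what distinguishes the
# multi-task estimators from fitting a separate Lasso/ElasticNet per output.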
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||_Fro^2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||_Fro^2 + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
| bsd-3-clause |
ifarup/colourlab | colourlab/data.py | 1 | 36069 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""data: Colour data, part of the colourlab package
Copyright (C) 2013-2017 Ivar Farup
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import re
import numpy as np
import inspect
from matplotlib.patches import Ellipse
from . import space
# =============================================================================
# Colour data
# =============================================================================
class Points:
"""
Class for keeping colour data in various colour spaces and shapes.
"""
def __init__(self, sp, ndata):
"""
Construct new instance and set colour space and data.
Parameters
----------
sp : space.Space
The colour space for the given instantiation data.
ndata : ndarray
The colour data in the given space.
"""
self.data = None
self.sh = None
self.flattened_XYZ = None
self.set(sp, ndata)
def flatten(self, ndata):
"""
Shape the data so that it becomes a PxC matrix or a C vector.
The data should be of the shape M x ... x N x C, where C is the
number of colour channels. Returns the shaped data as a P x C
matrix where P = M x ... x N. The original shape can be restored
afterwards with np.reshape(data, shape).
Parameters
----------
ndata : ndarray
M x ... x N x C array of colour data
Returns
-------
ndata : ndarray
P x C array of colour data, P = M * ... * N
"""
sh = np.shape(ndata)
sh_array = np.array(sh)
P_data = np.prod(sh_array[:len(sh) - 1])
C_data = sh[len(sh) - 1]
return np.reshape(ndata, [P_data, C_data])
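# Editorial sketch (not part of the original source): flatten() turns, e.g.,
# an image-shaped array of colours of shape (M, N, 3) into an (M * N, 3)
# point list so that the colour-space conversions can operate on it:
#
#     im = np.random.rand(4, 5, 3)                     # hypothetical data
#     flat = Points(space.xyz, im).get_flattened(space.cielab)
#     assert flat.shape == (20, 3)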
def set(self, sp, ndata):
"""
Set colour space and data.
A new dictionary is constructed, and the data are added in the
provided colour space, as well as in the XYZ colour space
(using the SpaceXYZ class).
Parameters
----------
sp : space.Space
The colour space for the given instantiation data.
ndata : ndarray
The colour data in the given space.
"""
ndata = np.array(ndata)
self.data = dict()
self.data[sp] = ndata
self.sh = ndata.shape
flattened_data = self.flatten(ndata)
if sp == space.xyz:
self.flattened_XYZ = flattened_data
else:
self.flattened_XYZ = sp.to_XYZ(flattened_data)
self.data[space.xyz] = np.reshape(self.flattened_XYZ, self.sh)
def get(self, sp):
"""
Return colour data in required colour space.
If the data do not currently exist in the required colour
space, the necessary colour conversion will take place, and
the results stored in the object for future use.
Parameters
----------
sp : space.Space
The colour space for the returned data.
Returns
-------
ndata : ndarray
The colour data in the given colour space.
"""
if sp in self.data:
return self.data[sp]
else:
flattened_data = sp.from_XYZ(self.flattened_XYZ)
ndata = np.reshape(flattened_data, self.sh)
self.data[sp] = ndata
return ndata
def get_flattened(self, sp):
"""
Return colour data in required colour space in PxC format.
If the data do not currently exist in the required colour
space, the necessary colour conversion will take place, and
the results stored in the object for future use.
Parameters
----------
sp : space.Space
The colour space for the returned data.
Returns
-------
ndata : ndarray
The flattened colour data in the given colour space.
"""
return self.flatten(self.get(sp))
def new_white_point(self, sp, from_white, to_white):
"""
Return new data set with new white point.
The transformation is done using the von Kries transformation
in the given colour space.
Parameters
----------
sp : space.Space
The colour space for the von Kries transformation.
from_white : data.Points
The white point of the current data set.
to_white : data.Points
The white point of the new data set.
Returns
-------
data : data.Points
The new colour data with changed white point.
"""
wh_in = from_white.get(sp)
wh_out = to_white.get(sp)
von_kries_mat = np.array([[wh_out[0] / wh_in[0], 0, 0],
[0, wh_out[1] / wh_in[1], 0],
[0, 0, wh_out[2] / wh_in[2]]])
return Points(sp, self.get(space.TransformLinear(sp, von_kries_mat)))
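# Editorial note: the von Kries adaptation above is a pure channel-wise
# scaling: each coordinate in the chosen space is multiplied by the ratio of
# the destination and source white-point coordinates, implemented here as a
# diagonal linear transform (space.TransformLinear) of that space.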
class Vectors:
"""
Class for keeping contravariant vector data in various colour spaces.
"""
def __init__(self, sp, vectors_ndata, points_data):
"""
Construct new instance and set colour space and data.
Parameters
----------
sp : space.Space
The colour space for the given vector data.
vectors_ndata : ndarray
The vector data in the given colour space at the given points.
points_data : data.Points
The colour points for the given vector data.
"""
self.points = None
self.vectors = None
self.sh = None
self.flattened_XYZ = None
self.set(sp, vectors_ndata, points_data)
def flatten(self, ndata):
"""
Shape the data so that it becomes a PxC matrix or a C vector.
The data should be of the shape M x ... x N x C, where C is
the number of colour channels. Returns the shaped data as a P
x C matrix where P = M x ... x N. The original shape can be
restored afterwards with np.reshape(data, shape).
Parameters
----------
ndata : ndarray
M x ... x N x C array of colour data
Returns
-------
ndata : ndarray
P x C array of colour data, P = M * ... * N
"""
sh = np.shape(ndata)
sh_array = np.array(sh)
P_data = np.prod(sh_array[:len(sh) - 1])
C_data = sh[len(sh) - 1]
return np.reshape(ndata, [P_data, C_data])
def set(self, sp, vectors_ndata, points_data):
"""
Set colour space, points, and vectors data.
The points_data are assumed to already be of type Points. A new
dictionary is constructed, and the vectors_ndata are added in
the provided colour space, as well as in the XYZ colour space
(using the SpaceXYZ class).
Parameters
----------
sp : space.Space
The colour space for the given vector data.
vectors_ndata : ndarray
The vector data in the given colour space at the given points.
points_data : data.Points
The colour points for the given vector data.
"""
self.points = points_data
self.vectors = dict()
vectors_ndata = np.array(vectors_ndata)
self.vectors[sp] = vectors_ndata
self.sh = vectors_ndata.shape
flattened_data = self.flatten(vectors_ndata)
if sp == space.xyz:
self.flattened_XYZ = flattened_data
else:
self.flattened_XYZ = sp.vectors_to_XYZ(self.points, flattened_data)
self.vectors[space.xyz] = np.reshape(self.flattened_XYZ, self.sh)
def get(self, sp):
"""
Return colour vector data in required colour space.
If the data do not currently exist in the required colour
space, the necessary colour conversion will take place, and
the results stored in the object for future use.
Parameters
----------
sp : space.Space
The colour space for the returned data.
Returns
-------
ndata : ndarray
The colour vector data in the given colour space.
"""
if sp in self.vectors:
return self.vectors[sp]
else:
flattened_data = sp.vectors_from_XYZ(self.points,
self.flattened_XYZ)
ndata = np.reshape(flattened_data, self.sh)
self.vectors[sp] = ndata
return ndata
def get_flattened(self, sp):
"""
Return colour vector data in required colour space in PxC format.
If the data do not currently exist in the required colour
space, the necessary colour conversion will take place, and
the results stored in the object for future use.
Parameters
----------
sp : space.Space
The colour space for the returned data.
Returns
-------
ndata : ndarray
The flattened colour vector data in the given colour space.
"""
return self.flatten(self.get(sp))
class Tensors:
"""
Class for keeping colour metric data in various colour spaces.
"""
# Cross sectional planes for ellipses
plane_01 = np.s_[0:2]
plane_12 = np.s_[1:3]
plane_10 = np.s_[1::-1]
plane_20 = np.s_[2::-2]
plane_xy = plane_01
plane_ab = plane_12
plane_aL = plane_10
plane_bL = plane_20
def __init__(self, sp, metrics_ndata, points_data):
"""
Construct new instance and set colour space and data.
Parameters
----------
sp : space.Space
The colour space for the given tensor data.
metrics_ndata : ndarray
The tensor data in the given colour space at the given points.
points_data : data.Points
The colour points for the given tensor data.
"""
self.points = None
self.metrics = None
self.sh = None
self.flattened_XYZ = None
self.set(sp, metrics_ndata, points_data)
def flatten(self, ndata):
"""
Shape the data so that it becomes a PxCxC matrix or a CxC matrix.
The data should be of the shape M x ... x N x C x C, where C is the
number of colour channels. Returns the shaped data as a P x C x C
array where P = M x ... x N. The original shape can be restored
afterwards with np.reshape(data, shape).
Parameters
----------
ndata : ndarray
M x ... x N x C x C array of colour metrics
Returns
-------
ndata : ndarray
P x C x C array of colour metrics, P = M * ... * N
"""
sh = np.shape(ndata)
sh_array = np.array(sh)
P_data = np.prod(sh_array[:len(sh) - 2])
C_data = sh[len(sh) - 2:]
return np.reshape(ndata, [P_data, C_data[0], C_data[1]])
def set(self, sp, metrics_ndata, points_data):
"""
Set colour space, points, and metrics data.
The points_data are assumed to already be of type Points. A new
dictionary is constructed, and the metrics_ndata are added in
the provided colour space, as well as in the XYZ colour space
(using the space.SpaceXYZ class).
Parameters
----------
sp : space.Space
The colour space for the given tensor data.
metrics_ndata : ndarray
The tensor data in the given colour space at the given points.
points_data : data.Points
The colour points for the given tensor data.
"""
self.points = points_data
self.metrics = dict()
self.sh = metrics_ndata.shape
self.metrics[sp] = metrics_ndata
flattened_data = self.flatten(metrics_ndata)
if sp == space.xyz:
self.flattened_XYZ = flattened_data
else:
self.flattened_XYZ = sp.metrics_to_XYZ(points_data, flattened_data)
self.metrics[space.xyz] = np.reshape(self.flattened_XYZ, self.sh)
def get(self, sp):
"""
Return metric data in required colour space.
If the data do not currently exist in the required colour
space, the necessary colour conversion will take place, and
the results stored in the object for future use.
Parameters
----------
sp : space.Space
The colour space in which to return the tensor data.
Returns
-------
tensors : ndarray
Array of tensors in the given colour space.
"""
if sp in self.metrics:
return self.metrics[sp]
else:
flattened_metrics = sp.metrics_from_XYZ(self.points,
self.flattened_XYZ)
metrics_ndata = np.reshape(flattened_metrics, self.sh)
self.metrics[sp] = metrics_ndata
return metrics_ndata
def get_flattened(self, sp):
"""
Return colour data in required colour space in PxC format.
If the data do not currently exist in the required colour
space, the necessary colour conversion will take place, and
the results stored in the object for future use.
Parameters
----------
sp : space.Space
The colour space for the returned data.
Returns
-------
ndata : ndarray
The flattened colour data in the given colour space.
"""
return self.flatten(self.get(sp))
def get_ellipse_parameters(self, sp, plane=plane_xy, scale=1):
"""
Return ellipse parameters a, b, theta in the required plane.
The plane is in the given space. For now, plane is represented
by a slice giving the correct range for the arrays. Should
perhaps be changed in the future.
Parameters
----------
sp : space.Space
The space in which to give the ellipse parameters.
plane : slice
The principal plane for the ellipsoid cross sections.
scale : float
The scaling (magnification) factor for the ellipses.
Returns
-------
a_b_theta : ndarray
N x 3 array of a, b, theta ellipse parameters.
"""
metrics = self.get_flattened(sp).copy()
points = self.points.get_flattened(sp).copy()
a_b_theta = np.zeros(np.shape(points))
metrics = metrics[..., plane, plane]
points = points[:, plane]
for i in range(np.shape(metrics)[0]):
g11 = metrics[i, 0, 0]
g22 = metrics[i, 1, 1]
g12 = metrics[i, 0, 1]
theta = np.arctan2(2*g12, g11 - g22) * 0.5
if theta == 0:
a = 1 / np.sqrt(g11)
b = 1 / np.sqrt(g22)
else:
a = 1 / np.sqrt(g22 + g12 / np.tan(theta))
b = 1 / np.sqrt(g11 - g12 / np.tan(theta))
a_b_theta[i, 0] = a * scale
a_b_theta[i, 1] = b * scale
a_b_theta[i, 2] = theta
return a_b_theta
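# Editorial note: the parameters above describe the unit-distance ellipse
# g11 dx^2 + 2 g12 dx dy + g22 dy^2 = 1 in the chosen plane; a and b are its
# semi-axes along the rotated principal directions (multiplied by `scale`)
# and theta is the rotation angle of those directions.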
def get_ellipses(self, sp, plane=plane_xy, scale=1):
"""
Return Ellipse objects in the required plane of the given space.
For now, plane is represented by a slice giving the correct
range for the arrays. Should perhaps be changed in the future.
Parameters
----------
sp : space.Space
The space in which to give the ellipse parameters.
plane : slice
The principal plane for the ellipsoid cross sections.
scale : float
The scaling (magnification) factor for the ellipses.
Returns
-------
ellipses : list
List of Ellipse objects.
"""
a_b_theta = self.get_ellipse_parameters(sp, plane, scale)
points = self.points.get_flattened(sp).copy()
points = points[:, plane]
ells = []
for i in range(np.shape(a_b_theta)[0]):
ells.append(Ellipse(points[i],
width=2 * a_b_theta[i, 0],
height=2 * a_b_theta[i, 1],
angle=a_b_theta[i, 2] * 180 / np.pi))
return ells
def inner(self, sp, vec1, vec2):
"""
Return the inner product of the two vectors in the given space.
The result should in theory be invariant with respect to the
colour space.
Parameters
----------
sp : space.Space
The space in which to compute the inner product
vec1: Vectors
The first vector
vec2: Vectors
The second vector
Returns
-------
inner : ndarray
The inner products (scalars)
"""
return np.einsum('...ij,...i,...j', self.get(sp),
vec1.get(sp), vec2.get(sp))
def norm_sq(self, sp, vec):
"""
Return the squared norm of a vector data set given the metric tensor.
The vector set and the tensor data set must have corresponding
dimensions.
Parameters
----------
sp : space.Space
The space in which to compute the inner product
vec: Vectors
The vectors
Returns
-------
norms: ndarray
Array with numerical (scalar) values of the squared norm.
"""
return self.inner(sp, vec, vec)
def norm(self, sp, vec):
"""
Compute the norm of a vector data set with a given metric tensor.
The vector set and the tensor data set must have corresponding
dimensions.
Parameters
----------
sp : space.Space
The space in which to compute the inner product
vec: Vectors
The vectors
Returns
-------
norms: ndarray
Array with numerical (scalar) values of the norm.
"""
return np.sqrt(self.inner(sp, vec, vec))
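# Editorial note: inner(), norm_sq() and norm() all evaluate the quadratic
# form g_ij v1^i v2^j of the metric tensor via np.einsum; up to numerical
# error the result is independent of the colour space in which it is
# evaluated, which is the defining property of a metric.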
# =============================================================================
# Colour data sets
# =============================================================================
def resource_path(relative):
"""
Extend relative path to full path (mainly for setuptools integration).
Parameters
----------
relative : string
The relative path name.
Returns
-------
absolute : string
The absolute path name.
"""
return os.path.dirname(
os.path.abspath(
inspect.getsourcefile(resource_path))) + '/' + relative
def read_csv_file(filename, pad=-np.inf):
"""
Read a CSV file and return a numpy array.
Parameters
----------
filename : string
Name of the CSV file to read
pad : float
Value to pad for missing values.
Returns
-------
csv_array : ndarray
The content of the file plus padding.
"""
f = open(resource_path(filename))
data = f.readlines()
f.close()
for i in range(len(data)):
data[i] = data[i].split(',')
for j in range(len(data[i])):
if data[i][j].strip() == '':
data[i][j] = pad
else:
data[i][j] = float(data[i][j])
return np.array(data)
# White points:
white_A = Points(space.xyz, space.Space.white_A)
white_B = Points(space.xyz, space.Space.white_B)
white_C = Points(space.xyz, space.Space.white_C)
white_D50 = Points(space.xyz, space.Space.white_D50)
white_D55 = Points(space.xyz, space.Space.white_D55)
white_D65 = Points(space.xyz, space.Space.white_D65)
white_D75 = Points(space.xyz, space.Space.white_D75)
white_E = Points(space.xyz, space.Space.white_E)
white_F2 = Points(space.xyz, space.Space.white_F2)
white_F7 = Points(space.xyz, space.Space.white_F7)
white_F11 = Points(space.xyz, space.Space.white_F11)
def d_XYZ_31():
"""
Read CIE XYZ 1931 functions.
Returns
-------
xyz_31 : data.Points
The XYZ 1931 colour matching functions.
"""
xyz_ = read_csv_file('colour_data/ciexyz31_1.csv')
return Points(space.xyz, xyz_[:, 1:])
def d_XYZ_64():
"""
Read CIE XYZ 1964 functions.
Returns
-------
xyz_64 : data.Points
The XYZ 1964 colour matching functions.
"""
xyz_ = read_csv_file('colour_data/ciexyz64_1.csv')
return Points(space.xyz, xyz_[:, 1:])
def d_Melgosa():
"""
The data points for the Melgosa Ellipsoids (RIT-DuPont).
Copied verbatim from the PDF of the CRA paper. The ellipsoids were
fitted in CIELAB; this function returns their centre points as Points.
Returns
-------
d_Melgosa : data.Points
The centre points of Melgosa's RIT-DuPont ellipsoids.
"""
m_a = np.array([-1.403, -16.374, -0.782, -27.549, 12.606, 12.153,
35.646, 1.937, -10.011, -0.453, -30.732, 21.121,
-33.638, -13.440, 25.237, 31.509, 6.826, 0.307,
18.226])
m_b = np.array([-27.810, -11.263, 1.049, 2.374, 20.571, -13.079,
21.403, 35.638, 13.281, 0.421, -5.030, 17.804,
-5.012, -25.897, 3.409, -0.183, -31.146, 0.214,
79.894])
m_L = np.array([35.338, 50.259, 59.334, 55.618, 62.928, 46.389,
42.315, 78.023, 64.938, 14.140, 68.678, 28.893,
31.683, 59.904, 17.357, 58.109, 30.186, 83.481,
76.057])
m_Lab = np.concatenate(([m_L], [m_a], [m_b]), axis=0).T
return Points(space.cielab, m_Lab)
def d_Munsell(dataset='real'):
"""
The Munsell renotation data under illuminant C for the 2 degree observer.
Parameters
----------
dataset : string
Which data set. Either 'all', 'real', or '1929'. See
http://www.cis.rit.edu/research/mcsl2/online/munsell.php
for details.
Returns
-------
d_Munsell : data.Points
The Munsell colours.
munsell_names : list
The standard Munsell value names (H, V, C).
munsell_lab : ndarray
Numeric version of the Munsell values names in a normalised
Lab type coordinate system. Follows the layout of McCann
J. Elect. Imag. 1999
"""
if dataset == 'all' or dataset == 'real' or dataset == '1929':
fname = 'colour_data/' + dataset + '.dat'
else:
raise RuntimeError('Non-existing Munsell data set: ' + str(dataset))
infile = open(resource_path(fname), 'r')
data = infile.readlines()
infile.close()
data = data[1:]
for i in range(len(data)):
data[i] = data[i].split()
munsell_names = list(data)
for i in range(len(munsell_names)):
munsell_names[i] = munsell_names[i][0:3]
    # Copy the sublists so the string names above are not mutated by the
    # numeric conversions below.
    munsell_hlc = [list(name) for name in munsell_names]
for i in range(len(data)):
data[i] = data[i][3:]
for j in range(len(data[i])):
data[i][j] = float(data[i][j])
data = np.array(data)
data[:, 2] = data[:, 2] / 100.
data[data == 0] = 1e-16
hue_list = ['10RP',
'2.5R', '5R', '7.5R', '10R',
'2.5YR', '5YR', '7.5YR', '10YR',
'2.5Y', '5Y', '7.5Y', '10Y',
'2.5GY', '5GY', '7.5GY', '10GY',
'2.5G', '5G', '7.5G', '10G',
'2.5BG', '5BG', '7.5BG', '10BG',
'2.5B', '5B', '7.5B', '10B',
'2.5PB', '5PB', '7.5PB', '10PB',
'2.5P', '5P', '7.5P', '10P',
'2.5RP', '5RP', '7.5RP']
hue_lut = dict(zip(hue_list, 2 * np.pi * np.arange(len(hue_list)) /
float(len(hue_list))))
for i in range(len(munsell_hlc)):
munsell_hlc[i][0] = hue_lut[munsell_hlc[i][0]]
for j in range(3):
munsell_hlc[i][j] = float(munsell_hlc[i][j])
munsell_hlc = np.array(munsell_hlc)
munsell_hlc[:, 1] = munsell_hlc[:, 1] / 10.
munsell_hlc[:, 2] = munsell_hlc[:, 2] / 20.
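    # Convert the normalised (hue angle, value, chroma) triplets into a
    # Cartesian, Lab-like layout: L = value, (a, b) = chroma * (cos h, sin h).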
munsell_lab = np.zeros(np.shape(munsell_hlc))
munsell_lab[:, 0] = munsell_hlc[:, 1]
munsell_lab[:, 1] = munsell_hlc[:, 2] * np.cos(munsell_hlc[:, 0])
munsell_lab[:, 2] = munsell_hlc[:, 2] * np.sin(munsell_hlc[:, 0])
return Points(space.xyY, data), munsell_names, munsell_lab
def d_regular(sp, x_val, y_val, z_val):
"""
Build regular data set of colour data in the given colour space.
x_val, y_val, and z_val should be one-dimensional arrays.
Parameters
----------
sp : space.Space
The given colour space.
x_val : ndarray
Array of x values.
y_val : ndarray
Array of y values.
z_val : ndarray
Array of z values.
Returns
-------
data : data.Points
Regular structure of colour data in the given colour space.
"""
x_len = np.shape(x_val)[0]
y_len = np.shape(y_val)[0]
z_len = np.shape(z_val)[0]
tot_len = x_len * y_len * z_len
ndata = np.zeros((tot_len, 3))
ell = 0
for i in range(x_len):
for j in range(y_len):
for k in range(z_len):
ndata[ell, 0] = x_val[i]
ndata[ell, 1] = y_val[j]
ndata[ell, 2] = z_val[k]
ell = ell + 1
return Points(sp, ndata)
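# A small illustrative sketch (values chosen arbitrarily): build a coarse
# regular grid in CIELAB with 5 steps per axis, i.e. 125 colours in total.
#
#     L_vals = np.linspace(0, 100, 5)
#     ab_vals = np.linspace(-100, 100, 5)
#     grid = d_regular(space.cielab, L_vals, ab_vals, ab_vals)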
# TODO:
#
# Colour data sets, as needed (instances of Points):
# patches_Munsell ++
# patches_OSA ++ ???
# patches_Colour Checker ++
# =============================================================================
# Metric data sets
# =============================================================================
def g_MacAdam():
"""
MacAdam ellipses (defined in xy, extended arbitrarily to xyY).
Arbitrarily uses Y=0.4 and g33 = 1e3 for extension to 3D.
Returns
-------
MacAdam : Tensors
The metric tensors corresponding to the MacAdam ellipsoids.
"""
from scipy.io import loadmat
rawdata = loadmat(resource_path('tensor_data/macdata(xyabtheta).mat'))
rawdata = rawdata['unnamed']
xyY = rawdata[:, 0:3].copy()
xyY[:, 2] = 0.4 # arbitrary!
points = Points(space.xyY, xyY)
a = rawdata[:, 2]/1e3
b = rawdata[:, 3]/1e3
theta = rawdata[:, 4]*np.pi/180.
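    # The 2x2 metric in the xy plane is the ellipse quadratic form rotated by
    # theta, g = R(theta) diag(1/a**2, 1/b**2) R(theta)^T, expanded
    # component-wise into g11, g22 and the off-diagonal g12 below.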
g11 = (np.cos(theta)/a)**2 + (np.sin(theta)/b)**2
g22 = (np.sin(theta)/a)**2 + (np.cos(theta)/b)**2
g12 = np.cos(theta)*np.sin(theta)*(1/a**2 - 1/b**2)
g = np.zeros((25, 3, 3))
g[:, 0, 0] = g11
g[:, 1, 1] = g22
g[:, 2, 2] = 1e3 # arbitrary!
g[:, 0, 1] = g12
g[:, 1, 0] = g12
return Tensors(space.xyY, g, points)
def g_three_observer():
"""
Wyszecki and Fielder's three observer data set.
Arbitrarily uses Y=0.4 and g33 = 1e3 for extension to 3D. It seems by
comparing the data file to the original paper by Wyszecki and Fielder
(JOSA, 1971) that only one of the data sets (GW) is represented in the
file. Also, the paper reports a full 3D metric, so the arbitrary extension
to 3D used here is not really called for.
Returns
-------
threeObserver : Tensors
The metric tensors corresponding to the three observer ellipsoids.
"""
f = open(resource_path('tensor_data/3 observer.txt'))
rawdata = f.readlines()[:-1]
f.close()
for line in range(len(rawdata)):
rawdata[line] = rawdata[line].split('\t')
for item in range(len(rawdata[line])):
rawdata[line][item] = float(rawdata[line][item].strip())
rawdata = np.array(rawdata)
xyY = rawdata[:, 1:4].copy()
xyY[:, 2] = 0.4 # arbitrary!
points = Points(space.xyY, xyY)
a = rawdata[:, 4] / 1e3 # correct?
    b = rawdata[:, 5] / 1e3 # correct?
theta = rawdata[:, 3] * np.pi / 180.
g11 = (np.cos(theta) / a)**2 + (np.sin(theta) / b)**2
g22 = (np.sin(theta) / a)**2 + (np.cos(theta) / b)**2
g12 = np.cos(theta)*np.sin(theta)*(1 / a**2 - 1 / b**2)
g = np.zeros((28, 3, 3))
g[:, 0, 0] = g11
g[:, 1, 1] = g22
g[:, 2, 2] = 1e3 # arbitrary!
g[:, 0, 1] = g12
g[:, 1, 0] = g12
return Tensors(space.xyY, g, points)
def g_Melgosa_Lab():
"""
Melgosa's CIELAB-fitted ellipsoids for the RIT-DuPont data.
Copied verbatim from pdf of CRA paper. Uses the ellipsoids fitted
in CIELAB and returns Tensors.
Returns
-------
Melgosa : Tensors
The metric tensors corresponding to Melgosa's ellipsoids.
"""
m_gaa = np.array([0.6609, 0.3920, 1.3017, 0.1742, 0.5967, 0.5374,
0.2837, 0.6138, 0.7252, 1.6002, 0.1760, 0.8512,
0.0543, 0.3547, 0.2381, 0.1729, 0.7289, 0.9614,
0.2896])
m_gbb = np.array([0.2387, 0.4286, 0.5241, 0.5847, 0.4543, 0.3048,
0.3717, 0.2465, 0.4370, 0.4790, 0.2589, 0.4054,
0.7178, 0.2057, 0.3801, 0.2532, 0.4255, 0.1984,
0.0522])
m_gab = np.array([0.3080, -0.0386, 0.1837, 0.0632, -0.1913, 0.2772,
-0.1215, -0.0757, 0.1565, 0.0971, 0.0941, -0.2578,
-0.1148, 0.1671, 0.0229, 0.0362, 0.5275, 0.1822,
0.0023])
m_gLa = np.array([-0.0144, 0.0812, -0.1435, 0.0996, -0.0008, -0.0115,
0.0644, 0.0315, 0.2465, -0.0120, 0.1255, 0.1046,
0.1319, 0.0924, 0.0952, -0.0134, 0.0128, -0.1378,
-0.0459])
m_gLb = np.array([-0.1315, 0.0373, -0.1890, -0.1696, -0.1447, 0.0525,
-0.0927, -0.0833, -0.1251, 0.0357, -0.0153, 0.1334,
-0.1589, 0.1759, -0.1561, 0.0341, 0.0113, 0.0070,
-0.0288])
m_gLL = np.array([1.1973, 1.6246, 1.3061, 1.0817, 1.1507, 1.2378, 0.9709,
0.7855, 1.3469, 0.6585, 0.9418, 0.9913, 0.8693, 0.8080,
0.8277, 0.5755, 0.9311, 0.5322, 0.4228])
m_Lab_metric = np.zeros((19, 3, 3))
m_Lab_metric[:, 0, 0] = m_gLL
m_Lab_metric[:, 1, 1] = m_gaa
m_Lab_metric[:, 2, 2] = m_gbb
m_Lab_metric[:, 0, 1] = m_gLa
m_Lab_metric[:, 1, 0] = m_gLa
m_Lab_metric[:, 0, 2] = m_gLb
m_Lab_metric[:, 2, 0] = m_gLb
m_Lab_metric[:, 1, 2] = m_gab
m_Lab_metric[:, 2, 1] = m_gab
return Tensors(space.cielab, m_Lab_metric, d_Melgosa())
def g_Melgosa_xyY():
"""
Melgosa's xyY-fitted ellipsoids for the RIT-DuPont data.
Copied verbatim from pdf of CRA paper. Uses the ellipsoids fitted
in xyY and returns Tensors.
Returns
-------
Melgosa : Tensors
The metric tensors corresponding to Melgosa's ellipsoids.
"""
m_g11 = np.array([10.074, 5.604, 18.738, 3.718, 5.013, 7.462, 1.229,
7.634, 11.805, 3.578, 5.359, 1.770, 0.368, 9.407,
0.624, 2.531, 11.222, 26.497, 3.762])
m_g22 = np.array([3.762, 6.589, 14.619, 3.310, 13.314, 3.533, 5.774,
11.162, 7.268, 3.007, 1.769, 6.549, 2.348, 3.485,
2.091, 4.122, 2.623, 16.086, 4.597])
m_g12 = np.array([-5.498, -3.518, -12.571, 0.219, -4.689, -3.946, -0.365,
-6.096, -5.562, -2.698, -0.902, -2.052, 0.040, -4.170,
-0.434, -1.074, -4.884, -18.122, -1.715])
m_g13 = np.array([-1.607, 0.001, -0.776, -0.078, -0.037, 0.212, 0.683,
0.049, 0.560, -0.103, 0.223, 2.341, 0.538, -0.240, 1.825,
0.285, -2.174, -0.361, 0.064])
m_g23 = np.array([-0.509, -0.346, 0.147, -0.489, -0.121, -0.065, -1.676,
-0.020, -0.521, 0.831, -0.291, -1.436, -0.936, 0.480,
-3.806, -0.058, 0.659, 0.343, 0.088])
m_g33 = np.array([5.745, 2.426, 1.146, 1.111, 0.845, 2.311, 2.878, 0.287,
0.912, 21.381, 0.517, 9.775, 3.823, 0.687, 23.949, 0.564,
6.283, 0.160, 0.169])
m_xyY_metric = np.zeros((19, 3, 3))
m_xyY_metric[:, 0, 0] = m_g11
m_xyY_metric[:, 1, 1] = m_g22
m_xyY_metric[:, 2, 2] = m_g33
m_xyY_metric[:, 0, 1] = m_g12
m_xyY_metric[:, 1, 0] = m_g12
m_xyY_metric[:, 0, 2] = m_g13
m_xyY_metric[:, 2, 0] = m_g13
m_xyY_metric[:, 1, 2] = m_g23
m_xyY_metric[:, 2, 1] = m_g23
m_xyY_metric = 1e4*m_xyY_metric
return Tensors(space.xyY, m_xyY_metric, d_Melgosa())
def g_BFD(dataset='P'):
"""
Return the BFD data set ellipses of the required type.
Parameters
----------
dataset : string
The data set to use, either 'P', 'A', or '2', for perceptual,
accept, and both, respectively.
Returns
-------
bfd : Tensors
The BDF data set of the required type
"""
if dataset == 'P':
file_name = resource_path('tensor_data/BFD_P.txt')
elif dataset == 'A':
file_name = resource_path('tensor_data/BFD_A.txt')
    elif dataset == '2':
        file_name = resource_path('tensor_data/BFD (2).txt')
    else:
        raise ValueError('Non-existing BFD data set: ' + str(dataset))
f = open(file_name, 'r')
rawdata = f.readlines()
f.close()
for line in range(len(rawdata)):
rawdata[line] = re.sub(r'\s+', ' ', rawdata[line]).strip()
rawdata[line] = rawdata[line].split(' ')
for item in range(len(rawdata[line])):
rawdata[line][item] = float(rawdata[line][item])
rawdata = np.array(rawdata)
xyY = rawdata[:, 0:3].copy()
xyY[:, 2] = xyY[:, 2] / 100
points = Points(space.xyY, xyY)
a = rawdata[:, 3] / 1e4 # correct?
    b = a / rawdata[:, 4] # correct?
theta = rawdata[:, 5] * np.pi / 180.
g11 = (np.cos(theta) / a)**2 + (np.sin(theta) / b)**2
g22 = (np.sin(theta) / a)**2 + (np.cos(theta) / b)**2
g12 = np.cos(theta) * np.sin(theta) * (1 / a**2 - 1 / b**2)
g = np.zeros((np.shape(rawdata)[0], 3, 3))
g[:, 0, 0] = g11
g[:, 1, 1] = g22
g[:, 2, 2] = 1e3 # arbitrary!
g[:, 0, 1] = g12
g[:, 1, 0] = g12
return Tensors(space.xyY, g, points)
# =============================================================================
# Metric datasets
# =============================================================================
def m_rit_dupont():
"""
Read the full RIT-DuPont individual colour difference data from file.
Returns
-------
rit_dupont : dict
Dictionary with two datasets, dV, weights, and various metrics.
"""
dat = read_csv_file(
'metric_data/Mio_RIT_DuPont_Individual_Color_Difference_Data.csv')
lab1 = dat[:, 0:3]
lab2 = dat[:, 3:6]
rit_dupont = dict()
rit_dupont['data1'] = Points(space.cielab, lab1)
rit_dupont['data2'] = Points(space.cielab, lab2)
rit_dupont['dE_ab'] = dat[:, 6].copy()
rit_dupont['dE_00'] = dat[:, 7].copy()
rit_dupont['dE_94'] = dat[:, 8].copy()
rit_dupont['dV'] = dat[:, 9].copy()
rit_dupont['weights'] = dat[:, 10].copy()
return rit_dupont
def m_rit_dupont_T50():
"""
Read the reduced RIT-DuPont T50 colour difference data from file.
Returns
-------
rit_dupont : dict
Dictionary with two datasets and dV.
"""
dat = read_csv_file('metric_data/Data_RIT-DuPont.csv')
rit_dupont = dict()
rit_dupont['data1'] = Points(space.cielab, dat[:, 0:3].copy())
rit_dupont['data2'] = Points(space.cielab, dat[:, 3:6].copy())
rit_dupont['dV'] = dat[:, 6].copy()
return rit_dupont
# TODO:
#
# Metric data sets, as needed (instances of Tensors):
# BrownMacAdam
# +++
| gpl-3.0 |
probml/pyprobml | scripts/discrim_analysis_dboundaries_plot.py | 1 | 3204 | # Gaussian discriminant analysis in 2d
# Author: Duane Rich
# Based on matlab code by Kevin Murphy
#https://github.com/probml/pmtk3/blob/master/demos/discrimAnalysisDboundariesDemo.m
import numpy as np
import matplotlib.pyplot as plt
import os
figdir = '../figures'
def save_fig(fname): plt.savefig(os.path.join(figdir, fname))
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA
c = 'bgr'
m = 'xos'
n_samples = 30 # number of each class samples
model_names = ('LDA', 'QDA')
np.random.seed(0)
def mvn2d(x, y, u, sigma):
xx, yy = np.meshgrid(x, y)
xy = np.c_[xx.ravel(), yy.ravel()]
sigma_inv = np.linalg.inv(sigma)
z = np.dot((xy - u), sigma_inv)
z = np.sum(z * (xy - u), axis=1)
z = np.exp(-0.5 * z)
z = z / (2 * np.pi * np.linalg.det(sigma) ** 0.5)
return z.reshape(xx.shape)
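# Illustrative sketch (arbitrary grid): evaluate a standard bivariate normal
# density on a 50 x 50 grid.
#
#     xs = np.linspace(-3, 3, 50)
#     ys = np.linspace(-3, 3, 50)
#     density = mvn2d(xs, ys, np.zeros(2), np.eye(2))   # shape (50, 50)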
# Each model specifies the means and covariances.
# If the covariances are equal across classes, decision boundaries
# will be linear even if we use QDA
def is_pos_def(x):
return np.all(np.linalg.eigvals(x) > 0)
model1 = ([[1.5, 1.5], [-1.5, -1.5]],
[np.eye(2)] * 2)
model2 = ([[1.5, 1.5], [-1.5, -1.5]],
[[[1.5, 0], [0, 1]], np.eye(2) * 0.7])
model3 = ([[0, 0], [0, 5], [5, 5]],
[np.eye(2)] * 3)
Sigma1 = np.array([[4, 1], [1, 2]])
Sigma2 = np.array([[2, 0], [0, 1]])
Sigma3 = np.eye(2)
model4 = ([[0, 0], [0, 5], [5, 5]],
[Sigma1, Sigma2, Sigma3])
models = [model1, model2, model3, model4]
models = [model4]
for n_th, (u, sigma) in enumerate(models):
# generate random points
x = [] # store sample points
y = [] # store class labels
nclasses = len(u) # means
for i in range(nclasses):
x.append(np.random.multivariate_normal(u[i], sigma[i], n_samples))
y.append([i] * n_samples)
points = np.vstack(x)
labels = np.hstack(y)
x_min, y_min = np.min(points, axis=0)
x_max, y_max = np.max(points, axis=0)
N = 100
x_range = np.linspace(x_min - 1, x_max + 1, N)
y_range = np.linspace(y_min - 1, y_max + 1, N)
xx, yy = np.meshgrid(x_range, y_range)
for k, model in enumerate((LDA(), QDA())):
#fit, predict
clf = model
clf.fit(points, labels)
z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
z = z.reshape(N, N)
z_p = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])
#draw areas and boundries
plt.figure()
plt.pcolormesh(xx, yy, z)
plt.jet()
for j in range(nclasses):
            plt.contour(xx, yy, z_p[:, j].reshape(N, N),
                        [0.5], linewidths=3, colors='k')
#draw points
for i, point in enumerate(x):
plt.plot(point[:, 0], point[:, 1], c[i] + m[i])
#draw contours
for i in range(nclasses):
prob = mvn2d(x_range, y_range, u[i], sigma[i])
cs = plt.contour(xx, yy, prob, colors=c[i])
        plt.title('Separate {0} classes using {1}'.
format(nclasses, model_names[k]))
save_fig('discrimAnalysisDboundariesDemo{}.pdf'.format(n_th * 2 + k))
plt.show()
| mit |
MJuddBooth/pandas | asv_bench/benchmarks/io/excel.py | 5 | 1173 | import numpy as np
from pandas import DataFrame, date_range, ExcelWriter, read_excel
from pandas.compat import BytesIO
import pandas.util.testing as tm
class Excel(object):
params = ['openpyxl', 'xlsxwriter', 'xlwt']
param_names = ['engine']
def setup(self, engine):
N = 2000
C = 5
self.df = DataFrame(np.random.randn(N, C),
columns=['float{}'.format(i) for i in range(C)],
index=date_range('20000101', periods=N, freq='H'))
self.df['object'] = tm.makeStringIndex(N)
self.bio_read = BytesIO()
self.writer_read = ExcelWriter(self.bio_read, engine=engine)
self.df.to_excel(self.writer_read, sheet_name='Sheet1')
self.writer_read.save()
self.bio_read.seek(0)
def time_read_excel(self, engine):
read_excel(self.bio_read)
def time_write_excel(self, engine):
bio_write = BytesIO()
bio_write.seek(0)
writer_write = ExcelWriter(bio_write, engine=engine)
self.df.to_excel(writer_write, sheet_name='Sheet1')
writer_write.save()
from ..pandas_vb_common import setup # noqa: F401
| bsd-3-clause |
cqychen/quants | quants/DW/dwd_stock_price_rise_fall.py | 1 | 1639 | #coding=utf8
import tushare as ts;
import pymysql;
import time as dt
from datashape.coretypes import string
from pandas.io.sql import SQLDatabase
import sqlalchemy
import datetime
from sqlalchemy import create_engine
from pandas.io import sql
import threading
import pandas as pd;
import commands
import dateutil
import smtplib
from email.mime.text import MIMEText
from email.header import Header
import sys
sys.path.append('../') # add the configuration file directory
from common_function import *
def read_data(stock_code):
    '''
    Read the daily K-line rows for the given stock code,
    ordered from the most recent date backwards.
    :param stock_code: stock symbol to query
    :return: pandas DataFrame of daily quotes
    '''
cmd='''
select * from ods_tra_day_k where `code`='%s' order by `date` DESC
'''%stock_code
return pd.read_sql(cmd,conn)
def calculate_price_stock(stock_code,days_to_now=0):
    '''
    Compute and print relative price changes for the stock around
    the row `days_to_now` rows back from the latest quote.
    :param stock_code: stock symbol to query
    :param days_to_now: row offset from the latest quote, 0 = latest
    :return: None (results are printed)
    '''
rs=read_data(stock_code)
print(rs.head(10))
rs_close=rs['close']
print(rs_close.head(10))
print("frist",rs_close[1])
price_rate=[[0]*20]
for i in range(1,10):
price_rate[i]= float(rs_close[days_to_now+i]-rs_close[days_to_now])/rs_close[days_to_now+i]
print(price_rate)
if __name__ == '__main__':
print("--------------任务开始-----------------------------")
startTime=dt.time()
iphost,user,passwd=get_mysql_conn()
db='ods_data'
charset='utf8'
conn = pymysql.connect(user=user, passwd=passwd,host=iphost, db=db,charset=charset)
    # -------------------- script execution starts -------------------------------
calculate_price_stock('000001')
endTime=dt.time()
print("---------------脚本运行完毕,共计耗费时间%sS------------------"%(endTime-startTime))
| epl-1.0 |
k323r/wigglelab | python/PostProcessing/data/generateTestData.py | 1 | 2262 | #!/usr/bin/python3
from matplotlib import pyplot as plt
import numpy as np
def writeFile(outFile, timeSteps, data):
with open(outFile, 'w') as f:
f.writelines("# Forces\n# CofR : (0.000000e+00 0.000000e+00 0.000000e+00)\n# Time forces(pressure viscous porous) moment(pressure viscous porous)\n")
for t, x in zip(timeSteps, data):
f.writelines("".join([a for a in [str(t),
"\t(", # Forces
"(", str(x), " ", str(x + 1), " ", str(x + 2), ")" # pressure
" ",
"(", str(x + 3), " ", str(x + 4), " ", str(x + 5), ")" # viscous
" ",
"(", str(x + 6), " ", str(x + 7), " ", str(x + 8), ")" # porous
")",
" (", # Moments
"(", str(x), " ", str(x + 1), " ", str(x + 2), ")" # pressure
" ",
"(", str(x + 3), " ", str(x + 4), " ", str(x + 5), ")" # viscous
" ",
"(", str(x + 6), " ", str(x + 7), " ", str(x + 8), ")" # porous
")\n"]]
))
f.close()
if __name__ == "__main__":
f = 1.0
mag = 5.0
randMag = 0.1*mag
deltaT = 0.01
startTime = 0
stopTime = 10
outFileClean = "OpenFOAM4x/forces_synthetic_clean.dat"
outFileNoise = "OpenFOAM4x/forces_synthetic_noise.dat"
    timeSteps = np.linspace(startTime, stopTime, num=int((stopTime - startTime) / deltaT + 1), endpoint=True, dtype='f')
dataClean = mag * np.sin(2 * np.pi * f * timeSteps)
dataNoise = mag * np.sin(2 * np.pi * f * timeSteps) + np.random.normal(scale = randMag, size = len(timeSteps))
plt.figure(1)
plt.plot(timeSteps, dataClean, label="clean data")
plt.plot(timeSteps, dataNoise, label="noisy data, noise mag = {}".format(randMag))
plt.grid()
plt.legend(loc="best")
plt.show()
writeFile(outFileClean, timeSteps, dataClean)
writeFile(outFileNoise, timeSteps, dataNoise)
| mit |
TakayukiSakai/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/dataframe.py | 3 | 5887 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A DataFrame is a container for ingesting and preprocessing data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
import collections
from .series import Series
from .transform import Transform
class DataFrame(object):
"""A DataFrame is a container for ingesting and preprocessing data."""
__metaclass__ = ABCMeta
def __init__(self):
self._columns = {}
def columns(self):
"""Set of the column names."""
return frozenset(self._columns.keys())
def __len__(self):
"""The number of columns in the DataFrame."""
return len(self._columns)
def assign(self, **kwargs):
"""Adds columns to DataFrame.
Args:
**kwargs: assignments of the form key=value where key is a string
and value is an `inflow.Series`, a `pandas.Series` or a numpy array.
Raises:
TypeError: keys are not strings.
TypeError: values are not `inflow.Series`, `pandas.Series` or
`numpy.ndarray`.
TODO(jamieas): pandas assign method returns a new DataFrame. Consider
switching to this behavior, changing the name or adding in_place as an
argument.
"""
for k, v in kwargs.items():
if not isinstance(k, str):
raise TypeError("The only supported type for keys is string; got %s" %
type(k))
if isinstance(v, Series):
s = v
elif isinstance(v, Transform) and v.input_valency() == 0:
s = v()
# TODO(jamieas): hook up these special cases again
# TODO(soergel): can these special cases be generalized?
# elif isinstance(v, pd.Series):
# s = series.NumpySeries(v.values)
# elif isinstance(v, np.ndarray):
# s = series.NumpySeries(v)
else:
raise TypeError(
"Column in assignment must be an inflow.Series, pandas.Series or a"
" numpy array; got type '%s'." % type(v).__name__)
self._columns[k] = s
def select(self, keys):
"""Returns a new DataFrame with a subset of columns.
Args:
keys: A list of strings. Each should be the name of a column in the
DataFrame.
Returns:
A new DataFrame containing only the specified columns.
"""
result = type(self)()
for key in keys:
result[key] = self._columns[key]
return result
def __getitem__(self, key):
"""Indexing functionality for DataFrames.
Args:
key: a string or an iterable of strings.
Returns:
A Series or list of Series corresponding to the given keys.
"""
if isinstance(key, str):
return self._columns[key]
elif isinstance(key, collections.Iterable):
for i in key:
if not isinstance(i, str):
raise TypeError("Expected a String; entry %s has type %s." %
(i, type(i).__name__))
return [self.__getitem__(i) for i in key]
raise TypeError(
"Invalid index: %s of type %s. Only strings or lists of strings are "
"supported." % (key, type(key)))
def __setitem__(self, key, value):
if isinstance(key, str):
key = [key]
if isinstance(value, Series):
value = [value]
self.assign(**dict(zip(key, value)))
def build(self):
# We do not allow passing a cache here, because that would encourage
# working around the rule that DataFrames cannot be expected to be
# synced with each other (e.g., they shuffle independently).
cache = {}
tensors = {name: c.build(cache) for name, c in self._columns.items()}
return tensors
def to_input_fn(self, feature_keys=None, target_keys=None):
"""Build an input_fn suitable for use with Estimator.
Args:
feature_keys: the names of columns to be used as features. If None, all
columns except those in target_keys are used.
target_keys: the names of columns to be used as targets. None is
acceptable for unsupervised learning.
Returns:
A function that returns a pair of dicts (features, targets), each mapping
string names to Tensors.
Raises:
ValueError: when the feature and target key sets are non-disjoint
"""
if target_keys is None:
target_keys = []
if feature_keys is None:
feature_keys = self.columns() - set(target_keys)
else:
in_both = set(feature_keys) & set(target_keys)
if in_both:
raise ValueError(
"Columns cannot be used for both features and targets: %s" %
", ".join(in_both))
def input_fn():
# It's important to build all the tensors together in one DataFrame.
# If we did df.select() for both key sets and then build those, the two
# resulting DataFrames would be shuffled independently.
tensors = self.build()
# Note that (for now at least) we provide our columns to Estimator keyed
# by strings, so they are base features as far as Estimator is concerned.
# TODO(soergel): reconcile with FeatureColumn keys, Transformer etc.
features = {key: tensors[key] for key in feature_keys}
targets = {key: tensors[key] for key in target_keys}
return features, targets
return input_fn
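# Rough usage sketch (assumes age_series, income_series and label_series are
# existing Series objects; the names are illustrative, not part of the API):
#
#     df = DataFrame()
#     df.assign(age=age_series, income=income_series, label=label_series)
#     input_fn = df.to_input_fn(feature_keys=['age', 'income'],
#                               target_keys=['label'])
#     features, targets = input_fn()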
| apache-2.0 |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/mpl_toolkits/axisartist/angle_helper.py | 7 | 14787 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from math import floor
import numpy as np
import math
A = np.array
from mpl_toolkits.axisartist.grid_finder import ExtremeFinderSimple
def select_step_degree(dv):
degree_limits_ = [1.5, 3, 7, 13, 20, 40, 70, 120, 270, 520]
degree_steps_ = [ 1, 2, 5, 10, 15, 30, 45, 90, 180, 360]
degree_factors = [1.] * len(degree_steps_)
minsec_limits_ = [1.5, 2.5, 3.5, 8, 11, 18, 25, 45]
minsec_steps_ = [1, 2, 3, 5, 10, 15, 20, 30]
minute_limits_ = A(minsec_limits_)*(1./60.)
minute_factors = [60.] * len(minute_limits_)
second_limits_ = A(minsec_limits_)*(1./3600.)
second_factors = [3600.] * len(second_limits_)
degree_limits = np.concatenate([second_limits_,
minute_limits_,
degree_limits_])
degree_steps = np.concatenate([minsec_steps_,
minsec_steps_,
degree_steps_])
degree_factors = np.concatenate([second_factors,
minute_factors,
degree_factors])
n = degree_limits.searchsorted(dv)
step = degree_steps[n]
factor = degree_factors[n]
return step, factor
def select_step_hour(dv):
hour_limits_ = [1.5, 2.5, 3.5, 5, 7, 10, 15, 21, 36]
hour_steps_ = [1, 2 , 3, 4, 6, 8, 12, 18, 24]
hour_factors = [1.] * len(hour_steps_)
minsec_limits_ = [1.5, 2.5, 3.5, 4.5, 5.5, 8, 11, 14, 18, 25, 45]
minsec_steps_ = [1, 2, 3, 4, 5, 6, 10, 12, 15, 20, 30]
minute_limits_ = A(minsec_limits_)*(1./60.)
minute_factors = [60.] * len(minute_limits_)
second_limits_ = A(minsec_limits_)*(1./3600.)
second_factors = [3600.] * len(second_limits_)
hour_limits = np.concatenate([second_limits_,
minute_limits_,
hour_limits_])
hour_steps = np.concatenate([minsec_steps_,
minsec_steps_,
hour_steps_])
hour_factors = np.concatenate([second_factors,
minute_factors,
hour_factors])
n = hour_limits.searchsorted(dv)
step = hour_steps[n]
factor = hour_factors[n]
return step, factor
def select_step_sub(dv):
# subarcsec or degree
tmp = 10.**(int(math.log10(dv))-1.)
factor = 1./tmp
if 1.5*tmp >= dv:
step = 1
elif 3.*tmp >= dv:
step = 2
elif 7.*tmp >= dv:
step = 5
else:
step = 1
factor = 0.1*factor
return step, factor
def select_step(v1, v2, nv, hour=False, include_last=True,
threshold_factor=3600.):
if v1 > v2:
v1, v2 = v2, v1
dv = float(v2 - v1) / nv
if hour:
_select_step = select_step_hour
cycle = 24.
else:
_select_step = select_step_degree
cycle = 360.
# for degree
if dv > 1./threshold_factor:
#print "degree"
step, factor = _select_step(dv)
else:
step, factor = select_step_sub(dv*threshold_factor)
#print "feac", step, factor
factor = factor * threshold_factor
f1, f2, fstep = v1*factor, v2*factor, step/factor
levs = np.arange(math.floor(f1/step), math.ceil(f2/step)+0.5,
1, dtype="i") * step
# n : number of valid levels. If there is a cycle, e.g., [0, 90, 180,
# 270, 360], the grid line needs to be extended from 0 to 360, so
# we need to return the whole array. However, the last level (360)
# needs to be ignored often. In this case, so we return n=4.
n = len(levs)
# we need to check the range of values
# for example, -90 to 90, 0 to 360,
if factor == 1. and (levs[-1] >= levs[0]+cycle): # check for cycle
nv = int(cycle / step)
if include_last:
levs = levs[0] + np.arange(0, nv+1, 1) * step
else:
levs = levs[0] + np.arange(0, nv, 1) * step
n = len(levs)
return np.array(levs), n, factor
def select_step24(v1, v2, nv, include_last=True, threshold_factor=3600):
v1, v2 = v1/15., v2/15.
levs, n, factor = select_step(v1, v2, nv, hour=True,
include_last=include_last,
threshold_factor=threshold_factor)
return levs*15., n, factor
def select_step360(v1, v2, nv, include_last=True, threshold_factor=3600):
return select_step(v1, v2, nv, hour=False,
include_last=include_last,
threshold_factor=threshold_factor)
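# Note: select_step24/select_step360 return (levels, n, factor); the tick
# values come back scaled by `factor` (e.g. 60 for arcminutes, 3600 for
# arcseconds) and the matching Formatter* classes undo that scaling when
# rendering labels, so Locator and Formatter instances should be paired
# consistently.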
class LocatorBase(object):
def __init__(self, den, include_last=True):
self.den = den
self._include_last = include_last
def _get_nbins(self):
return self.den
def _set_nbins(self, v):
self.den = v
nbins = property(_get_nbins, _set_nbins)
def set_params(self, **kwargs):
if "nbins" in kwargs:
self.den = int(kwargs.pop("nbins"))
if kwargs:
raise ValueError("Following keys are not processed: %s" % \
", ".join([str(k) for k in kwargs.keys()]))
class LocatorHMS(LocatorBase):
def __call__(self, v1, v2):
return select_step24(v1, v2, self.den, self._include_last)
class LocatorHM(LocatorBase):
def __call__(self, v1, v2):
return select_step24(v1, v2, self.den, self._include_last,
threshold_factor=60)
class LocatorH(LocatorBase):
def __call__(self, v1, v2):
return select_step24(v1, v2, self.den, self._include_last,
threshold_factor=1)
class LocatorDMS(LocatorBase):
def __call__(self, v1, v2):
return select_step360(v1, v2, self.den, self._include_last)
class LocatorDM(LocatorBase):
def __call__(self, v1, v2):
return select_step360(v1, v2, self.den, self._include_last,
threshold_factor=60)
class LocatorD(LocatorBase):
def __call__(self, v1, v2):
return select_step360(v1, v2, self.den, self._include_last,
threshold_factor=1)
class FormatterDMS(object):
deg_mark = "^{\circ}"
min_mark = "^{\prime}"
sec_mark = "^{\prime\prime}"
fmt_d = "$%d"+deg_mark+"$"
fmt_ds = r"$%d.\!\!"+deg_mark+"%s$"
# %s for signe
fmt_d_m = r"$%s%d"+deg_mark+"\,%02d"+min_mark+"$"
fmt_d_ms = r"$%s%d"+deg_mark+"\,%02d.\mkern-4mu"+min_mark+"%s$"
fmt_d_m_partial = "$%s%d"+deg_mark+"\,%02d"+min_mark+"\,"
fmt_s_partial = "%02d"+sec_mark+"$"
fmt_ss_partial = "%02d.\!\!"+sec_mark+"%s$"
def _get_number_fraction(self, factor):
## check for fractional numbers
number_fraction = None
# check for 60
for threshold in [1, 60, 3600]:
if factor <= threshold:
break
d = factor // threshold
int_log_d = int(floor(math.log10(d)))
if 10**int_log_d == d and d!=1:
number_fraction = int_log_d
factor = factor // 10**int_log_d
return factor, number_fraction
return factor, number_fraction
def __call__(self, direction, factor, values):
if len(values) == 0:
return []
#ss = [[-1, 1][v>0] for v in values] #not py24 compliant
values = np.asarray(values)
ss = np.where(values>0, 1, -1)
sign_map = {(-1, True):"-"}
signs = [sign_map.get((s, v!=0), "") for s, v in zip(ss, values)]
factor, number_fraction = self._get_number_fraction(factor)
values = np.abs(values)
if number_fraction is not None:
values, frac_part = divmod(values, 10**number_fraction)
frac_fmt = "%%0%dd" % (number_fraction,)
frac_str = [frac_fmt % (f1,) for f1 in frac_part]
if factor == 1:
if number_fraction is None:
return [self.fmt_d % (s*int(v),) for (s, v) in zip(ss, values)]
else:
return [self.fmt_ds % (s*int(v), f1) for (s, v, f1) in \
zip(ss, values, frac_str)]
elif factor == 60:
deg_part, min_part = divmod(values, 60)
if number_fraction is None:
return [self.fmt_d_m % (s1, d1, m1) \
for s1, d1, m1 in zip(signs, deg_part, min_part)]
else:
return [self.fmt_d_ms % (s, d1, m1, f1) \
for s, d1, m1, f1 in zip(signs, deg_part, min_part, frac_str)]
elif factor == 3600:
if ss[-1] == -1:
inverse_order = True
values = values[::-1]
                signs = signs[::-1]
else:
inverse_order = False
l_hm_old = ""
r = []
deg_part, min_part_ = divmod(values, 3600)
min_part, sec_part = divmod(min_part_, 60)
if number_fraction is None:
sec_str = [self.fmt_s_partial % (s1,) for s1 in sec_part]
else:
sec_str = [self.fmt_ss_partial % (s1, f1) for s1, f1 in zip(sec_part, frac_str)]
for s, d1, m1, s1 in zip(signs, deg_part, min_part, sec_str):
l_hm = self.fmt_d_m_partial % (s, d1, m1)
if l_hm != l_hm_old:
l_hm_old = l_hm
l = l_hm + s1 #l_s
else:
l = "$"+s1 #l_s
r.append(l)
if inverse_order:
return r[::-1]
else:
return r
else: # factor > 3600.
return [r"$%s^{\circ}$" % (str(v),) for v in ss*values]
class FormatterHMS(FormatterDMS):
deg_mark = "^\mathrm{h}"
min_mark = "^\mathrm{m}"
sec_mark = "^\mathrm{s}"
fmt_d = "$%d"+deg_mark+"$"
fmt_ds = r"$%d.\!\!"+deg_mark+"%s$"
# %s for signe
fmt_d_m = r"$%s%d"+deg_mark+"\,%02d"+min_mark+"$"
fmt_d_ms = r"$%s%d"+deg_mark+"\,%02d.\!\!"+min_mark+"%s$"
fmt_d_m_partial = "$%s%d"+deg_mark+"\,%02d"+min_mark+"\,"
fmt_s_partial = "%02d"+sec_mark+"$"
fmt_ss_partial = "%02d.\!\!"+sec_mark+"%s$"
def __call__(self, direction, factor, values): # hour
return FormatterDMS.__call__(self, direction, factor, np.asarray(values)/15.)
class ExtremeFinderCycle(ExtremeFinderSimple):
"""
When there is a cycle, e.g., longitude goes from 0-360.
"""
def __init__(self,
nx, ny,
lon_cycle = 360.,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (-90, 90)
):
#self.transfrom_xy = transform_xy
#self.inv_transfrom_xy = inv_transform_xy
self.nx, self.ny = nx, ny
self.lon_cycle, self.lat_cycle = lon_cycle, lat_cycle
self.lon_minmax = lon_minmax
self.lat_minmax = lat_minmax
def __call__(self, transform_xy, x1, y1, x2, y2):
"""
get extreme values.
x1, y1, x2, y2 in image coordinates (0-based)
nx, ny : number of divisions in each axis
"""
x_, y_ = np.linspace(x1, x2, self.nx), np.linspace(y1, y2, self.ny)
x, y = np.meshgrid(x_, y_)
lon, lat = transform_xy(np.ravel(x), np.ravel(y))
# iron out jumps, but algorithm should be improved.
# This is just naive way of doing and my fail for some cases.
# Consider replacing this with numpy.unwrap
# We are ignoring invalid warnings. They are triggered when
# comparing arrays with NaNs using > We are already handling
# that correctly using np.nanmin and np.nanmax
with np.errstate(invalid='ignore'):
if self.lon_cycle is not None:
lon0 = np.nanmin(lon)
lon -= 360. * ((lon - lon0) > 180.)
if self.lat_cycle is not None:
lat0 = np.nanmin(lat)
lat -= 360. * ((lat - lat0) > 180.)
lon_min, lon_max = np.nanmin(lon), np.nanmax(lon)
lat_min, lat_max = np.nanmin(lat), np.nanmax(lat)
lon_min, lon_max, lat_min, lat_max = \
self._adjust_extremes(lon_min, lon_max, lat_min, lat_max)
return lon_min, lon_max, lat_min, lat_max
def _adjust_extremes(self, lon_min, lon_max, lat_min, lat_max):
lon_min, lon_max, lat_min, lat_max = \
self._add_pad(lon_min, lon_max, lat_min, lat_max)
# check cycle
if self.lon_cycle:
lon_max = min(lon_max, lon_min + self.lon_cycle)
if self.lat_cycle:
lat_max = min(lat_max, lat_min + self.lat_cycle)
if self.lon_minmax is not None:
min0 = self.lon_minmax[0]
lon_min = max(min0, lon_min)
max0 = self.lon_minmax[1]
lon_max = min(max0, lon_max)
if self.lat_minmax is not None:
min0 = self.lat_minmax[0]
lat_min = max(min0, lat_min)
max0 = self.lat_minmax[1]
lat_max = min(max0, lat_max)
return lon_min, lon_max, lat_min, lat_max
if __name__ == "__main__":
#test2()
#print select_step360(21.2, 33.3, 5)
#print select_step360(20+21.2/60., 21+33.3/60., 5)
#print select_step360(20.5+21.2/3600., 20.5+33.3/3600., 5)
# test threshold factor
print(select_step360(20.5+11.2/3600., 20.5+53.3/3600., 5,
threshold_factor=60))
print(select_step360(20.5+11.2/3600., 20.5+53.3/3600., 5,
threshold_factor=1))
fmt = FormatterDMS()
#print fmt("left", 60, [0, -30, -60])
print(fmt("left", 600, [12301, 12302, 12303]))
print(select_step360(20.5+21.2/3600., 20.5+21.4/3600., 5))
print(fmt("left", 36000, [738210, 738215, 738220]))
print(fmt("left", 360000, [7382120, 7382125, 7382130]))
print(fmt("left", 1., [45, 46, 47]))
print(fmt("left", 10., [452, 453, 454]))
if 0:
print(select_step360(20+21.2/60., 21+33.3/60., 5))
print(select_step360(20.5+21.2/3600., 20.5+33.3/3600., 5))
print(select_step360(20+21.2/60., 20+53.3/60., 5))
###
levs, n, factor = select_step360(20.5+21.2/3600., 20.5+27.25/3600., 5)
levs = levs * 0.1
fmt = FormatterDMS()
#print fmt("left", 60, [0, -30, -60])
print(fmt("left", factor, levs))
print(select_step(-180, 180, 10, hour=False))
print(select_step(-12, 12, 10, hour=True))
fmt = FormatterDMS()
#print fmt("left", 60, [0, -30, -60])
print(fmt("left", 3600, [0, -30, -60]))
| mit |
abmorton/stockhawk | app.py | 1 | 30024 | from flask import Flask, render_template, redirect, url_for, request, session, flash, Markup
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.cache import Cache
from flask.ext.mail import Mail, Message
# from celery import Celery
from sqlalchemy import desc
from yahoo_finance import Share
from forms import (StockSearchForm, LoginForm, RegisterForm,
                   PasswordReminderForm, PasswordResetForm,
                   DeleteAccountForm, TradeForm, FullTradeForm)
import datetime
import os
import config
# for plotting
from bokeh.plotting import figure, output_file, show
from bokeh.embed import components
import pandas as pd
from numpy import pi
# from helpers import get_datetime_today, pretty_numbers, pretty_ints, pretty_percent, pretty_leaders, get_leaderboard, get_user, get_account_details, clean_stock_search, get_Share, set_stock_data, write_stock_to_db, stock_lookup_and_write, search_company, convert_yhoo_date, trade
# --------------------------------------------------------------------
# Instatiate and configure app, db, cache, mail, celery, etc.:
app = Flask(__name__)
# app.config.from_object('config.DevConfig')
app.config.from_object(os.environ['APP_SETTINGS'])
cache = Cache(app)
mail = Mail(app)
# celery = Celery(app.name, broker=app.config['CELERY_BROKER_URL'])
# celery.conf.update(app.config)
db = SQLAlchemy(app)
# Import db models to be used, AFTER creating db or it fails!
from models import *
# ------------------------------------------------------------------
# helper functions to clean up app.py / view file
def get_datetime_today():
now = datetime.datetime.now()
today = datetime.date(now.year, now.month, now.day)
return today
# Converts numbers to more readable financial formats
def pretty_numbers(value):
return '${:,.2f}'.format(value)
def pretty_ints(value):
return '{:,}'.format(value)
def pretty_percent(value):
return '{:,.2f}%'.format(value)
def pretty_leaders(leaders):
for l in leaders:
l.prettyvalue = pretty_numbers(l.value)
return leaders
# Determines the color for gains/loses by passing a boolean value
# to the html template
def set_color(change):
if float(change) < 0.000000:
return True
else:
return False
# cache
def get_leaderboard(user):
allplayers = Portfolio.query.order_by(desc(Portfolio.value)).all()
leaders = Portfolio.query.order_by(desc(Portfolio.value)).limit(5).all()
leaders = pretty_leaders(leaders)
allplayers = pretty_leaders(allplayers)
if user != None:
user = User.query.filter_by(name=session['username']).first()
# finding player's position in leaderboard
for idx, val in enumerate(allplayers):
if user.portfolio == val:
user.rank = idx+1
else:
loggedin_user = None # needed?
user = None
return user, allplayers, leaders
def get_user():
if 'username' in session:
loggedin_user = session['username']
user = session['username']
else:
loggedin_user = None
user = None
return user
# bypass? or cache?
def get_account_details(portfolio, positions):
value = portfolio.cash
total_gain_loss = float(0.00)
total_cost = float(0.00)
portfolio.daily_gain = 0.000
for p in positions:
# stock_lookup_and_write(p.symbol) # unfactoring to use stock.stuff
stock = set_stock_data(Share(p.symbol))
write_stock_to_db(stock)
p.value = Stock.query.filter_by(symbol=p.symbol).first().price*p.sharecount
p.prettyvalue = pretty_numbers(p.value)
p.prettycost = pretty_numbers(p.cost)
value += p.value
p.gain_loss = p.value - p.cost
p.gain_loss_percent = p.gain_loss/p.cost*100
if p.gain_loss <= 0.0000:
p.loss = True
p.prettygain_loss = pretty_numbers(p.gain_loss)
total_gain_loss = float(p.gain_loss) + total_gain_loss
total_cost = float(p.cost) + total_cost
p.prettygain_loss_percent = pretty_percent(p.gain_loss_percent)
p.daily_gain = float(stock.change)*p.sharecount
p.prettydaily_gain = pretty_numbers(p.daily_gain)
if p.daily_gain <= 0.0000:
p.daily_gain_loss = True
portfolio.daily_gain += p.daily_gain
portfolio.prettydaily_gain = pretty_numbers(portfolio.daily_gain)
if portfolio.daily_gain <= 0.0000:
portfolio.daily_gain_loss = True
portfolio.total_cost = total_cost
portfolio.prettytotal_cost = pretty_numbers(total_cost)
portfolio.value = value
portfolio.prettyvalue = pretty_numbers(portfolio.value)
portfolio.prettycash = pretty_numbers(portfolio.cash)
portfolio.total_stock_value = portfolio.value - portfolio.cash
portfolio.prettytotal_stock_value = pretty_numbers(portfolio.total_stock_value)
portfolio.total_gain_loss = total_gain_loss
portfolio.prettytotal_gain_loss = pretty_numbers(portfolio.total_gain_loss)
if portfolio.total_cost != 0.00:
portfolio.total_gain_loss_percent = portfolio.total_gain_loss/portfolio.total_cost*100
portfolio.prettytotal_gain_loss_percent = pretty_percent(portfolio.total_gain_loss_percent)
else:
portfolio.total_gain_loss_percent = 0
portfolio.prettytotal_gain_loss_percent = "0%"
if portfolio.total_gain_loss < 0.00:
portfolio.loss = True
db.session.commit() # not necessary?
return portfolio, positions
# This is to take out punctuation and white spaces from the stock search string.
def clean_stock_search(symbol):
punctuation = '''!()-[]{ };:'"\,<>./?@#$%^&*_~0123456789'''
no_punct = ""
for char in symbol:
if char not in punctuation:
no_punct = no_punct + char
if len(no_punct) == 0:
no_punct = 'RETRY'
return no_punct
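# Illustrative behaviour of the cleaning step:
#     clean_stock_search("AAPL!")  -> "AAPL"
#     clean_stock_search("$$123")  -> "RETRY"   # nothing left after stripping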
# bypass?
# @db_if_yahoo_fail
def get_Share(symbol):
stock = Share(clean_stock_search(symbol))
return stock
# Puts various attributes into 'stock' via different Share methods.
def set_stock_data(stock):
stock.name = stock.data_set["Name"]
stock.symbol = stock.data_set["Symbol"].upper()
stock.exchange = stock.get_stock_exchange()
stock.price = float(stock.get_price())
stock.prettyprice = pretty_numbers(stock.price)
stock.change = stock.get_change()
stock.percent_change = stock.data_set["PercentChange"]
stock.afterhours = stock.data_set['AfterHoursChangeRealtime']
stock.last_traded = stock.get_trade_datetime()
stock.prev_close = stock.get_prev_close()
stock.open = stock.get_open()
stock.bid = stock.data_set['Bid']
stock.ask = stock.data_set['Ask']
stock.yr_target = stock.data_set['OneyrTargetPrice']
stock.volume = stock.get_volume()
stock.av_volume = stock.get_avg_daily_volume()
stock.day_low = stock.get_days_low()
stock.day_high = stock.get_days_high()
stock.day_range = str(stock.day_high)+" - "+str(stock.day_low)
stock.year_high = stock.get_year_high()
stock.year_low = stock.get_year_low()
stock.year_range = str(stock.year_high)+" - "+str(stock.year_low)
stock.market_cap = stock.data_set["MarketCapitalization"]
stock.peratio = stock.data_set["PERatio"]
if stock.peratio != None:
stock.prettyperatio = pretty_numbers(float(stock.peratio))
else:
stock.prettyperatio = None
stock.div = stock.data_set["DividendYield"]
# not sure why this is causing problems, commenting for now
# stock.div = float(stock.div)
stock.prettyex_div = stock.data_set['ExDividendDate']
stock.ex_div = convert_yhoo_date(stock.data_set['ExDividendDate'])
stock.prettydiv_pay = stock.data_set['DividendPayDate']
stock.div_pay = convert_yhoo_date(stock.data_set['DividendPayDate'])
stock.view_count = 1
stock.loss = set_color(stock.change)
return stock
def write_stock_to_db(stock):
# Here, the input 'stock' argument is a stock object
# which has been passed through the set_stock_data function.
# it might be worth taking the commit()s outside of the function
if Stock.query.filter_by(symbol=stock.symbol).first() == None:
db.session.add(Stock(stock.symbol, stock.name, stock.exchange, stock.price, \
stock.div, stock.ex_div, stock.div_pay, stock.market_cap, stock.view_count))
db.session.commit()
else:
write_stock = Stock.query.filter_by(symbol=stock.symbol).first()
write_stock.view_count += 1
write_stock.price = stock.price
write_stock.div_yield = stock.div
write_stock.ex_div = stock.ex_div
write_stock.div_pay = stock.div_pay
write_stock.market_cap = stock.market_cap
db.session.commit()
# Look up a stock based on a 'cleaned' input string
def stock_lookup_and_write(symbol):
stock = set_stock_data(Share(symbol))
write_stock_to_db(stock)
return stock
# I don't think I've implemented this everywhere yet, need to review.
def search_company(symbol):
symbol = "%"+symbol+"%"
# results = Stock.query.filter(Stock.name.ilike(symbol)).first()
results = Stock.query.filter(Stock.name.ilike(symbol)).all()
return results
# Yahoo dates are strings that look like "8/12/2015"; we need to
# convert this into a python datetime format for the db.
def convert_yhoo_date(yhoo_date):
# argument yhoo_date should look like "8/6/2015" or None.
if yhoo_date != None:
# split and unpack month, day, year variables
month, day, year = yhoo_date.split('/')
# convert from strings to integers, for datetime.date function below
month = int(month)
day = int(day)
year = int(year)
# create datetime object
return datetime.date(year, month, day)
else:
return None
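# For example, convert_yhoo_date("8/6/2015") returns datetime.date(2015, 8, 6)
# and convert_yhoo_date(None) returns None.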
def trade(stock, share_amount, buy_or_sell, user, portfolio, positions):
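    # Rough flow: refresh the quote and persist it, then either open/extend a
    # position (buy) or reduce/close one (sell), recording a Trade row and
    # adjusting the portfolio's cash balance along the way.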
stock = set_stock_data(stock)
write_stock_to_db(stock) # NOW?
# get actual stock in db ## WHY?
stock = Stock.query.filter_by(symbol=stock.symbol).first()
# price and total_cost should be float
price = (stock.price) #I don't think this is strictly necessary.
total_cost = float(share_amount*price)
today = get_datetime_today()
# 1 or -1 multiplier against share_amount
if buy_or_sell == 'buy':
# wants to buy
bs_mult = 1
total_cost = total_cost*bs_mult
# check to see if user has enough cash available
cash = float(portfolio.cash)
if cash > total_cost:
new_cash = cash - total_cost
# for new positions in a given stock
if portfolio.positions.filter_by(symbol=stock.symbol).first() == None:
# create & write the new position
position = Position(user.portfolio.id, stock.symbol, total_cost, total_cost, share_amount, None)
db.session.add(position)
db.session.commit()
flash(" Opened position in " + stock.name + ".")
# now create trade (need datetime object)
trade = Trade(stock.symbol, position.id, user.portfolio.id, total_cost, share_amount, today, stock.div_yield, stock.ex_div, stock.div_pay)
db.session.add(trade)
# db.session.commit()
flash("You bought " + str(share_amount) + " shares of " + stock.name + " at " + pretty_numbers(price) + " per share.")
# adjusting user.portfolio.cash
user.portfolio.cash = new_cash
db.session.commit()
flash("Cash adjusted: -" + pretty_numbers(total_cost))
# for already existing positions
elif user.portfolio.positions.filter_by(symbol=stock.symbol).all() != None:
position = user.portfolio.positions.filter_by(symbol=stock.symbol).first()
# found the position, now adjust share count.
trade = Trade(stock.symbol, position.id, user.portfolio.id, total_cost, share_amount, today, stock.div_yield, stock.ex_div, stock.div_pay)
db.session.add(trade)
flash("You bought " + str(share_amount) + " shares of " + stock.name + " at " + pretty_numbers(price) + " per share.")
user.portfolio.cash = new_cash
position.cost = float(position.cost) + total_cost
position.value = float(position.value) + total_cost
position.sharecount += share_amount
db.session.commit()
else:
deficit = total_cost - cash
flash("Sorry, that costs "+ pretty_numbers(total_cost) + ", which is " + pretty_numbers(deficit) + " more than you have available. Try buying fewer shares.")
else:
# wants to sell
bs_mult = -1
total_cost = total_cost*bs_mult
# check to see if there are enough stocks in the user's position
position = user.portfolio.positions.filter_by(symbol=stock.symbol).first()
if position != None:
if position.sharecount >= share_amount:
trade = Trade(stock.symbol, position.id, user.portfolio.id, total_cost, -1*share_amount, today, stock.div_yield, stock.ex_div, stock.div_pay)
db.session.add(trade)
flash("You sold " + str(share_amount) + " shares of " + stock.name + " at " + pretty_numbers(stock.price) + " per share. Adding " + pretty_numbers(total_cost*-1) + " to your cash balance.")
# update position
user.portfolio.cash = float(user.portfolio.cash) - total_cost
position.cost = float(position.cost) + total_cost
position.value = float(position.value) + total_cost
position.sharecount = position.sharecount + share_amount*bs_mult
# I'll remove this one if I can figure out the bug with Heroku's db.
db.session.commit()
# close position if no more shares
if position.sharecount == 0:
try:
db.session.delete(position)
db.session.commit()
flash("Your position in " + stock.name + " has been closed.")
except:
flash("Your position in " + stock.name + " is now empty. I'm working on a way to remove it from the database.")
else:
flash("You only have " + str(position.sharecount) + " shares of " + str(stock.symbol) + ". Try selling fewer shares.")
else:
flash("You don't have any shares of " + stock.symbol + " to sell.")
def prepare_stock_graph(symbol, start, end):
stock = Share(symbol)
stringprices = list(pd.DataFrame(stock.get_historical(start,end))['Adj_Close'])
stringdates = list(pd.DataFrame(stock.get_historical(start, end))['Date'])
prices = [float(p) for p in stringprices]
dates = []
for d in stringdates:
year, month, day = d.split('-')
d = datetime.date(int(year), int(month), int(day))
dates.append(d)
return prices, dates
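# Illustrative call (dates are 'YYYY-MM-DD' strings; assumes the Yahoo API
# returns historical rows for the symbol):
#     prices, dates = prepare_stock_graph('AAPL', '2015-07-01', '2015-08-01')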
def build_portfolio_pie(portfolio, positions):
percent_base = 0.00
percents = [percent_base]
for p in positions:
p.position_value_percentage = float(p.value)/float(portfolio.value-portfolio.cash)
percent_base = percent_base + float(p.position_value_percentage)
percents.append(percent_base)
# percents.append(float(portfolio.cash)/float(portfolio.value))
stocknames = [p.symbol for p in positions]
# stocknames = stocknames.append('Cash')
starts = [float(p)*2*pi for p in percents[:-1]]
ends = [float(p)*2*pi for p in percents[1:]]
# ends.append(starts[:-1])
    color_palette = ['aqua', 'aquamarine', 'cadetblue', 'chartreuse',
                     'cornflowerblue', 'darkslateblue', 'darkslategray',
                     'deepskyblue', 'dodgerblue', 'lawngreen', 'lightblue',
                     'lightcyan', 'lightseagreen', 'lightsteelblue',
                     'mediumaquamarine', 'mediumblue', 'mediumseagreen',
                     'blue', 'green', 'navy', 'indigo', 'purple', 'cyan',
                     'darkblue', 'darkcyan', 'darkseagreen', 'darkturquoise',
                     'forestgreen', 'mediumturquoise']
colors = [color_palette[n] for n in range(0,len(percents))]
p = figure(x_range=(-1.1,1.85), y_range=(-1,1), title='Stock positions', toolbar_location='below', tools='', width=420, plot_height=320)
for n in range(0,len(positions)):
p.wedge(x=0, y=0, radius=1, start_angle=starts[n], end_angle=ends[n], color=colors[n], legend=stocknames[n])
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
p.xaxis.major_tick_line_color = None
p.xaxis.minor_tick_line_color = None
p.yaxis.major_tick_line_color = None
p.yaxis.minor_tick_line_color = None
p.outline_line_color = None
script, div = components(p)
return script, div, colors
def build_stock_plot(symbol, dates, prices):
average_price = float(sum(prices))/len(prices)
average_dates = [0 for n in prices]
first_price = prices[-1]
p = figure(width=610, plot_height=300, tools='pan,box_zoom,reset', title='1 month period', x_axis_label=None, x_axis_type='datetime', y_axis_label='$ per share', toolbar_location='below')
p.line(dates, prices, color='navy', alpha=0.9, line_width=2, legend=symbol.upper())
p.line(dates, average_price, color='orange', legend='Average', alpha=0.4, line_width=1.5)
# p.line(dates, first_price, color='red', legend='Starting', alpha=0.4, line_width=1.5)
p.ygrid.minor_grid_line_color = 'navy'
p.ygrid.minor_grid_line_alpha = 0.1
p.legend.orientation = 'top_left'
p.legend.label_text_font = 'Helvetica'
script, div = components(p)
return script, div
# === decorator and email imports ======
from decorators import *
from emails import send_async_email, send_email, new_user_email, password_reminder_email, password_reset_email
# Importing email functions here since they use the above decorator.
#=== views ====================
@app.errorhandler(404)
def not_found(e):
flash('Resource not found.')
user = get_user()
return render_template('/404.html', loggedin_user=user)
@app.route('/about')
@login_reminder
def about():
title = 'About StockHawk'
user = get_user()
return render_template('index.html', title=title, loggedin_user=user)
@app.route('/register', methods=['GET', 'POST'])
def register():
title = 'Register a new account'
form = RegisterForm(request.form)
if request.method == 'POST' and form.validate():
now = datetime.datetime.now()
username = form.username.data.lower()
email = form.email.data
password = form.password.data
if User.query.filter_by(name=username).first() == None:
if User.query.filter_by(email=email).first() == None:
user = User(username, email, password, now)
db.session.add(user)
db.session.commit()
# create portfolio for the user at the same time
port = Portfolio(user.id, 1000000, 1000000)
db.session.add(port)
db.session.commit()
session['logged_in'] = True
session['username'] = user.name
flash('Thanks for registering!')
flash('$1,000,000.00 was added to your account.')
new_user_email(user)
return redirect(url_for('user'))
else:
flash('That email is already registered with a user. Please log in or register another user.')
return redirect(url_for('register'))
else:
flash('That user name already exists.')
return redirect(url_for('register'))
elif request.method == 'POST' and not form.validate():
flash('Try again.')
elif request.method == 'GET':
return render_template('register.html', title=title, form=form)
return render_template('register.html', title=title, form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
form = LoginForm(request.form)
title = 'Login'
if request.method == 'POST' and form.validate():
user = User.query.filter_by(name=form.username.data).first()
if user != None:
userpw = user.password
if userpw == form.password.data:
session['logged_in'] = True
# experiment
session['username'] = request.form['username']
flash('You were just logged in.')
user.last_seen = datetime.datetime.now()
db.session.commit()
return redirect(url_for('user'))
else:
flash('Incorrect password for that user name, please try again.')
return redirect(url_for('login'))
else:
# Allowing the user to sign in using email.
user = User.query.filter_by(email=form.username.data).first()
if user != None:
userpw = user.password
if userpw == form.password.data:
session['logged_in'] = True
session['username'] = user.name
flash('You were just logged in.')
user.last_seen = datetime.datetime.now()
db.session.commit()
return redirect(url_for('user'))
else:
flash('That user name does not exist in our system. Please try again or sign up for a new account.')
return redirect(url_for('login'))
return render_template('login.html', form=form, error=error, title=title)
elif request.method == 'POST' and not form.validate():
flash('Invalid username or password. Try again or register a new account.')
return redirect(url_for('login'))
elif request.method == 'GET':
return render_template('login.html', form=form, error=error, title=title)
@app.route('/logout')
@login_required
def logout():
session.pop('logged_in', None)
session.pop('username', None)
flash('You were just logged out.')
return redirect(url_for('stocks'))
@app.route('/password_reminder', methods=['GET', 'POST'])
def password_reminder():
error = None
form = PasswordReminderForm(request.form)
title = "Forgot your password?"
if request.method == 'POST' and form.validate():
user = User.query.filter_by(name=form.username.data).first()
if user != None:
password_reminder_email(user)
flash("Sent reminder email to "+user.name+"'s email address. Please check your inbox and sign in. Check your spam folder if you don't see our email within a couple of minutes.")
return redirect(url_for('login'))
else:
# Allowing the user to sign in using email.
user = User.query.filter_by(email=form.username.data).first()
if user != None:
password_reminder_email(user)
flash("Sent reminder email to "+user.email+". Please check your inbox and sign in. Check your spam folder if you don't see our email within a couple of minutes.")
return redirect(url_for('login'))
else:
flash("We couldn't find any user with that username or email address. Please try a different name/address or register a new account.")
elif request.method == 'POST' and not form.validate():
flash('Invalid username or password. Try again or register a new account.')
return redirect(url_for('password_reminder'))
# elif request.method == 'GET':
return render_template('password_reminder.html', form=form, title=title, error=error)
@app.route('/db_view')
@login_reminder
# @cache.cached(timeout=40)
# unless I figure out a better way, I can't cache user pages. Two concurrent users are able to see the other's page if it's in cache!
def db_view():
title = "Under the hood"
user = get_user()
stocks = Stock.query.all()
users = User.query.all()
trades = Trade.query.all()
portfolios = Portfolio.query.all()
positions = Position.query.all()
return render_template("db_view.html", title=title, stocks=stocks, users=users, trades=trades, positions=positions, portfolios=portfolios, loggedin_user=user)
@app.route('/aboutadam')
def aboutadam():
return render_template('aboutadam.html')
@app.route('/tos')
def tos():
return render_template('tos.html')
@app.route('/news')
@login_reminder
def news():
title = 'Release log'
user = get_user()
return render_template('news.html', title=title, loggedin_user=user)
@app.route('/leaderboard')
@login_reminder
def leaderboard():
title = "Leaderboard"
flash("This page is under development. It will look nicer soon!")
loggedin_user = get_user()
user, allplayers, leaders = get_leaderboard(loggedin_user)
return render_template('leaderboard.html', title=title, leaders=allplayers, loggedin_user=loggedin_user)
@app.route('/user', methods=['GET', 'POST'])
@login_required
def user():
today = get_datetime_today()
form = FullTradeForm(request.form)
loggedin_user = get_user()
user = User.query.filter_by(name=session['username']).first()
title = user.name+"'s account summary"
portfolio = user.portfolio
positions = portfolio.positions.all()
for p in positions:
p.prettysharecount = pretty_ints(p.sharecount)
if request.method == 'GET':
# refresh current stock prices and therefore account value
portfolio, positions = get_account_details(portfolio, positions)
script, div, colors = build_portfolio_pie(portfolio, positions)
return render_template('account.html', title=title, user=user, portfolio=portfolio, form=form, loggedin_user=loggedin_user, positions=positions, script=script, div=div, colors=colors)
elif request.method == 'POST' and form.validate():
stock = get_Share(form.symbol.data)
# stock = Share(clean_stock_search(form.symbol.data))
share_amount = form.share_amount.data
buy_or_sell = form.buy_or_sell.data
if stock.get_price() == None:
# If it's POST and valid, but there's no such stock
flash("Couldn't find stock matching "+form.symbol.data.upper()+". Try another symbol.")
return redirect(url_for('user'))
else:
# if it's POSTed, validated, and there actually is a real stock
trade(stock, share_amount, buy_or_sell, user, portfolio, positions)
return redirect(url_for('user'))
elif request.method == 'POST' and not form.validate():
flash('Invalid values. Please try again.')
return redirect(url_for('user'))
@app.route('/settings', methods=['GET', 'POST'])
@login_required
def settings():
loggedin_user = get_user()
user, allplayers, leaders = get_leaderboard(loggedin_user)
form = PasswordResetForm(request.form)
deleteform = DeleteAccountForm(request.form)
title = "{}'s account settings".format(user.name)
if request.method == 'POST' and form.validate():
if form.old_password.data == user.password:
flash("Your password has been reset.")
user.password = form.new_password.data
db.session.commit()
password_reset_email(user)
return redirect(url_for('user'))
else:
flash("Your old password was incorrect. Please try again.")
return redirect(url_for('settings'))
elif request.method == 'POST' and not form.validate():
flash("Something went wrong; please try again.")
return redirect(url_for('settings'))
else:
return render_template('settings.html', title=title, loggedin_user=loggedin_user, user=user, form=form, deleteform=deleteform)
@app.route('/delete_account', methods=['GET', 'POST'])
@login_required
def delete_account():
deleteform = DeleteAccountForm(request.form)
loggedin_user = get_user()
user, allplayers, leaders = get_leaderboard(loggedin_user)
if request.method == 'POST' and deleteform.validate():
if deleteform.confirm.data.upper() == 'DELETE':
db.session.delete(user)
db.session.commit()
flash("Your account has been deleted.")
return redirect(url_for('logout'))
else:
flash('Type "DELETE" in the field below if you are sure you want to delete your account; this cannot be undone.')
return redirect(url_for('settings'))
elif request.method == 'POST' and not deleteform.validate():
flash('Type "DELETE" in the field below if you are sure you want to delete your account; this cannot be undone.')
return redirect(url_for('settings'))
@app.route('/', methods=['GET', 'POST'])
@login_reminder
def stocks():
title = 'StockHawk'
stock = None
loggedin_user = get_user()
user, allplayers, leaders = get_leaderboard(loggedin_user)
form = StockSearchForm(request.form)
tradeform = TradeForm(request.form)
stocks = Stock.query.order_by(desc(Stock.view_count)).limit(10).all()
if request.method == 'POST':
if form.validate():
stock = get_Share(form.stocklookup.data)
# stock = Share(clean_stock_search(form.stocklookup.data))
if stock.data_set['Open'] == None:
# company lookup goes here
company_results = search_company(form.stocklookup.data)
stock = None
if len(company_results) == 0:
flash("Couldn't find symbol or company matching "+form.stocklookup.data.upper()+". Try searching for something else.")
else:
flash("Didn't find that symbol, but found " + str(len(company_results)) +" matching company names:")
return render_template('stocks.html', stock=stock, form=form, stocks=stocks, leaders=leaders, user=user, loggedin_user=loggedin_user, results=company_results)
else:
# There is a stock with this symbol, serve the dynamic page
stock = set_stock_data(stock)
# Some stocks appear to not have company names
if stock.name != None:
title = stock.symbol+" - "+stock.name
else:
title = stock.symbol+" - Unnamed company"
write_stock_to_db(stock)
return redirect(url_for('stock', symbol=stock.symbol))
elif not form.validate():
flash("Please enter a stock.")
return redirect(url_for('stocks'))
return render_template('stocks.html', form=form, tradeform=tradeform, stock=stock, leaders=leaders, title=title, user=user, loggedin_user=loggedin_user)
elif request.method == 'GET':
for s in stocks:
s.prettyprice = pretty_numbers(s.price)
return render_template('stocks.html', form=form, tradeform=tradeform, stock=stock, stocks=stocks, leaders=leaders, title=title, user=user, loggedin_user=loggedin_user)
@app.route('/<symbol>', methods=['GET', 'POST'])
def stock(symbol):
stock = get_Share(symbol)
if stock.data_set['Open'] == None:
# flash("Couldn't find that stock. Try another symbol.")
stock = None
return redirect(url_for('stocks'))
else:
# you wrote a function for these two lines, replace here!
stock = set_stock_data(Share(symbol))
write_stock_to_db(stock)
### ^^
title = stock.name
loggedin_user = get_user()
user, allplayers, leaders = get_leaderboard(loggedin_user)
form = StockSearchForm(request.form)
tradeform = TradeForm(request.form)
stocks = Stock.query.order_by(desc(Stock.view_count)).limit(10).all()
if user != None:
portfolio = user.portfolio
portfolio.prettycash = pretty_numbers(portfolio.cash)
            # This is to show how many shares of that particular stock a user has in his/her position.
positions = portfolio.positions
position = portfolio.positions.filter_by(symbol=symbol).first()
if position:
position.prettysharecount = pretty_ints(position.sharecount)
else:
portfolio = None
position = None
positions = None
if request.method == 'POST' and tradeform.validate():
share_amount = tradeform.amount.data
buy_or_sell = tradeform.buy_or_sell.data
if stock.get_price() == None:
# If it's POST and valid, but there's no such stock
flash("Couldn't find stock matching "+symbol.upper()+". Try another symbol.")
return redirect(url_for('stocks'))
else:
# if it's POSTed, validated, and there is a real stock
trade(stock, share_amount, buy_or_sell, user, portfolio, positions)
return redirect(url_for('user'))
elif request.method == 'POST' and not tradeform.validate():
flash("Invalid share amount; please try again.")
return redirect(url_for('stocks'))
if request.method == 'GET':
start = '2015-09-30'
end = '2015-10-31'
prices, dates = prepare_stock_graph(symbol, start, end)
script, div = build_stock_plot(symbol, dates, prices)
return render_template('stock.html', form=form, tradeform=tradeform, stock=stock, stocks=stocks, leaders=leaders, title=title, user=user, loggedin_user=loggedin_user, position=position, script=script, div=div)
if __name__ == '__main__':
# app.run(host='0.0.0.0')
app.run()
| mit |
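The routes in the file above lean on a session-based `@login_required` decorator together with the `logged_in`/`username` session keys. The decorator itself is not shown in this excerpt, so the following is only a minimal sketch of that pattern (Flask assumed; names and messages are illustrative and may differ from the app's own helper):

from functools import wraps
from flask import Flask, session, flash, redirect, url_for

app = Flask(__name__)
app.secret_key = 'dev'  # assumption: any secret key; sessions and flash() need one

def login_required(view):
    @wraps(view)
    def wrapped(*args, **kwargs):
        if not session.get('logged_in'):
            flash('Please log in first.')
            return redirect(url_for('login'))
        return view(*args, **kwargs)
    return wrapped

@app.route('/login')
def login():
    return 'login page'

@app.route('/secret')
@login_required
def secret():
    return 'only for logged-in users'

A fuller implementation would also remember the originally requested URL (for example via a `next` query parameter) before redirecting to the login page.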
dwettstein/pattern-recognition-2016 | search/plot_accuracy.py | 1 | 4494 | import re
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import gaussian_kde
from utils.fio import get_absolute_path
def parse_log(filepath):
pat = re.compile('(x?)\s*(\S+) -> (\S+)\s+'
'#train-words: (\d+)\s+'
'#candidates: (\d+)\s+'
'min-dist: (\d+)\s+'
'votes: (\d+)\s+'
'cpu-time: (\d+\.\d+)\s+'
'id: (\S+)')
co = []
y_in = []
y_out = []
nt = []
nc = []
md = []
v = []
cpu = []
wid = []
for lineN, line in enumerate(open(filepath, 'r')):
if lineN < 6:
continue
mat = pat.match(line.strip())
if mat:
co.append(mat.group(1) != 'x')
y_in.append(mat.group(2))
y_out.append(mat.group(3))
nt.append(int(mat.group(4)))
nc.append(int(mat.group(5)))
md.append(int(mat.group(6)))
v.append(int(mat.group(7)))
cpu.append(float(mat.group(8)))
wid.append(mat.group(9))
co = np.array(co)
nt = np.array(nt)
return co, y_in, y_out, nt, nc, md, v, cpu, wid
def plot_accuracy(counts, labels, ntrain, cpu):
oaa = sum(counts) / len(counts)
cotr = counts[ntrain > 0]
acc = sum(cotr) / len(cotr)
cput = sum(cpu) / 60
print('\nOverall accuracy: %.2f' % oaa)
print('Accuracy given at least 1 training sample: %.2f' % acc)
print('CPU time: %0.2f min' % cput)
d = {}
for lbl, cor, ntr, in zip(labels, counts, ntrain):
if lbl in d:
d[lbl][0].append(cor)
d[lbl][1].append(ntr)
else:
d[lbl] = ([cor], [ntr])
print('')
cte = np.array([sum(x[0]) for x in d.values()])
num = np.array([len(x[0]) for x in d.values()])
y = cte / num
x = np.array([x[1][0] for x in d.values()])
x2 = np.array([len(x) for x in d])
fig = plt.figure()
ax = fig.add_subplot(121)
# ax.plot(x, y, '.')
xy = np.vstack([x, y])
z = gaussian_kde(xy)(xy)
sc = ax.scatter(x, y, c=z, s=100, edgecolor='')
ax.set_ylim([-0.05, 1.05])
ax.set_xlim([-5, max(x) + 5])
plt.grid()
plt.xlabel('# training samples (for given label)')
plt.ylabel('accuracy')
cbar = plt.colorbar(sc)
cbar.ax.set_ylabel('label density')
# cbar.set_ticks([0, 0.25, 0.5, 0.75, 1])
# cbar.set_ticklabels(['0', '0.25', '0.5', '0.75', '1'], update_ticks=True)
ax2 = fig.add_subplot(122)
xy2 = np.vstack([x2, y])
z2 = gaussian_kde(xy2)(xy2)
sc2 = ax2.scatter(x2, y, c=z2, s=100, edgecolor='')
ax2.set_ylim([-0.05, 1.05])
ax2.set_xlim([-5, max(x2) + 5])
plt.grid()
plt.xlabel('word length')
plt.ylabel('accuracy')
cbar = plt.colorbar(sc2)
cbar.ax.set_ylabel('label density')
plt.show()
def filter_results(lbls, cnts, ntrs, wids, a_filt=None, n_filt=None, l_filt=None):
d = {}
for l, i, c, n in zip(lbls, wids, cnts, ntrs):
if l in d:
d[l][0].append(c)
d[l][2].append(i)
else:
d[l] = [[c], n, [i]]
for l in d:
flag = True
if a_filt is not None:
acc = sum(d[l][0]) / len(d[l][0])
c_estr = str(acc) + a_filt
flag = flag and eval(c_estr)
if n_filt is not None:
n_estr = str(d[l][1]) + n_filt
flag = flag and eval(n_estr)
if l_filt is not None:
l_estr = str(len(l)) + l_filt
flag = flag and (eval(l_estr))
if flag:
print('%s, %s' % (l, d[l][2]))
return l, d[l][2]
return None, None
def main():
# validation log
p = get_absolute_path('search/log/16-05-23_23-46_classification.log')
co1, y_in1, y_out1, nt1, nc1, md1, v1, cpu1, ids1 = parse_log(p)
plot_accuracy(co1, y_in1, nt1, cpu1)
# example of how to filter for a specific point in the graph
label, wid = filter_results(y_in1, co1, nt1, ids1, a_filt='<0.1', n_filt='>10')
# training log
p = get_absolute_path('search/log/16-05-23_23-38_classification.log')
co2, y_in2, y_out2, nt2, nc2, md2, v2, cpu2, ids2 = parse_log(p)
plot_accuracy(co2, y_in2, nt2, cpu2)
label, wid = filter_results(y_in2, co2, nt2, ids2, a_filt='<0.01', l_filt='>14')
plot_accuracy(np.append(co1, co2), np.append(y_in1, y_in2), np.append(nt1, nt2), np.append(cpu1, cpu2))
if __name__ == '__main__':
main()
| mit |
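plot_accuracy() above colours each (training-samples, accuracy) point by its local density using scipy.stats.gaussian_kde. A stand-alone sketch of that plotting trick on synthetic data (only numpy, scipy and matplotlib are assumed; the numbers are made up):

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde

rng = np.random.default_rng(0)
x = rng.integers(0, 50, 500).astype(float)      # e.g. number of training samples
y = np.clip(rng.normal(0.7, 0.2, 500), 0, 1)    # e.g. per-label accuracy

xy = np.vstack([x, y])
z = gaussian_kde(xy)(xy)                        # point density evaluated at each point
sc = plt.scatter(x, y, c=z, s=60, edgecolor='none')
plt.colorbar(sc, label='label density')
plt.xlabel('# training samples')
plt.ylabel('accuracy')
plt.grid()
plt.show()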
szhem/spark | python/setup.py | 5 | 10182 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import glob
import os
import sys
from setuptools import setup, find_packages
from shutil import copyfile, copytree, rmtree
if sys.version_info < (2, 7):
print("Python versions prior to 2.7 are not supported for pip installed PySpark.",
file=sys.stderr)
sys.exit(-1)
try:
exec(open('pyspark/version.py').read())
except IOError:
print("Failed to load PySpark version file for packaging. You must be in Spark's python dir.",
file=sys.stderr)
sys.exit(-1)
VERSION = __version__
# A temporary path so we can access above the Python project root and fetch scripts and jars we need
TEMP_PATH = "deps"
SPARK_HOME = os.path.abspath("../")
# Provide guidance about how to use setup.py
incorrect_invocation_message = """
If you are installing pyspark from spark source, you must first build Spark and
run sdist.
To build Spark with maven you can run:
./build/mvn -DskipTests clean package
Building the source dist is done in the Python directory:
cd python
python setup.py sdist
pip install dist/*.tar.gz"""
# Figure out where the jars are we need to package with PySpark.
JARS_PATH = glob.glob(os.path.join(SPARK_HOME, "assembly/target/scala-*/jars/"))
if len(JARS_PATH) == 1:
JARS_PATH = JARS_PATH[0]
elif (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1):
# Release mode puts the jars in a jars directory
JARS_PATH = os.path.join(SPARK_HOME, "jars")
elif len(JARS_PATH) > 1:
print("Assembly jars exist for multiple scalas ({0}), please cleanup assembly/target".format(
JARS_PATH), file=sys.stderr)
sys.exit(-1)
elif len(JARS_PATH) == 0 and not os.path.exists(TEMP_PATH):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
EXAMPLES_PATH = os.path.join(SPARK_HOME, "examples/src/main/python")
SCRIPTS_PATH = os.path.join(SPARK_HOME, "bin")
DATA_PATH = os.path.join(SPARK_HOME, "data")
LICENSES_PATH = os.path.join(SPARK_HOME, "licenses")
SCRIPTS_TARGET = os.path.join(TEMP_PATH, "bin")
JARS_TARGET = os.path.join(TEMP_PATH, "jars")
EXAMPLES_TARGET = os.path.join(TEMP_PATH, "examples")
DATA_TARGET = os.path.join(TEMP_PATH, "data")
LICENSES_TARGET = os.path.join(TEMP_PATH, "licenses")
# Check and see if we are under the spark path in which case we need to build the symlink farm.
# This is important because we only want to build the symlink farm while under Spark; otherwise we
# want to use the existing symlink farm. And if the symlink farm already exists while under Spark
# (e.g. a partially built sdist) we should error and have the user sort it out.
in_spark = (os.path.isfile("../core/src/main/scala/org/apache/spark/SparkContext.scala") or
(os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1))
def _supports_symlinks():
"""Check if the system supports symlinks (e.g. *nix) or not."""
return getattr(os, "symlink", None) is not None
if (in_spark):
# Construct links for setup
try:
os.mkdir(TEMP_PATH)
except:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
sys.exit(-1)
# If you are changing the versions here, please also change ./python/pyspark/sql/utils.py and
# ./python/run-tests.py. In case of Arrow, you should also check ./pom.xml.
_minimum_pandas_version = "0.19.2"
_minimum_pyarrow_version = "0.8.0"
try:
# We copy the shell script to be under pyspark/python/pyspark so that the launcher scripts
# find it where expected. The rest of the files aren't copied because they are accessed
# using Python imports instead which will be resolved correctly.
try:
os.makedirs("pyspark/python/pyspark")
except OSError:
# Don't worry if the directory already exists.
pass
copyfile("pyspark/shell.py", "pyspark/python/pyspark/shell.py")
if (in_spark):
# Construct the symlink farm - this is necessary since we can't refer to the path above the
# package root and we need to copy the jars and scripts which are up above the python root.
if _supports_symlinks():
os.symlink(JARS_PATH, JARS_TARGET)
os.symlink(SCRIPTS_PATH, SCRIPTS_TARGET)
os.symlink(EXAMPLES_PATH, EXAMPLES_TARGET)
os.symlink(DATA_PATH, DATA_TARGET)
os.symlink(LICENSES_PATH, LICENSES_TARGET)
else:
# For windows fall back to the slower copytree
copytree(JARS_PATH, JARS_TARGET)
copytree(SCRIPTS_PATH, SCRIPTS_TARGET)
copytree(EXAMPLES_PATH, EXAMPLES_TARGET)
copytree(DATA_PATH, DATA_TARGET)
copytree(LICENSES_PATH, LICENSES_TARGET)
else:
# If we are not inside of SPARK_HOME verify we have the required symlink farm
if not os.path.exists(JARS_TARGET):
print("To build packaging must be in the python directory under the SPARK_HOME.",
file=sys.stderr)
if not os.path.isdir(SCRIPTS_TARGET):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
# Scripts directive requires a list of each script path and does not take wild cards.
script_names = os.listdir(SCRIPTS_TARGET)
scripts = list(map(lambda script: os.path.join(SCRIPTS_TARGET, script), script_names))
# We add find_spark_home.py to the bin directory we install so that pip installed PySpark
# will search for SPARK_HOME with Python.
scripts.append("pyspark/find_spark_home.py")
# Parse the README markdown file into rst for PyPI
long_description = "!!!!! missing pandoc do not upload to PyPI !!!!"
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except ImportError:
print("Could not import pypandoc - required to package PySpark", file=sys.stderr)
except OSError:
print("Could not convert - pandoc is not installed", file=sys.stderr)
setup(
name='pyspark',
version=VERSION,
description='Apache Spark Python API',
long_description=long_description,
author='Spark Developers',
author_email='dev@spark.apache.org',
url='https://github.com/apache/spark/tree/master/python',
packages=['pyspark',
'pyspark.mllib',
'pyspark.mllib.linalg',
'pyspark.mllib.stat',
'pyspark.ml',
'pyspark.ml.linalg',
'pyspark.ml.param',
'pyspark.sql',
'pyspark.streaming',
'pyspark.bin',
'pyspark.jars',
'pyspark.python.pyspark',
'pyspark.python.lib',
'pyspark.data',
'pyspark.licenses',
'pyspark.examples.src.main.python'],
include_package_data=True,
package_dir={
'pyspark.jars': 'deps/jars',
'pyspark.bin': 'deps/bin',
'pyspark.python.lib': 'lib',
'pyspark.data': 'deps/data',
'pyspark.licenses': 'deps/licenses',
'pyspark.examples.src.main.python': 'deps/examples',
},
package_data={
'pyspark.jars': ['*.jar'],
'pyspark.bin': ['*'],
'pyspark.python.lib': ['*.zip'],
'pyspark.data': ['*.txt', '*.data'],
'pyspark.licenses': ['*.txt'],
'pyspark.examples.src.main.python': ['*.py', '*/*.py']},
scripts=scripts,
license='http://www.apache.org/licenses/LICENSE-2.0',
install_requires=['py4j==0.10.7'],
setup_requires=['pypandoc'],
extras_require={
'ml': ['numpy>=1.7'],
'mllib': ['numpy>=1.7'],
'sql': [
'pandas>=%s' % _minimum_pandas_version,
'pyarrow>=%s' % _minimum_pyarrow_version,
]
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy']
)
finally:
# We only cleanup the symlink farm if we were in Spark, otherwise we are installing rather than
# packaging.
if (in_spark):
# Depending on cleaning up the symlink farm or copied version
if _supports_symlinks():
os.remove(os.path.join(TEMP_PATH, "jars"))
os.remove(os.path.join(TEMP_PATH, "bin"))
os.remove(os.path.join(TEMP_PATH, "examples"))
os.remove(os.path.join(TEMP_PATH, "data"))
os.remove(os.path.join(TEMP_PATH, "licenses"))
else:
rmtree(os.path.join(TEMP_PATH, "jars"))
rmtree(os.path.join(TEMP_PATH, "bin"))
rmtree(os.path.join(TEMP_PATH, "examples"))
rmtree(os.path.join(TEMP_PATH, "data"))
rmtree(os.path.join(TEMP_PATH, "licenses"))
os.rmdir(TEMP_PATH)
| apache-2.0 |
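The setup.py above ships files that live outside python/ by building a "symlink farm" under deps/, copying instead on platforms without symlink support, and tearing the farm down again in the finally block. The core pattern in isolation (hypothetical paths; not part of the Spark build):

import os
from shutil import copytree, rmtree

def link_or_copy(src, dest):
    # Prefer a symlink (cheap, always in sync); fall back to a full copy.
    if getattr(os, "symlink", None) is not None:
        os.symlink(src, dest)
    else:
        copytree(src, dest)

def unlink_or_remove(dest):
    # Clean up whichever form was created above.
    if os.path.islink(dest):
        os.remove(dest)
    else:
        rmtree(dest)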
rosswhitfield/mantid | scripts/SANS/sans/algorithm_detail/beamcentrefinder_plotting.py | 3 | 1330 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import sys
IN_WORKBENCH = False
if "workbench.app.mainwindow" in sys.modules:
try:
from mantidqt.plotting.functions import plot
IN_WORKBENCH = True
except ImportError:
pass
def can_plot_beamcentrefinder():
return IN_WORKBENCH
def _plot_quartiles_matplotlib(output_workspaces, sample_scatter):
title = '{}_beam_centre_finder'.format(sample_scatter)
ax_properties = {'xscale': 'log',
'yscale': 'log'}
plot_kwargs = {"scalex": True,
"scaley": True}
if not isinstance(output_workspaces, list):
output_workspaces = [output_workspaces]
assert output_workspaces, "No workspaces were passed into plotting"
plot(output_workspaces, wksp_indices=[0], ax_properties=ax_properties, overplot=True,
plot_kwargs=plot_kwargs, window_title=title)
def plot_workspace_quartiles(output_workspaces, sample_scatter):
if IN_WORKBENCH:
_plot_quartiles_matplotlib(output_workspaces, sample_scatter)
| gpl-3.0 |
jwiggins/scikit-image | doc/ext/plot2rst.py | 21 | 20507 | """
Example generation from python files.
Generate the rst files for the examples by iterating over the python
example files. Files that generate images should start with 'plot'.
To generate your own examples, add this extension to the list of
``extensions`` in your Sphinx configuration file. In addition, make sure the
example directory(ies) in `plot2rst_paths` (see below) points to a directory
with examples named `plot_*.py` and include an `index.rst` file.
This code was adapted from scikit-image, which took it from scikit-learn.
Options
-------
The ``plot2rst`` extension accepts the following options:
plot2rst_paths : length-2 tuple, or list of tuples
Tuple or list of tuples of paths to (python plot, generated rst) files,
i.e. (source, destination). Note that both paths are relative to Sphinx
'source' directory. Defaults to ('../examples', 'auto_examples')
plot2rst_rcparams : dict
Matplotlib configuration parameters. See
http://matplotlib.sourceforge.net/users/customizing.html for details.
plot2rst_default_thumb : str
Path (relative to doc root) of default thumbnail image.
plot2rst_thumb_shape : float
Shape of thumbnail in pixels. The image is resized to fit within this shape
    and the excess is filled with white pixels. This fixed size ensures that
    gallery images are displayed in a grid.
plot2rst_plot_tag : str
When this tag is found in the example file, the current plot is saved and
    the tag is replaced with the plot path. Defaults to 'PLOT2RST.current_figure'.
Suggested CSS definitions
-------------------------
div.body h2 {
border-bottom: 1px solid #BBB;
clear: left;
}
/*---- example gallery ----*/
.gallery.figure {
float: left;
margin: 1em;
}
.gallery.figure img{
display: block;
margin-left: auto;
margin-right: auto;
width: 200px;
}
.gallery.figure .caption {
width: 200px;
text-align: center !important;
}
"""
import os
import re
import shutil
import token
import tokenize
import traceback
import itertools
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from skimage import io
from skimage import transform
from skimage.util.dtype import dtype_range
from notebook_doc import Notebook
from docutils.core import publish_parts
from sphinx.domains.python import PythonDomain
LITERALINCLUDE = """
.. literalinclude:: {src_name}
:lines: {code_start}-
"""
CODE_LINK = """
**Python source code:** :download:`download <{0}>`
(generated using ``skimage`` |version|)
"""
NOTEBOOK_LINK = """
**IPython Notebook:** :download:`download <{0}>`
(generated using ``skimage`` |version|)
"""
TOCTREE_TEMPLATE = """
.. toctree::
:hidden:
%s
"""
IMAGE_TEMPLATE = """
.. image:: images/%s
:align: center
"""
GALLERY_IMAGE_TEMPLATE = """
.. figure:: %(thumb)s
:figclass: gallery
:target: ./%(source)s.html
:ref:`example_%(link_name)s`
"""
class Path(str):
"""Path object for manipulating directory and file paths."""
def __new__(self, path):
return str.__new__(self, path)
@property
def isdir(self):
return os.path.isdir(self)
@property
def exists(self):
"""Return True if path exists"""
return os.path.exists(self)
def pjoin(self, *args):
"""Join paths. `p` prefix prevents confusion with string method."""
return self.__class__(os.path.join(self, *args))
def psplit(self):
"""Split paths. `p` prefix prevents confusion with string method."""
return [self.__class__(p) for p in os.path.split(self)]
def makedirs(self):
if not self.exists:
os.makedirs(self)
def listdir(self):
return os.listdir(self)
def format(self, *args, **kwargs):
return self.__class__(super(Path, self).format(*args, **kwargs))
def __add__(self, other):
return self.__class__(super(Path, self).__add__(other))
def __iadd__(self, other):
return self.__add__(other)
def setup(app):
app.connect('builder-inited', generate_example_galleries)
app.add_config_value('plot2rst_paths',
('../examples', 'auto_examples'), True)
app.add_config_value('plot2rst_rcparams', {}, True)
app.add_config_value('plot2rst_default_thumb', None, True)
app.add_config_value('plot2rst_thumb_shape', (250, 300), True)
app.add_config_value('plot2rst_plot_tag', 'PLOT2RST.current_figure', True)
app.add_config_value('plot2rst_index_name', 'index', True)
def generate_example_galleries(app):
cfg = app.builder.config
if isinstance(cfg.source_suffix, list):
cfg.source_suffix_str = cfg.source_suffix[0]
else:
cfg.source_suffix_str = cfg.source_suffix
doc_src = Path(os.path.abspath(app.builder.srcdir)) # path/to/doc/source
if isinstance(cfg.plot2rst_paths, tuple):
cfg.plot2rst_paths = [cfg.plot2rst_paths]
for src_dest in cfg.plot2rst_paths:
plot_path, rst_path = [Path(p) for p in src_dest]
example_dir = doc_src.pjoin(plot_path)
rst_dir = doc_src.pjoin(rst_path)
generate_examples_and_gallery(example_dir, rst_dir, cfg)
def generate_examples_and_gallery(example_dir, rst_dir, cfg):
"""Generate rst from examples and create gallery to showcase examples."""
if not example_dir.exists:
print("No example directory found at", example_dir)
return
rst_dir.makedirs()
# we create an index.rst with all examples
with open(rst_dir.pjoin('index'+cfg.source_suffix_str), 'w') as gallery_index:
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
write_gallery(gallery_index, example_dir, rst_dir, cfg)
for d in sorted(example_dir.listdir()):
example_sub = example_dir.pjoin(d)
if example_sub.isdir:
rst_sub = rst_dir.pjoin(d)
rst_sub.makedirs()
write_gallery(gallery_index, example_sub, rst_sub, cfg, depth=1)
gallery_index.flush()
def write_gallery(gallery_index, src_dir, rst_dir, cfg, depth=0):
"""Generate the rst files for an example directory, i.e. gallery.
Write rst files from python examples and add example links to gallery.
Parameters
----------
gallery_index : file
Index file for plot gallery.
src_dir : 'str'
Source directory for python examples.
rst_dir : 'str'
Destination directory for rst files generated from python examples.
cfg : config object
Sphinx config object created by Sphinx.
"""
index_name = cfg.plot2rst_index_name + cfg.source_suffix_str
gallery_template = src_dir.pjoin(index_name)
if not os.path.exists(gallery_template):
print(src_dir)
print(80*'_')
print('Example directory %s does not have a %s file'
% (src_dir, index_name))
print('Skipping this directory')
print(80*'_')
return
with open(gallery_template) as f:
gallery_description = f.read()
gallery_index.write('\n\n%s\n\n' % gallery_description)
rst_dir.makedirs()
examples = [fname for fname in sorted(src_dir.listdir(), key=_plots_first)
if fname.endswith('py')]
ex_names = [ex[:-3] for ex in examples] # strip '.py' extension
if depth == 0:
sub_dir = Path('')
else:
sub_dir_list = src_dir.psplit()[-depth:]
sub_dir = Path('/'.join(sub_dir_list) + '/')
joiner = '\n %s' % sub_dir
gallery_index.write(TOCTREE_TEMPLATE % (sub_dir + joiner.join(ex_names)))
for src_name in examples:
try:
write_example(src_name, src_dir, rst_dir, cfg)
except Exception:
print("Exception raised while running:")
print("%s in %s" % (src_name, src_dir))
print('~' * 60)
traceback.print_exc()
print('~' * 60)
continue
link_name = sub_dir.pjoin(src_name)
link_name = link_name.replace(os.path.sep, '_')
if link_name.startswith('._'):
link_name = link_name[2:]
info = {}
info['thumb'] = sub_dir.pjoin('images/thumb', src_name[:-3] + '.png')
info['source'] = sub_dir + src_name[:-3]
info['link_name'] = link_name
gallery_index.write(GALLERY_IMAGE_TEMPLATE % info)
def _plots_first(fname):
"""Decorate filename so that examples with plots are displayed first."""
if not (fname.startswith('plot') and fname.endswith('.py')):
return 'zz' + fname
return fname
def write_example(src_name, src_dir, rst_dir, cfg):
"""Write rst file from a given python example.
Parameters
----------
src_name : str
Name of example file.
src_dir : 'str'
Source directory for python examples.
rst_dir : 'str'
Destination directory for rst files generated from python examples.
cfg : config object
Sphinx config object created by Sphinx.
"""
last_dir = src_dir.psplit()[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = Path('')
else:
last_dir += '_'
src_path = src_dir.pjoin(src_name)
example_file = rst_dir.pjoin(src_name)
shutil.copyfile(src_path, example_file)
image_dir = rst_dir.pjoin('images')
thumb_dir = image_dir.pjoin('thumb')
notebook_dir = rst_dir.pjoin('notebook')
image_dir.makedirs()
thumb_dir.makedirs()
notebook_dir.makedirs()
base_image_name = os.path.splitext(src_name)[0]
image_path = image_dir.pjoin(base_image_name + '_{0}.png')
basename, py_ext = os.path.splitext(src_name)
rst_path = rst_dir.pjoin(basename + cfg.source_suffix_str)
notebook_path = notebook_dir.pjoin(basename + '.ipynb')
if _plots_are_current(src_path, image_path) and rst_path.exists and \
notebook_path.exists:
return
print('plot2rst: %s' % basename)
blocks = split_code_and_text_blocks(example_file)
if blocks[0][2].startswith('#!'):
blocks.pop(0) # don't add shebang line to rst file.
rst_link = '.. _example_%s:\n\n' % (last_dir + src_name)
figure_list, rst = process_blocks(blocks, src_path, image_path, cfg)
has_inline_plots = any(cfg.plot2rst_plot_tag in b[2] for b in blocks)
if has_inline_plots:
example_rst = ''.join([rst_link, rst])
else:
# print first block of text, display all plots, then display code.
first_text_block = [b for b in blocks if b[0] == 'text'][0]
label, (start, end), content = first_text_block
figure_list = save_all_figures(image_path)
rst_blocks = [IMAGE_TEMPLATE % f.lstrip('/') for f in figure_list]
example_rst = rst_link
example_rst += eval(content)
example_rst += ''.join(rst_blocks)
code_info = dict(src_name=src_name, code_start=end)
example_rst += LITERALINCLUDE.format(**code_info)
example_rst += CODE_LINK.format(src_name)
ipnotebook_name = src_name.replace('.py', '.ipynb')
ipnotebook_name = './notebook/' + ipnotebook_name
example_rst += NOTEBOOK_LINK.format(ipnotebook_name)
with open(rst_path, 'w') as f:
f.write(example_rst)
thumb_path = thumb_dir.pjoin(src_name[:-3] + '.png')
first_image_file = image_dir.pjoin(figure_list[0].lstrip('/'))
if first_image_file.exists:
first_image = io.imread(first_image_file)
save_thumbnail(first_image, thumb_path, cfg.plot2rst_thumb_shape)
if not thumb_path.exists:
if cfg.plot2rst_default_thumb is None:
print("WARNING: No plots found and default thumbnail not defined.")
print("Specify 'plot2rst_default_thumb' in Sphinx config file.")
else:
shutil.copy(cfg.plot2rst_default_thumb, thumb_path)
# Export example to IPython notebook
nb = Notebook()
# Add sphinx roles to the examples, otherwise docutils
# cannot compile the ReST for the notebook
sphinx_roles = PythonDomain.roles.keys()
preamble = '\n'.join('.. role:: py:{0}(literal)\n'.format(role)
for role in sphinx_roles)
# Grab all references to inject them in cells where needed
ref_regexp = re.compile('\n(\.\. \[(\d+)\].*(?:\n[ ]{7,8}.*)+)')
math_role_regexp = re.compile(':math:`(.*?)`')
text = '\n'.join((content for (cell_type, _, content) in blocks
if cell_type != 'code'))
references = re.findall(ref_regexp, text)
for (cell_type, _, content) in blocks:
if cell_type == 'code':
nb.add_cell(content, cell_type='code')
else:
if content.startswith('r'):
content = content.replace('r"""', '')
escaped = False
else:
content = content.replace('"""', '')
escaped = True
if not escaped:
content = content.replace("\\", "\\\\")
content = content.replace('.. seealso::', '**See also:**')
content = re.sub(math_role_regexp, r'$\1$', content)
# Remove math directive when rendering notebooks
# until we implement a smarter way of capturing and replacing
# its content
content = content.replace('.. math::', '')
if not content.strip():
continue
content = (preamble + content).rstrip('\n')
content = '\n'.join([line for line in content.split('\n') if
not line.startswith('.. image')])
# Remove reference links until we can figure out a better way to
# preserve them
for (reference, ref_id) in references:
ref_tag = '[{0}]_'.format(ref_id)
if ref_tag in content:
content = content.replace(ref_tag, ref_tag[:-1])
html = publish_parts(content, writer_name='html')['html_body']
nb.add_cell(html, cell_type='markdown')
with open(notebook_path, 'w') as f:
f.write(nb.json())
def save_thumbnail(image, thumb_path, shape):
"""Save image as a thumbnail with the specified shape.
The image is first resized to fit within the specified shape and then
centered in an array of the specified shape before saving.
"""
rescale = min(float(w_1) / w_2 for w_1, w_2 in zip(shape, image.shape))
small_shape = (rescale * np.asarray(image.shape[:2])).astype(int)
small_image = transform.resize(image, small_shape)
if len(image.shape) == 3:
shape = shape + (image.shape[2],)
background_value = dtype_range[small_image.dtype.type][1]
thumb = background_value * np.ones(shape, dtype=small_image.dtype)
i = (shape[0] - small_shape[0]) // 2
j = (shape[1] - small_shape[1]) // 2
thumb[i:i+small_shape[0], j:j+small_shape[1]] = small_image
io.imsave(thumb_path, thumb)
def _plots_are_current(src_path, image_path):
first_image_file = Path(image_path.format(1))
needs_replot = (not first_image_file.exists or
_mod_time(first_image_file) <= _mod_time(src_path))
return not needs_replot
def _mod_time(file_path):
return os.stat(file_path).st_mtime
def split_code_and_text_blocks(source_file):
"""Return list with source file separated into code and text blocks.
Returns
-------
blocks : list of (label, (start, end+1), content)
List where each element is a tuple with the label ('text' or 'code'),
the (start, end+1) line numbers, and content string of block.
"""
block_edges, idx_first_text_block = get_block_edges(source_file)
with open(source_file) as f:
source_lines = f.readlines()
# Every other block should be a text block
idx_text_block = np.arange(idx_first_text_block, len(block_edges), 2)
blocks = []
slice_ranges = zip(block_edges[:-1], block_edges[1:])
for i, (start, end) in enumerate(slice_ranges):
block_label = 'text' if i in idx_text_block else 'code'
# subtract 1 from indices b/c line numbers start at 1, not 0
content = ''.join(source_lines[start-1:end-1])
blocks.append((block_label, (start, end), content))
return blocks
def get_block_edges(source_file):
"""Return starting line numbers of code and text blocks
Returns
-------
block_edges : list of int
        Line number for the start of each block; the final entry marks the
        line just past the last block.
    idx_first_text_block : {0 | 1}
        0 if the first block is text, else 1 (the second block should be text).
"""
block_edges = []
with open(source_file) as f:
token_iter = tokenize.generate_tokens(f.readline)
for token_tuple in token_iter:
t_id, t_str, (srow, scol), (erow, ecol), src_line = token_tuple
if (token.tok_name[t_id] == 'STRING' and scol == 0):
# Add one point to line after text (for later slicing)
block_edges.extend((srow, erow+1))
idx_first_text_block = 0
# when example doesn't start with text block.
if not block_edges[0] == 1:
block_edges.insert(0, 1)
idx_first_text_block = 1
# when example doesn't end with text block.
if not block_edges[-1] == erow: # iffy: I'm using end state of loop
block_edges.append(erow)
return block_edges, idx_first_text_block
def process_blocks(blocks, src_path, image_path, cfg):
"""Run source, save plots as images, and convert blocks to rst.
Parameters
----------
blocks : list of block tuples
Code and text blocks from example. See `split_code_and_text_blocks`.
src_path : str
Path to example file.
image_path : str
Path where plots are saved (format string which accepts figure number).
cfg : config object
Sphinx config object created by Sphinx.
Returns
-------
figure_list : list
List of figure names saved by the example.
rst_text : str
Text with code wrapped code-block directives.
"""
src_dir, src_name = src_path.psplit()
if not src_name.startswith('plot'):
return [], ''
# index of blocks which have inline plots
inline_tag = cfg.plot2rst_plot_tag
idx_inline_plot = [i for i, b in enumerate(blocks)
if inline_tag in b[2]]
image_dir, image_fmt_str = image_path.psplit()
figure_list = []
plt.rcdefaults()
plt.rcParams.update(cfg.plot2rst_rcparams)
plt.close('all')
example_globals = {}
rst_blocks = []
fig_num = 1
for i, (blabel, brange, bcontent) in enumerate(blocks):
if blabel == 'code':
exec(bcontent, example_globals)
rst_blocks.append(codestr2rst(bcontent))
else:
if i in idx_inline_plot:
plt.savefig(image_path.format(fig_num))
figure_name = image_fmt_str.format(fig_num)
fig_num += 1
figure_list.append(figure_name)
figure_link = os.path.join('images', figure_name)
bcontent = bcontent.replace(inline_tag, figure_link)
rst_blocks.append(docstr2rst(bcontent))
return figure_list, '\n'.join(rst_blocks)
def codestr2rst(codestr):
"""Return reStructuredText code block from code string"""
code_directive = ".. code-block:: python\n\n"
indented_block = '\t' + codestr.replace('\n', '\n\t')
return code_directive + indented_block
def docstr2rst(docstr):
"""Return reStructuredText from docstring"""
idx_whitespace = len(docstr.rstrip()) - len(docstr)
whitespace = docstr[idx_whitespace:]
return eval(docstr) + whitespace
def save_all_figures(image_path):
"""Save all matplotlib figures.
Parameters
----------
image_path : str
Path where plots are saved (format string which accepts figure number).
"""
figure_list = []
image_dir, image_fmt_str = image_path.psplit()
fig_mngr = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_num in (m.num for m in fig_mngr):
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
plt.figure(fig_num)
plt.savefig(image_path.format(fig_num))
figure_list.append(image_fmt_str.format(fig_num))
return figure_list
| bsd-3-clause |
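For reference, a hypothetical conf.py fragment wiring this extension into a Sphinx build, using the option names documented in the module docstring (the values shown are the documented defaults plus an illustrative rcParams override; the extension module must be importable, e.g. via sys.path):

extensions = ['plot2rst']
plot2rst_paths = ('../examples', 'auto_examples')      # (source, destination) relative to the doc source dir
plot2rst_rcparams = {'image.cmap': 'gray'}             # illustrative matplotlib settings
plot2rst_default_thumb = '_static/default_thumb.png'   # hypothetical fallback thumbnail
plot2rst_thumb_shape = (250, 300)
plot2rst_plot_tag = 'PLOT2RST.current_figure'
plot2rst_index_name = 'index'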
TheWylieStCoyote/gnuradio | gr-filter/examples/interpolate.py | 3 | 8164 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
import numpy
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 100000 # number of samples to use
self._fs = 2000 # initial sampling rate
self._interp = 5 # Interpolation rate for PFB interpolator
self._ainterp = 5.5 # Resampling rate for the PFB arbitrary resampler
# Frequencies of the signals we construct
freq1 = 100
freq2 = 200
# Create a set of taps for the PFB interpolator
# This is based on the post-interpolation sample rate
self._taps = filter.firdes.low_pass_2(self._interp,
self._interp*self._fs,
freq2+50, 50,
attenuation_dB=120,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Create a set of taps for the PFB arbitrary resampler
# The filter size is the number of filters in the filterbank; 32 will give very low side-lobes,
# and larger numbers will reduce these even farther
# The taps in this filter are based on a sampling rate of the filter size since it acts
# internally as an interpolator.
flt_size = 32
self._taps2 = filter.firdes.low_pass_2(flt_size,
flt_size*self._fs,
freq2+50, 150,
attenuation_dB=120,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = numpy.ceil(float(len(self._taps)) / float(self._interp))
print("Number of taps: ", len(self._taps))
print("Number of filters: ", self._interp)
print("Taps per channel: ", tpc)
# Create a couple of signals at different frequencies
self.signal1 = analog.sig_source_c(self._fs, analog.GR_SIN_WAVE, freq1, 0.5)
self.signal2 = analog.sig_source_c(self._fs, analog.GR_SIN_WAVE, freq2, 0.5)
self.signal = blocks.add_cc()
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct the PFB interpolator filter
self.pfb = filter.pfb.interpolator_ccf(self._interp, self._taps)
# Construct the PFB arbitrary resampler filter
self.pfb_ar = filter.pfb.arb_resampler_ccf(self._ainterp, self._taps2, flt_size)
self.snk_i = blocks.vector_sink_c()
#self.pfb_ar.pfb.print_taps()
#self.pfb.pfb.print_taps()
# Connect the blocks
self.connect(self.signal1, self.head, (self.signal,0))
self.connect(self.signal2, (self.signal,1))
self.connect(self.signal, self.pfb)
self.connect(self.signal, self.pfb_ar)
self.connect(self.signal, self.snk_i)
# Create the sink for the interpolated signals
self.snk1 = blocks.vector_sink_c()
self.snk2 = blocks.vector_sink_c()
self.connect(self.pfb, self.snk1)
self.connect(self.pfb_ar, self.snk2)
def main():
tb = pfb_top_block()
tstart = time.time()
tb.run()
tend = time.time()
print("Run time: %f" % (tend - tstart))
if 1:
fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
Ns = 10000
Ne = 10000
fftlen = 8192
winfunc = numpy.blackman
# Plot input signal
fs = tb._fs
d = tb.snk_i.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen / 4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*numpy.log10(abs(numpy.fft.fftshift(X)))
f_in = numpy.arange(-fs / 2.0, fs / 2.0, fs / float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0 / fs
Tmax = len(d)*Ts
t_in = numpy.arange(0, Tmax, Ts)
x_in = numpy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
#p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
sp1_t.set_ylim([-2.5, 2.5])
sp1_t.set_title("Input Signal", weight="bold")
sp1_t.set_xlabel("Time (s)")
sp1_t.set_ylabel("Amplitude")
# Plot output of PFB interpolator
fs_int = tb._fs*tb._interp
sp2_f = fig2.add_subplot(2, 1, 1)
d = tb.snk1.data()[Ns:Ns+(tb._interp*Ne)]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen / 4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*numpy.log10(abs(numpy.fft.fftshift(X)))
f_o = numpy.arange(-fs_int / 2.0, fs_int / 2.0, fs_int / float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+1])
sp2_f.set_ylim([-200.0, 50.0])
sp2_f.set_title("Output Signal from PFB Interpolator", weight="bold")
sp2_f.set_xlabel("Frequency (Hz)")
sp2_f.set_ylabel("Power (dBW)")
Ts_int = 1.0 / fs_int
Tmax = len(d)*Ts_int
t_o = numpy.arange(0, Tmax, Ts_int)
x_o1 = numpy.array(d)
sp2_t = fig2.add_subplot(2, 1, 2)
p2_t = sp2_t.plot(t_o, x_o1.real, "b-o")
#p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
sp2_t.set_ylim([-2.5, 2.5])
sp2_t.set_title("Output Signal from PFB Interpolator", weight="bold")
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
# Plot output of PFB arbitrary resampler
fs_aint = tb._fs * tb._ainterp
sp3_f = fig3.add_subplot(2, 1, 1)
d = tb.snk2.data()[Ns:Ns+(tb._interp*Ne)]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen / 4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*numpy.log10(abs(numpy.fft.fftshift(X)))
f_o = numpy.arange(-fs_aint / 2.0, fs_aint / 2.0, fs_aint / float(X_o.size))
p3_f = sp3_f.plot(f_o, X_o, "b")
sp3_f.set_xlim([min(f_o), max(f_o)+1])
sp3_f.set_ylim([-200.0, 50.0])
sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
sp3_f.set_xlabel("Frequency (Hz)")
sp3_f.set_ylabel("Power (dBW)")
Ts_aint = 1.0 / fs_aint
Tmax = len(d)*Ts_aint
t_o = numpy.arange(0, Tmax, Ts_aint)
x_o2 = numpy.array(d)
sp3_f = fig3.add_subplot(2, 1, 2)
p3_f = sp3_f.plot(t_o, x_o2.real, "b-o")
p3_f = sp3_f.plot(t_o, x_o1.real, "m-o")
#p3_f = sp3_f.plot(t_o, x_o2.imag, "r-o")
sp3_f.set_ylim([-2.5, 2.5])
sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
sp3_f.set_xlabel("Time (s)")
sp3_f.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
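The example above builds a two-tone complex signal at fs = 2000 Hz and interpolates it by 5 with a polyphase filterbank. For a point of comparison, a SciPy-only sketch of the same interpolation factor using a polyphase resampler (synthetic tones; GNU Radio is not required for this sketch):

import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import resample_poly

fs = 2000.0                                    # original sampling rate, as in the example
t = np.arange(0, 0.05, 1.0 / fs)
x = 0.5 * np.exp(2j * np.pi * 100 * t) + 0.5 * np.exp(2j * np.pi * 200 * t)

interp = 5
y = resample_poly(x, up=interp, down=1)        # polyphase interpolation by 5

plt.plot(np.arange(len(y)) / (fs * interp), y.real, 'b-o', markersize=3)
plt.xlabel('Time (s)')
plt.ylabel('Amplitude')
plt.title('Signal interpolated 5x with scipy.signal.resample_poly')
plt.show()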
mmottahedi/neuralnilm_prototype | scripts/e521.py | 2 | 6523 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff,
RandomSegments, RandomSegmentsInMemory,
SameLocation)
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,
SharedWeightsDenseLayer)
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter, RectangularOutputPlotter, StartEndMeanPlotter
from neuralnilm.updates import clipped_nesterov_momentum
from neuralnilm.disaggregate import disaggregate
from neuralnilm.rectangulariser import rectangularise
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity, softmax
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
447: first attempt at disaggregation
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 1000
N_SEQ_PER_BATCH = 64
N_SEGMENTS = 3
MAX_TARGET_POWER = 300
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
['washer dryer', 'washing machine'],
'hair straighteners',
'television',
'dish washer'
],
max_appliance_powers=[MAX_TARGET_POWER, 2400, 500, 200, 2500],
on_power_thresholds=[5] * 5,
min_on_durations=[60, 1800, 60, 60, 1800],
min_off_durations=[12, 600, 12, 12, 1800],
# window=("2013-03-18", None),
window=("2014-03-18", "2014-04-18"),
seq_length=512,
output_one_appliance=True,
train_buildings=[1],
validation_buildings=[1],
n_seq_per_batch=N_SEQ_PER_BATCH,
standardise_input=True,
independently_center_inputs=False,
skip_probability=0.75,
skip_probability_for_first_appliance=0.2,
target_is_start_and_end_and_mean=True,
# y_processing_func=lambda y: y / MAX_TARGET_POWER,
one_target_per_seq=False
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: (mse(x, t) * MASK).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-2,
learning_rate_changes_by_iteration={
1000: 1e-3,
50000: 1e-4
},
do_save_activations=True,
auto_reshape=False,
# plotter=CentralOutputPlotter
# plotter=Plotter(n_seq_to_plot=32)
plotter=StartEndMeanPlotter(n_seq_to_plot=16, max_target_power=MAX_TARGET_POWER)
)
def exp_a(name):
# conv, conv
global source
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
logger=logging.getLogger(name)
))
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
NUM_FILTERS = 16
target_seq_length = source.output_shape_after_processing()[1]
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_size': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_size': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': 512,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': 256,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': 128,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': target_seq_length,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def main():
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e521.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
| mit |
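net_dict in the script above encodes a stepped learning-rate schedule through learning_rate_changes_by_iteration (1e-2, dropping to 1e-3 at iteration 1000 and 1e-4 at 50000). A framework-agnostic sketch of how such a dict resolves to a rate, using the same values; neuralnilm's own handling may differ:

def learning_rate_at(iteration, base_lr=1e-2, changes=None):
    # Step schedule: keep base_lr until an iteration threshold is crossed,
    # then switch to the rate registered for that threshold.
    if changes is None:
        changes = {1000: 1e-3, 50000: 1e-4}
    lr = base_lr
    for threshold in sorted(changes):
        if iteration >= threshold:
            lr = changes[threshold]
    return lr

assert learning_rate_at(0) == 1e-2
assert learning_rate_at(2000) == 1e-3
assert learning_rate_at(60000) == 1e-4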
beepee14/scikit-learn | sklearn/svm/tests/test_svm.py | 70 | 31674 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn.base import ChangedBehaviorWarning
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that there is no other
# thread calling this wrapper calling `srand` concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
@ignore_warnings
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
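# --- Editor's illustrative sketch (not part of the original test suite) ---
# A minimal, self-contained example of the precomputed-kernel workflow that
# test_precomputed() exercises above: fit on the square train-vs-train Gram
# matrix, then predict with the rectangular test-vs-train Gram matrix.
def _example_precomputed_kernel_usage():
    X_train = np.array([[-1., -1.], [1., 1.]])
    y_train = [0, 1]
    X_test = np.array([[2., 2.]])
    clf = svm.SVC(kernel='precomputed')
    clf.fit(np.dot(X_train, X_train.T), y_train)      # square Gram matrix
    return clf.predict(np.dot(X_test, X_train.T))     # rectangular Gram matrix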
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
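    # for a kernel SVM the decision value is the kernel expansion
    # f(x) = sum_i dual_coef_[i] * K(x, sv_i) + intercept_, which the next
    # two lines reproduce explicitly via rbf_kernel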
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check shape when decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
# check deprecation warning
clf.decision_function_shape = None
msg = "change the shape of the decision function"
dec = assert_warns_message(ChangedBehaviorWarning, msg,
clf.decision_function, X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_decision_function():
# Test SVR's decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="balanced"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='weighted')
<= metrics.f1_score(y, y_pred_balanced,
average='weighted'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1/L1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2/L2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
svm.LinearSVC(loss="L2").fit, X, y)
# LinearSVR
# loss l1/L1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("L1", "epsilon_insensitive", "loss='L1'",
"1.0"),
svm.LinearSVR(loss="L1").fit, X, y)
# loss l2/L2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
# Check if Upper case notation is supported by _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
# loss SQUARED_hinge --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)
# penalty L2 --> l2
assert_warns_message(DeprecationWarning,
msg.replace("loss", "penalty")
% ("L2", "l2", "0.18"),
svm.LinearSVC(penalty="L2").fit, X, y)
# loss EPSILON_INSENSITIVE --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
"0.18"),
svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
    # Check that primal coef modifications are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
# clone for checking clonability with lambda functions..
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
| bsd-3-clause |
gtesei/fast-furious | dataset/images2/serializerDogsCatsSURF_Test_restore.py | 1 | 1779 | import mahotas as mh
from sklearn import cross_validation
from sklearn.linear_model.logistic import LogisticRegression
import numpy as np
from glob import glob
from edginess import edginess_sobel
def features_for(im):
im = mh.imread(im,as_grey=True).astype(np.uint8)
#return mh.features.haralick(im).mean(0)
return np.squeeze(mh.features.haralick(im)).reshape(-1)
features = []
####################################################
print('SURFing ...')
tfeatures = features
from sklearn.cluster import KMeans
from mahotas.features import surf
#basedir = 'small_train-dogs-cats'
#images = glob('{}/*.jpg'.format(basedir))
#alldescriptors = []
#i = 0;
#for im in images:
# im = mh.imread(im, as_grey=1)
# im = im.astype(np.uint8)
# alldescriptors.append(surf.surf(im, descriptor_only=True))
# i += 1
# print ('image:'+str(i))
print('Descriptors done')
k = 256
km = KMeans(k)
concatenated = np.loadtxt("test_SURF_concatenated.zat", delimiter=",")
#concatenated = np.concatenate(alldescriptors)
#concatenated = concatenated[::64]
print('k-meaning...')
km.fit(concatenated)
features = []
basedir = 'test_dogs_vs_cats'
images = glob('{}/*.jpg'.format(basedir))
ims = len(images)
#for im in images:
for i in range(1,ims+1):
im = 'test_dogs_vs_cats/'+str(i)+'.jpg'
print('processing ' +str(im) +' ...')
im = mh.imread(im, as_grey=1)
im = im.astype(np.uint8)
d = surf.surf(im, descriptor_only=True)
c = km.predict(d)
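    # bag-of-visual-words encoding: every SURF descriptor is assigned to one
    # of the k centroids learned above, and the image is represented by the
    # k-bin histogram of those assignments built below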
features.append(
            np.array([np.sum(c == vw) for vw in xrange(k)])
)
features = np.array(features)
np.savetxt("test_K-MEANS-ON_TRAINSET_featuresDogsCatsSURF.zat", features, delimiter=",")
np.savetxt("test_SURF_concatenated2.zat", concatenated, delimiter=",")
| mit |
jorge2703/scikit-learn | sklearn/tests/test_random_projection.py | 142 | 14033 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils import DataDimensionalityWarning
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
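# The tests below exercise johnson_lindenstrauss_min_dim which, per the
# scikit-learn documentation, returns the minimal safe dimensionality
#   n_components >= 4 * log(n_samples) / (eps ** 2 / 2 - eps ** 3 / 3)
# for an eps-distortion guarantee on pairwise distances, with eps in (0, 1).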
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
# All random matrix should produce a transformation matrix
# with zero mean and unit norm for each columns
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
# Check basic properties of random matrix generation
for random_matrix in all_random_matrix:
yield check_input_size_random_matrix, random_matrix
yield check_size_generated, random_matrix
yield check_zero_mean_and_unit_norm, random_matrix
for random_matrix in all_sparse_random_matrix:
yield check_input_with_sparse_random_matrix, random_matrix
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
yield check_zero_mean_and_unit_norm, random_matrix_dense
def test_gaussian_random_matrix():
    # Check some statistical properties of Gaussian random matrix
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
    # Check some statistical properties of sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [0, 1, 2])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
| bsd-3-clause |
trondth/master | opinionholder.py | 1 | 79462 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import itertools
import json
from collections import Counter
from opinionexpressions import *
from sklearn import svm
from sklearn.feature_extraction import DictVectorizer
import numpy as np
import scipy
from scipy.sparse import csgraph
import re
from sklearn.externals import joblib
re_ose = re.compile(r'GATE_objective-speech-event')
re_ese = re.compile(r'GATE_expressive-subjectiv')
re_dse = re.compile(r'GATE_direct-subjectiv')
re_holder = re.compile(r'GATE_agent')
EXPTYPES = ['dse', 'ese', 'ose']
DEPREPS = ['dt', 'sb', 'conll']
DEBUG = False
DEBUGNOW = True
counters = Counter()
def read_model(filename):
"""
@param filename Input filename
@return Scikit-learn classifier
"""
return joblib.load(filename)
def write_model(clf, filename):
"""
@param filename Output filename
@param clf Classifier model
"""
joblib.dump(clf, filename)
def cleanholdercandidates(lst):
"""
Removes holder candidates. Modifies list of sentences.
@param lst List of sentences with list of tokens.
"""
for sent in lst:
for token in sent:
if 'holder_candidate' in token:
del token['holder_candidate']
def cleanholders(lst):
"""
Removes holders. Modifies list of sentences.
@param lst List of sentences with list of tokens.
"""
for sent in lst:
for token in sent:
token['holder'] = False
if 'holders' in token:
del token['holders']
def getexpressions_sent(sent, predict=False):
"""
Collects lists of expressions in phrases.
    @param sent List of tokens in sent
    @param predict Read predicted expressions (PGATE) instead of gold (GATE)
    @return Dictionary mapping each expression type to an OrderedDict of expressions
"""
expr = {}
if predict:
gatekey = 'PGATE'
else:
gatekey = 'GATE'
for exptype in EXPTYPES:
expr[exptype] = OrderedDict()
for i, t in enumerate(sent):
for gate in t[gatekey]:
if gate['slice'].start != gate['slice'].stop:
tmp = gate['ann_type']
if re_ose.match(tmp):
if gate['line_id'] not in expr['ose']:
expr['ose'][gate['line_id']] = {gatekey: gate,
'token_id': set([i+1])}
else:
expr['ose'][gate['line_id']]['token_id'].add(i+1)
elif re_ese.match(tmp): #tmp == 'GATE_expressive-subjectivity':
if gate['line_id'] not in expr['ese']:
expr['ese'][gate['line_id']] = {gatekey: gate,
'token_id': set([i+1])}
else:
expr['ese'][gate['line_id']]['token_id'].add(i+1)
elif re_dse.match(tmp): #tmp == 'GATE_direct-subjective':
if gate['line_id'] not in expr['dse']:
expr['dse'][gate['line_id']] = {gatekey: gate,
'token_id': set([i+1])}
else:
expr['dse'][gate['line_id']]['token_id'].add(i+1)
return expr
def tagholdercandidates_sent(sent, predict=False):
"""
Tags holder candidates for the different types of expressions.
Head of noun phrases are selected as holder candidates for an
expression type when they are not a part of an expression of that
type.
@param sent List of tokens in sent
    @param predict Use predicted expression annotations (PGATE) instead of gold (GATE)
"""
head_num = False
for i, token in enumerate(sent):
if 'daughters' not in token:
raise ValueError("Need to run daughterlists_sent first.")
token['holder_candidate'] = set()
if ('head' in token and token['head'] == '0'):
head_num = i
if ('head' in token and (token['pos'][:2] == 'NN' or token['pos'][:3] == 'PRP')):
head_id = int(token['head'])
if head_id != 0 and len(sent) >= head_id:
tmp_token = sent[head_id-1]
if not ('head' in tmp_token and
(tmp_token['pos'][:2] == 'NN' or tmp_token['pos'][:3] == 'PRP')):
if args.restrict == 'all':
add_this = True
for exptype in EXPTYPES:
if args.restrict == 'sameexp':
# Restrict holder candidates when building features instead
token['holder_candidate'].add(exptype)
else:
if predict:
tmpexp = 'P' + exptype
else:
tmpexp = exptype
try:
if not sent[i][tmpexp]:
if not args.restrict == 'all':
token['holder_candidate'].add(exptype)
else:
add_this = False
except:
print sent
raise
if args.restrict == 'all' and add_this:
for exptype in EXPTYPES:
token['holder_candidate'].add(exptype)
else:
if args.restrict == 'all':
add_this = True
for exptype in EXPTYPES:
#if i+1 not in rsets[exptype]:
if args.restrict == 'sameexp':
token['holder_candidate'].add(exptype)
else:
if predict:
tmpexp = 'P' + exptype
else:
tmpexp = exptype
if not sent[i][tmpexp]:
if args.restrict != 'all':
token['holder_candidate'].add(exptype)
else:
add_this = False
if args.restrict == 'all':
if add_this:
token['holder_candidate'].add(exptype)
if args.notoverlappingcandidates:
_tagholdercandidates_sent_follow_daughters(sent, head_num)
def _tagholdercandidates_sent_follow_daughters(sent, num):
# If HC, clean subtree
if len(sent[num]['holder_candidate']) > 0:
for d in sent[num]['daughters']:
_tagholdercandidates_sent_clean_subtree(sent, int(d)-1)
else:
for d in sent[num]['daughters']:
_tagholdercandidates_sent_follow_daughters(sent, int(d)-1)
def _tagholdercandidates_sent_clean_subtree(sent, num):
sent[num]['holder_candidate'] = set()
for n in sent[num]['daughters']:
_tagholdercandidates_sent_clean_subtree(sent, int(n)-1)
def getholder_exp_pairs_sent(sent, expr, holders, exptype=False, isolate_exp=True, test=False):
"""
Create a list of holder-expression-pairs.
    For writer holders the holder element is the string 'w';
    for implicit holders it is the string 'implicit'.
@param sent List of tokens
@param expr Dict with lists of expressions of the different types
@param holders List of opinion holders in the sentence
@return list of tuples (exp, holder, exptype, coref(or false, when there are no internal holders))
"""
tuples = []
if not exptype:
exptypelist = EXPTYPES
else:
exptypelist = [exptype]
for exptype in exptypelist:
#print exptype
for gate in expr[exptype].values():
tmp = False
try:
tmp = gate['GATE']['nested_source_split'][-1]
except:
# Some expressions lack nested-source
if DEBUG:
print 'missing nested-source for', gate['GATE']
tmp = False
counters['exp-pair no nested source'] += 1
if tmp:
if tmp in holders:
if isinstance(holders[tmp], OrderedDict):
coref = []
for h in holders[tmp].values():
coref.append(h['token_id'])
for h in holders[tmp].values():
tuples.append((gate['token_id'], h['token_id'], exptype, coref))
else:
tuples.append((gate['token_id'], holders[tmp]['token_id'], exptype, False))
elif tmp == 'writer' or tmp == 'w':
#print "w"
tuples.append((gate['token_id'], 'w', exptype, False))
else: #Implicit
#print "i"
tuples.append((gate['token_id'], 'implicit', exptype, False))
return tuples
def getholders_sent(sent):
"""
@param sent List of tokens in sentence
@return List of opinion holders in sent
"""
    # superseded by getholders_sent_new below; kept for reference only
    raise NotImplementedError("use getholders_sent_new instead")
holders = OrderedDict()
for i, token in enumerate(sent):
for gate in token['GATE']:
if re_holder.match(gate['ann_type']):
#print gate
ignore_holder = False
if 'nested_source_split' not in gate:
# Some GATE_agent have not id or nested-source
if 'id' not in gate:
ignore_holder = True
else:
tmp_id = gate['id']
else:
tmp_id = gate['nested_source_split'][-1]
if ignore_holder:
pass
counters['mpqa_expression_without_holder'] += 1
elif tmp_id not in holders:
holders[tmp_id] = {'GATE': gate,
'token_id': set([i+1])}
else:
holders[tmp_id]['token_id'].add(i+1)
return holders
def getholders_sent_new(sent):
"""
@param sent List of tokens in sentence
@return List of opinion holders in sent
"""
holders = OrderedDict()
for i, token in enumerate(sent):
for gate in token['GATE']:
if re_holder.match(gate['ann_type']):
#print gate
ignore_holder = False
if 'nested_source_split' not in gate:
# Some GATE_agent have not id or nested-source
if 'id' not in gate:
ignore_holder = True
else:
tmp_id = gate['id']
else:
tmp_id = gate['nested_source_split'][-1]
if ignore_holder:
pass
elif tmp_id not in holders:
holders[tmp_id] = OrderedDict()
holders[tmp_id][gate['line_id']] = {'GATE': gate,
'token_id': set([i+1])}
else:
if gate['line_id'] in holders[tmp_id]:
holders[tmp_id][gate['line_id']]['token_id'].add(i+1)
else:
holders[tmp_id][gate['line_id']] = {'GATE': gate,
'token_id': set([i+1])}
return holders
def daughterlists_sent(sent):
"""
Adds set of daughters for each token in sent.
@param sent List of tokens in sent
"""
for i, token in enumerate(sent):
if 'daughters' not in token:
token['daughters'] = set()
head = int(token['head'])
if 0 < head <= len(sent):
if 'daughters' in sent[head-1]:
sent[head-1]['daughters'].add(i+1)
else:
sent[head-1]['daughters'] = set([i+1])
elif head == 0:
pass
#print 'root'
else:
print u"ERROR in conll-file. head: {}, form: {} len(sent): {}".format(head, token['form'], len(sent))
print "Set head to 0"
token['head'] = 0
#raise ValueError(u"ERROR: {} {}. len(sent): {}".format(head, token['form'], len(sent)))
def cleandaughterlists(lst):
"""
Removes set of daughters for each token in each sent in list.
@param lst List of sentences with tokens
"""
for sent in lst:
for token in sent:
if 'daughters' in token:
del token['daughters']
def cleanpaths(sent):
for t in sent:
if 'paths' in t:
del t['paths']
def getgraph_sent(sent):
# http://docs.scipy.org/doc/scipy/reference/sparse.csgraph.html
tmp = []
for i, t in enumerate(sent):
arr = [0]*len(sent)
for num in t['daughters']:
arr[num-1] = 1
if t['head'] != '0':
arr[int(t['head'])-1] = 1
tmp.append(arr)
graph = np.ma.masked_values(tmp, 0)
return csgraph.csgraph_from_dense(graph)
def getpaths_sent(graph):
return csgraph.shortest_path(graph, return_predecessors=True)
def print_path(paths, i1, i2):
i = i1
while i != i2:
print(i)
i = paths[1][i2, i]
def syntactic_path(cand, expr, sent, paths=False):
"""
@param cand Token number for holder candidate (starting on 1)
@param expr Token number for expression head (starting on 1)
@param sent List of tokens in sentence
@return unicode string
"""
agg_path = u''
if not paths:
dist, predec = getpaths_sent(getgraph_sent(sent))
else:
dist, predec = paths
# ↑
# ↓
i = i1 = cand - 1
i2 = expr -1
while i != i2:
if predec[i2, i]+1 == int(sent[i]['head']):
agg_path += sent[i]['deprel'] #unicode(i)
agg_path += u"↑"
elif predec[i2, i]+1 in sent[i]['daughters']:
agg_path += sent[predec[i2, i]]['deprel'] #unicode(i)
agg_path += u"↓"
else:
return "none"
print "FEIL - ingen path funnet"
i = predec[i2, i]
return agg_path
def get_predicates(sent):
preds = {}
count = 0
for t in sent:
if t['pred'][0] != '_':
preds[t['pred']] = count
count += 1
return preds
def shallow_sem_relation(cand, expr, sent):
preds = get_predicates(sent)
if sent[cand]['pred'] in preds:
pred_i = preds[sent[cand]['pred']]-1
tmp = sent[expr]['arg'][pred_i]
if tmp != '_':
return sent[expr]['arg'][pred_i]
elif sent[expr]['pred'] in preds:
pred_i = preds[sent[expr]['pred']]-1
tmp = sent[cand]['arg'][pred_i]
if tmp != '_':
return sent[cand]['arg'][pred_i]
else:
return False
def token_is_holder(num, sent, pairs, exptype):
pair_num = set()
for p in pairs:
if isinstance(p[1], set):
if num in p[1]:
pair_num.add(num)
else:
pass
else:
pass
if pair_num:
return (True, pair_num)
else:
return (False, pair_num)
def getholdercandidates_list_sent(sent):
hc = {}
for i, t in enumerate(sent):
if t['holder_candidate']:
for expt in t['holder_candidate']:
if expt not in hc:
hc[expt] = set([i+1])
else:
hc[expt].add(i+1)
return hc
def getex_head(ex_set, sent):
# return first that has head outside phrase
for num in ex_set:
#print ": ", sent[num-1]['head']
try:
if int(sent[num-1]['head']) not in ex_set:
return num
except:
print sent
print ex_set
print num
raise
def dom_ex_type(sent, head, transitive=False):
"""
Return a string representing the expression type(s) of head, if exists.
@param sent List of tokens in sentence.
@param head Token num for expression head
@return string
"""
dom_ex_type_str = ''
if not isinstance(head, int):
head = int(head)
if head == 0:
return False
if sent[head-1]['dse']:
dom_ex_type_str += 'dse'
if sent[head-1]['ese']:
dom_ex_type_str += 'ese'
if sent[head-1]['ose']:
dom_ex_type_str += 'ose'
if transitive and not dom_ex_type_str:
return dom_ex_type(sent, sent[head-1]['head'])
return dom_ex_type_str
def ex_verb_voice(sent, ex_set, be_outside_ex=True):
"""
Finds verb voice feature.
    1. One of the tokens in the set must be a past participle - VBN
2. One of the tokens in the set must be lemma 'be'
3. VBN's head has lemma 'be'
If none of the tokens is verb, returns string 'None'
@param sent List of tokens in sentence.
@param ex_set set of nums in expression
@return string Active, Passive or None
"""
criteria_1 = False
criteria_2 = False
criteria_3 = False
verb_exists = False
_slice = []
for num in ex_set:
_slice.append(num-1)
if sent[num-1]['pos'] == 'VBN':
criteria_1 = True
if sent[int(sent[num-1]['head'])-1]['lemma'] == 'be':
criteria_3 = True
if sent[num-1]['lemma'] == 'be':
criteria_2 = True
if sent[num-1]['pos'][0] == 'V':
verb_exists = True
if criteria_1 and criteria_2 and criteria_3:
return 'Passive'
elif criteria_1 and criteria_3 and be_outside_ex:
_slice.sort()
return 'Passive'
elif verb_exists:
return 'Active'
else:
return 'None'
def extolst(dict, gatekey='GATE'):
return_lst = []
for lst in dict.values():
for t in lst.values():
return_lst.append({'token_id': t['token_id'], 'expt': gatestr2label(t[gatekey]['ann_type'])})
return return_lst
def count_sys(lst, save=False):
return_lst = []
exp_seen = set()
exp_seen_set = set()
for item in lst:
if str(item[0]) not in exp_seen:
exp_seen.add(str(item[0]))
return_lst.append(item)
#if args.onlyinternals:
# if not isinstance(item[1], basestring):
# counters['sys_len_new' + item[2]] += 1
#else:
counters['sys_len_new' + item[2]] += 1
return return_lst
def count_gold(lst):
exp_seen = set()
exp_seen_set = set()
for item in lst:
if str(item[0]) not in exp_seen:
exp_seen.add(str(item[0]))
#if args.onlyinternals:
# if not isinstance(item[1], basestring):
# counters['gold_len_new' + item[2]] += 1
#else:
counters['gold_len_new' + item[2]] += 1
for item in lst:
if not item[0].intersection(exp_seen_set):
exp_seen_set = exp_seen_set | item[0]
counters['gold_len_ignoring_overlap' + item[2]] += 1
def getfeaturesandlabels(lst, exptype=False, semantic=True, predict=True):
"""
To use with evaluation. For each expression, it will return both the corresponding gold and predicted holder.
TODO - a version of this function without returning gold holders
"""
if 'PGATE' in lst[0][0]:
print "Get features from {} expressions.".format('predicted' if predict else 'gold')
else:
print "Get features from gold expressions. (No PGATE in token)"
predict = False
stats = {'holders_not_in_candidates': [],
'position': {},
'expt_not_in_candidates': []}
if not exptype:
exptypelist = EXPTYPES
features = {}
labels = {}
pos = {}
ev = evaluate()
for expt in EXPTYPES:
features[expt] = []
labels[expt] = []
pos[expt] = []
features[expt+'implicit'] = []
labels[expt+'implicit'] = []
pos[expt+'implicit'] = []
features[expt+'w'] = []
labels[expt+'w'] = []
pos[expt+'w'] = []
for sent_i, sent in enumerate(lst):
if DEBUG: print "---", sent_i
if sent_i % 1000 == 0: print "setning", sent_i
daughterlists_sent(sent)
ex = getexpressions_sent(sent)
pex = getexpressions_sent(sent, predict=predict)
tagholdercandidates_sent(sent, predict=predict)
candidates = getholdercandidates_list_sent(sent)
holder_dct = getholders_sent_new(sent)
holder_exp_pairs = getholder_exp_pairs_sent(sent, ex, holder_dct, test=predict)
count_gold(holder_exp_pairs)
if True: # syntactic_path
paths = getpaths_sent(getgraph_sent(sent))
else:
paths = False
if predict:
holder_exp_pairs_sys = []
for c, p in enumerate(extolst(pex, gatekey='PGATE')):
# first located e' that corresponded to e
argmaxcxe = 0 # at least some overlap
if args.argmaxcxe:
argmaxcxe = int(args.argmaxcxe)
current_pair = None
for exp_pair_i, exp_pair in enumerate(holder_exp_pairs):
#argmax c(x,e) regardless of exp type j&m 7.1.1
if DEBUG:
print exp_pair
cxe = ev.spancoverage(exp_pair[0], p['token_id'])
if DEBUG:
print cxe
if cxe > argmaxcxe:
argmaxcxe = cxe
current_pair = exp_pair
if current_pair:
holder_exp_pairs_sys.append((p['token_id'], current_pair[1], current_pair[2], current_pair[3]))
else:
counters['falsely_detected_exp'] += 1
counters['falsely_detected_exp' + p['expt']] += 1
if predict:
holder_exp_pairs_use = holder_exp_pairs_sys
else:
holder_exp_pairs_use = holder_exp_pairs
holder_exp_pairs_use = count_sys(holder_exp_pairs_use, save=True)
for exp_pair in holder_exp_pairs_use:
expt = exp_pair[2]
cand_exists = True
holder_set = True
# Categorise
if isinstance(exp_pair[1], str):
#if predict:
holder_set = False
elif isinstance(exp_pair[1], set):
                # if the holder is not among the holder candidates
#print candidates
if expt in candidates:
if not exp_pair[1].intersection(candidates[expt]):
counters['holder_not_in_candidate_head'] += 1
cand_exists = False
for cand in candidates[expt]:
if exp_pair[1].intersection(get_subtree(sent, cand, transitive=True)):
cand_exists = True
if not cand_exists:
counters['holder_not_in_candidates'] += 1
counters['holder_not_in_candidates' + exp_pair[2]] += 1
stats['holders_not_in_candidates'].append({'candidates': candidates[expt],
'exp_pair': exp_pair})
else:
cand_exists = False
counters['ignore_count'] += 1
counters['holder not in candidates - special case'] += 1
#if cand_exists:
# For prediction:
elif isinstance(exp_pair[1], OrderedDict):
if expt in candidates:
holdermax = argmaxcxh(exp_pair[1], candidates[expt])
if not holdermax[0]:
cand_exists = False
counters['ignore_count'] += 1
else:
cand_exists = False
counters['expt_not_in_candidates - new'] += 1
stats['expt_not_in_candidates'].append({'sent': sent_i,
'exp_pair': exp_pair})
else:
raise Exception('exp_pair[1] of unknown type: {}'.format(exp_pair[1]))
if not predict or cand_exists:
# we don't need to count false predicted holders, the p. sum is already
# made, but we need these for training
# ext-classifiers (w/imp)
# labels
if exp_pair[1] == 'w':
labels[expt + 'w'].append(True)
labels[expt + 'implicit'].append(False)
elif exp_pair[1] == 'implicit':
labels[expt + 'w'].append(False)
labels[expt + 'implicit'].append(True)
else:
labels[expt + 'w'].append(False)
labels[expt + 'implicit'].append(False)
# Features
featuresdict = {}
ex_head = getex_head(exp_pair[0], sent)
featuresdict['ex_head_word'] = sent[ex_head-1]['form']
featuresdict['ex_head_pos'] = sent[ex_head-1]['pos']
featuresdict['ex_head_lemma'] = sent[ex_head-1]['lemma']
tmp = dom_ex_type(sent, sent[ex_head-1]['head'], transitive=False)
if tmp:
featuresdict['dom_ex_type'] = tmp
featuresdict['ex_verb_voice'] = ex_verb_voice(sent, exp_pair[0])
featuresdict['deprel_to_parent'] = sent[ex_head-1]['deprel']
features[expt + 'w'].append(featuresdict)
#features[expt + 'implicit'].append(featuresdict)
pos[expt + 'w'].append({'sent': sent_i,
'exp': exp_pair[0],
'holder_gold': exp_pair[1],
'holder_sys': 'w'})
pos[expt + 'implicit'].append({'sent': sent_i,
'exp': exp_pair[0],
'holder_gold': exp_pair[1],
'holder_sys': 'implicit'})
if cand_exists:
# internals
if expt in candidates:
featuresandlabeladded = False
for cand in candidates[expt]:
if args.restrict == 'sameexp' and cand in exp_pair[0]: #get_subtree(sent, cand, transitive=True)):
pass
else:
featuresdict = {}
if holder_set:
featuresandlabeladded = True
# labels
if isinstance(exp_pair[1], OrderedDict):
label = cand_in_ghodct(cand, exp_pair[1])
if isinstance(exp_pair[1], set):
label = cand in exp_pair[1]
elif isinstance(exp_pair[1], str):
label = cand == exp_pair[1]
labels[expt].append(label)
# positions
pos[expt].append({'sent': sent_i,
'exp': exp_pair[0],
'holder_sys': get_subtree(sent, cand, transitive=True),
'holder_gold': exp_pair[1],
'coref_gold': exp_pair[3],
'exptype' : expt
})
# features
ex_head = getex_head(exp_pair[0], sent)
featuresdict['synt_path'] = syntactic_path(cand, ex_head,
sent, paths=paths)
if semantic:
tmp = shallow_sem_relation(cand-1, ex_head-1, sent)
if tmp:
featuresdict['shal_sem_rel'] = tmp
featuresdict['ex_head_word'] = sent[ex_head-1]['form']
featuresdict['ex_head_pos'] = sent[ex_head-1]['pos']
featuresdict['ex_head_lemma'] = sent[ex_head-1]['lemma']
featuresdict['cand_head_word'] = sent[cand-1]['form']
featuresdict['cand_head_pos'] = sent[cand-1]['pos']
tmp = dom_ex_type(sent, sent[ex_head-1]['head'], transitive=False)
if tmp:
featuresdict['dom_ex_type'] = tmp
featuresdict['ex_verb_voice'] = ex_verb_voice(sent, exp_pair[0])
if cand > 1:
featuresdict['context_r_word'] = sent[cand-2]['form']
featuresdict['context_r_pos'] = sent[cand-2]['pos']
if cand < len(sent):
featuresdict['context_l_word'] = sent[cand]['form']
featuresdict['context_l_pos'] = sent[cand]['pos']
featuresdict['deprel_to_parent'] = sent[ex_head-1]['deprel']
features[expt].append(featuresdict)
else:
counters["expt_not_in_candidates"] += 1
counters["expt_not_in_candidates" + expt] += 1
stats['positions'] = pos
return features, labels, stats
def argmaxcxh(ghodct, ph):
curmax = 0
cur = False
for h in ghodct.values():
cxh = spancoverage(ph, h['token_id'])
if cxh > curmax:
curmax = cxh
cur = h
return cur, curmax
def cand_in_ghodct(cand, ghodct):
for h in ghodct.values():
if cand in h['token_id']:
return True
return False
def create_matrix(features, labels):
"""
@return vec, X, y
"""
vec = DictVectorizer()
X = vec.fit_transform(features)
y = np.array(labels)
return vec, X, y
def transform_to_matrix(features, labels, vec):
"""
@return X, y
"""
X = vec.transform(features)
y = np.array(labels)
return X, y
def create_model(X, y):
try:
clf = svm.SVC(probability=True, kernel='linear')
clf.fit(X, y)
return clf
except:
return False
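# Illustrative (not executed) sketch of how the helpers above fit together;
# `features`/`labels` are the dicts returned by getfeaturesandlabels, `expt`
# is one of EXPTYPES, and the *_test names are placeholders:
#
#   vec, X_train, y_train = create_matrix(features[expt], labels[expt])
#   clf = create_model(X_train, y_train)
#   X_test, y_test = transform_to_matrix(features_test[expt],
#                                        labels_test[expt], vec)
#   if clf:
#       probs = clf.predict_proba(X_test)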
def token_exp(token, exptype=False):
"""
@return set of exp-line_ids from GATE
"""
line_ids = set()
for gate in token['GATE']:
if ((not exptype or exptype == 'ese') and re_ese.match(gate['ann_type'])):
line_ids.add(gate['line_id'])
if ((not exptype or exptype == 'ose') and re_ose.match(gate['ann_type'])):
line_ids.add(gate['line_id'])
if ((not exptype or exptype == 'dse') and re_dse.match(gate['ann_type'])):
line_ids.add(gate['line_id'])
return line_ids
def count_holder_candidates(lst, exptype=False, check_exp=False):
# TODO Check for individual exptypes
counters = {
'sents': 0,
'has_holder_candidate': 0,
'holders_are_not_candidates': 0,
'holders': 0,
'count_exp': 0
}
for sent in lst:
holder_candidates = 0
holders_are_not_candidates = False
exps = set()
for t in sent:
t_exp = token_exp(t, exptype)
exps |= t_exp
if 'holder_candidate' in t:
pass
return counters
def cleanupnonespanexpressions(lst, partial=False):
for sent in lst:
for t in sent:
t['dse'] = False
t['ese'] = False
t['ose'] = False
for gate in t['GATE']:
if partial:
if gate['slice'].start != gate['slice'].stop:
tmp = gate['ann_type']
#if tmp == 'GATE_objective-speech-event':
if re_ose.match(tmp):
t['ose'] = True #tmp[4]
elif re_ese.match(tmp): #tmp == 'GATE_expressive-subjectivity':
t['ese'] = True #tmp[4]
elif re_dse.match(tmp): #tmp == 'GATE_direct-subjective':
t['dse'] = True #tmp[4]
else:
str = t['slice'][6:-1].split(',')
tslice = slice(int(str[0]), int(str[1]))
if (gate['slice'].start != gate['slice'].stop and
gate['slice'].stop >= tslice.stop):
tmp = gate['ann_type']
#if tmp == 'GATE_objective-speech-event':
if re_ose.match(tmp):
t['ose'] = True #tmp[4]
elif re_ese.match(tmp): #tmp == 'GATE_expressive-subjectivity':
t['ese'] = True #tmp[4]
elif re_dse.match(tmp): #tmp == 'GATE_direct-subjective':
t['dse'] = True #tmp[4]
def count_holder_candidates_missing(lst, holder_exp_pairs):
holder_candidates = 0
holder_candidates_in_other_expression = 0
holder_candidates_in_other_expression_that_is_holder = 0
not_holder_candidates_that_is_holder = 0
sentences_without_holder_candidate = 0
sentences_without_holder_candidate_all = 0
sentences = 0
for i, sent in enumerate(lst):
sentences += 1
has_hc_all = False
has_hc = False
for t in sent:
if len(t['holder_candidate']) > 0:
holder_candidates += 1
has_hc_all = True
if len(t['holder_candidate']) < 3:
holder_candidates_in_other_expression += 1
else:
has_hc = True
        if not has_hc:
            sentences_without_holder_candidate += 1
        if not has_hc_all:
            sentences_without_holder_candidate_all += 1
print "HC: ", holder_candidates
print "HC, in expression of another type: ", holder_candidates_in_other_expression
print "Sents: ", sentences
print "Sents without HC: ", sentences_without_holder_candidate
print "Sents without HC (restriction on same expression type): ", sentences_without_holder_candidate_all
def count_span_shorter_than_token(lst):
sents = 0
tokens = 0
tokens_not_0 = 0
spans = 0
sent_n = 0
for sent in lst:
count_sent = False
token_n_hit = []
token_n = 0
for token in sent:
count_token = False
token_len = token['slice'].stop - token['slice'].start
for gate in token['GATE']:
gate_len = gate['slice'].stop - gate['slice'].start
if gate_len < token_len and gate_len != 0:
spans += 1
count_token = True
count_sent = True
if count_token:
tokens += 1
token_n_hit.append(token_n)
if token_n != 0:
tokens_not_0 += 1
token_n += 1
if count_sent:
print sent_n, token_n_hit
sents += 1
sent_n += 1
return {'sents': sents, 'tokens': tokens, 'spans': spans, 'tokens_not_0': tokens_not_0}
def get_subtree(sent, num, transitive=True):
# Gets the whole subtree, this is a problem with holders like michael hirsch ..., sent 1271 devtestset
span = set([num])
daughters = sent[num-1]['daughters']
if transitive:
for d in daughters:
span = span.union(get_subtree(sent, d, transitive=transitive))
else:
span = span.union(daughters)
return span
def find_ex_sent(lst):
for i, s in enumerate(lst):
crit1 = False
crit2 = False
crit3 = False
if len(s) < 10:
for t in s:
if t['ese'] or t['dse']: crit1 = True
if t['form'].lower() == 'the': crit2 = True
for g in t['GATE']:
if g['ann_type'] == 'GATE_agent':
crit3 = True
if crit1 and crit2 and crit3: print i
class evaluate:
def __init__(self, labels=EXPTYPES):
self.labels = []
for label in labels:
self.labels.extend([label, label + 'w', label + 'implicit'])
self.sums = {}
self.counts = {}
for label in self.labels:
self.sums[label] = {'p': 0, 'r': 0}
self.counts[label] = {'p': 0, 'r': 0}
self.current_ex = None
def spancoverage(self, span, spanprime):
if isinstance(span, basestring) or isinstance(spanprime, basestring):
if span == spanprime:
return 1
else:
return 0
tmp = span.intersection(spanprime)
if tmp:
return float(len(tmp)) / len(spanprime)
return 0
def spansetcoverage(self, spanset, spansetprime):
sum = 0.0
for spanprime in spansetprime:
for span in spanset:
sum += self.spancoverage(span, spanprime)
return sum
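    # Worked example (hypothetical token-id spans) for the two coverage measures above:
    #   spancoverage({1, 2, 3}, {2, 3, 4}) -> |{2, 3}| / |{2, 3, 4}| = 2/3
    #   spancoverage('implicit', 'implicit') -> 1, while spancoverage('implicit', 'w') -> 0
    #   spansetcoverage simply sums spancoverage over every (span, spanprime) combination.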
def get_unique_exp(self, s_p_g, exptype, count=True):
unique_exp_s_p = []
# cur = False
# for item in s_p_g:
# if cur and (cur['sent'] == item['sent'] and
# cur['exp'] == item['exp']):
# pass
# else:
# unique_exp_s_p.append(item)
# cur = item
exp_seen = set()
exp_seen_set = set()
for item in s_p_g:
if ('i' + str(item['exp']) + 's' + str(item['sent'])) not in exp_seen:
exp_seen.add('i' + str(item['exp']) + 's' + str(item['sent']))
counters['gold_len_new_getunique' + exptype] += 1
unique_exp_s_p.append(item)
#if args.onlyinternals:
# for item in unique_exp_s_p:
# if item['holder_gold'] == 'w':
# counter['g_holder_w_' + item['exp']] += 1
# if self.spancoverage(item['holder_gold'], item['holder_sys']) > 0:
# counter['s_holder_w_' + item['exp']] += 1
# if item['holder_gold'] == 'implicit':
# counter['g_holder_implicit_' + item['exp']] += 1
# if self.spancoverage(item['holder_gold'], item['holder_sys']) > 0:
# counter['g_holder_implicit_' + item['exp']] += 1
return unique_exp_s_p
def merge_system_pairs(self, s_p_int, s_p_imp=False, s_p_w=False):
"""
        @param s_p_int list of exp-holder pairs from the internal-holder classifier
        @param s_p_imp optional list of pairs from the implicit-holder classifier
        @param s_p_w optional list of pairs from the writer-holder classifier
        @return List of system pairs for unique expressions, keeping the highest-confidence holder
"""
try:
if s_p_int:
counters['s_p_int'] = len(s_p_int)
if not s_p_imp and not s_p_w:
return s_p_int
except:
print "1029-feil"
print s_p_int
print s_p_imp
print s_p_w
s_p = []
if not s_p_imp:
s_p_imp = []
if not s_p_w:
s_p_w = []
if DEBUG:
for it in s_p_int:
print it['sent'], it['exp'], it['holder_gold']
for it in s_p_w:
print it['sent'], it['exp'], it['holder_gold']
for cur_int, cur_imp, cur_w in itertools.izip_longest(s_p_int, s_p_imp, s_p_w):
skipthis = False
if cur_int:
cur = cur_int
elif cur_imp:
cur = cur_imp
elif cur_w:
cur = cur_w
else:
print "THIS IS NOT A PAIR"
skipthis = True
if not skipthis:
                if cur_imp and ((cur_imp['confidence'] > 0.5 and cur_imp['confidence'] > cur['confidence']) or cur['confidence'] == 0):
if cur_imp['sent'] != cur['sent']:
raise
cur = cur_imp
if cur_w:
if cur_w['sent'] != cur['sent']:
print "int.. ", len(s_p_int)
print "imp.. ", len(s_p_imp)
print "w.. ", len(s_p_w)
print cur_w
print cur
raise
if (cur_w['confidence'] > 0.5 and cur_w['confidence'] > cur['confidence']) or cur['confidence'] == 0:
cur = cur_w
s_p.append(cur)
if DEBUG:
print "Pairs"
for p in s_p:
print p
return s_p
def get_system_pairs_prob(self, lst, results, gold_lst):
"""
Return a list of pairs detected by system and the confidence level.
For the gold expr, we can ignore the
"""
system_pairs = []
counters['getsp gold lst'] = len(gold_lst)
if isinstance(results, np.ndarray):
cur = None
curmax = -1
for i, item in enumerate(lst):
if cur and (item['sent'] != cur['sent'] or
item['exp'] != cur['exp']):
cur.update({'confidence': curmax})
system_pairs.append(cur)
curmax = -1
cur = None
if not cur:
curmax = results[i][1]
cur = item
if results[i][1] > curmax:
curmax = results[i][1]
cur = item
if cur:
cur.update({'confidence': curmax})
system_pairs.append(cur)
c = 0
s_p_new = []
for it in gold_lst:
if len(system_pairs) > c:
if (it['sent'] == system_pairs[c]['sent'] and
it['exp'] == system_pairs[c]['exp']):
s_p_new.append(system_pairs[c])
c += 1
else:
it['confidence'] = 0
s_p_new.append(it)
if DEBUG: print "skip", it
system_pairs = s_p_new
cur = False
for item in system_pairs:
if cur and (cur['sent'] == item['sent'] and
cur['exp'] == item['exp']):
print "MUL: ", cur, '\n', item
print "MULTIPLE EXP IN EXP_PAIRS"
raise
cur = item
return system_pairs
for i, item in enumerate(lst):
if results[i]:
system_pairs.append(item)
return system_pairs
def get_system_pairs(self, lst, results, s_p_imp=False, s_p_w=False):
"""
Return a list of pairs detected by system.
For the gold expr, we can ignore the
"""
system_pairs = []
if isinstance(results, np.ndarray):
cur = None
curmax = None
for i, item in enumerate(lst):
if cur and item['sent'] != cur['sent']:
system_pairs.append(cur)
cur = None
if not cur:
curmax = results[i][1]
cur = item
if results[i][1] > curmax:
curmax = results[i][1]
cur = item
if cur:
system_pairs.append(cur)
return system_pairs
for i, item in enumerate(lst):
if results[i]:
system_pairs.append(item)
return system_pairs
def spansetcoverage_o(self, lst):
prec_sum = 0.0
rec_sum = 0.0
for item in lst:
prec_sum += self.spancoverage(item['holder_sys'], item['holder_gold'])
rec_sum += self.spancoverage(item['holder_gold'], item['holder_sys'])
return {'p': prec_sum/len(lst), 'r': rec_sum/len(lst)}
def check_coref(self, coref, sys):
maxcxh = -1
argmaxcxh = False
for item in coref:
tmp = self.spancoverage(item, sys)
if tmp > maxcxh:
maxcxh = tmp
argmaxcxh = item
return argmaxcxh
def spansetcoverage_o_p(self, lst, exptype=False):
sys_len = 0
gold_len = 0
prec_sum = 0.0
rec_sum = 0.0
for item in lst:
if 'coref_gold' in item and len(item['coref_gold']) > 1:
holder_gold = self.check_coref(item['coref_gold'], item['holder_sys'])
else:
holder_gold = item['holder_gold']
rec_sum += self.spancoverage(item['holder_sys'], holder_gold)
prec_sum += self.spancoverage(holder_gold, item['holder_sys'])
if exptype:
gold_len = counters['gold_len_new' + exptype]
sys_len = (counters['sys_len_new' + exptype]
+ counters['falsely_detected_exp' + exptype])
if False: # args.onlyinternals:
sys_len -= counters['expt_not_in_candidates' + exptype]
else:
for exp in EXPTYPES:
gold_len += counters['gold_len_new' + exp]
sys_len += counters['sys_len_new' + exp]
sys_len += counters['falsely_detected_exp']
if False: # args.onlyinternals:
sys_len -= counters['expt_not_in_candidates']
if DEBUGNOW:
print "exptype: {}".format(exptype)
print "prec_sum: {} (del p s len)".format(prec_sum)
print "rec_sum: {} (del p g len)".format(rec_sum)
print 'gold len: {}'.format(gold_len)
print 'sys len: {}'.format(sys_len)
return {'p': prec_sum/sys_len, 'r': rec_sum/gold_len}
def print_tikzdep(sent):
for i, t in enumerate(sent):
if t['head'] == 0 or t['head'] == '0':
print "\deproot{" + str(i+1) + "}{ROOT}"
else:
print "\depedge{" + t['head'] + "}{" + str(i+1) + "}{" + t['deprel'] + '}'
def print_stats(tset, exptype=EXPTYPES, deprep=False):
cleanupnonespanexpressions(tset)
cleanholders(tset)
cleanholdercandidates(tset)
print "== deprep", deprep, "=="
f, l, s = getfeaturesandlabels(tset, semantic=False)
for exp in EXPTYPES:
print exp + ":", len(f[exp])
print exp + " w/imp:", len(f[exp + 'w'])
def print_eval(trainset, testset, exptypes=EXPTYPES, semantic=False, savemodels=False, loadmodels=False, deprep=False, externals=True, predict=True):
"""
Runs the system, prints P/R/F to stdout.
@param trainset list of sentences with lists of tokens
@param testset list of sentences with lists of tokens
"""
system_pairs = []
print "== cleaning lsts =="
cleanupnonespanexpressions(testset)
cleanholdercandidates(testset)
cleanholders(testset)
cleanupnonespanexpressions(trainset)
cleanholdercandidates(trainset)
cleanholders(trainset)
print "== train =="
ev = evaluate()
features, labels, stats = getfeaturesandlabels(trainset, semantic=semantic, predict=False)
print counters, '\n'
print "== test =="
counters.clear()
ftest, ltest, stest = getfeaturesandlabels(testset, semantic=semantic, predict=predict)
print counters
for exp in exptypes:
vec, X, y = create_matrix(features[exp], labels[exp])
if externals:
vecw, Xw, yw = create_matrix(features[exp + 'w'], labels[exp + 'w'])
vecimp, Ximp, yimp = create_matrix(features[exp + 'w'], labels[exp + 'implicit'])
if loadmodels:
clf = read_model(loadmodels + exp)
else:
clf = create_model(X, y)
        if externals:
            clfw = create_model(Xw, yw)
            clfimp = create_model(Ximp, yimp)
        else:
            clfw = clfimp = False
if savemodels:
write_model(clf, savemodels + exp)
print "== eval =="
if deprep:
print "== {} ==".format(deprep)
Xt, yt = transform_to_matrix(ftest[exp], ltest[exp], vec)
if externals:
Xtw, ytw = transform_to_matrix(ftest[exp + 'w'], ltest[exp + 'w'], vecw)
Xtimp, ytimp = transform_to_matrix(ftest[exp + 'w'], ltest[exp + 'implicit'], vecimp)
results = clf.predict_proba(Xt)
s_p_w = False
s_p_imp = False
gold_p1 = ev.get_unique_exp(copy.deepcopy(stest['positions'][exp + 'w']), exp, count=False)
gold_p2 = copy.deepcopy(gold_p1)
gold_p3 = copy.deepcopy(gold_p1)
if clfw:
resultsw = clfw.predict_proba(Xtw)
s_p_w=ev.get_system_pairs_prob(stest['positions'][exp + 'w'], resultsw, gold_p1)
counters['s_p_w' + exp] = len(s_p_w)
if DEBUG:
print "RESULTSW"
print resultsw
if clfimp:
resultsimp = clfimp.predict_proba(Xtimp)
s_p_imp=ev.get_system_pairs_prob(stest['positions'][exp + 'implicit'], resultsimp, gold_p2)
counters['s_p_imp' + exp] = len(s_p_imp)
if DEBUG:
print "RESULTSIMP"
print resultsimp
s_p_int=ev.get_system_pairs_prob(stest['positions'][exp], results, gold_p3)
counters['s_p_int' + exp] = len(s_p_int)
system_pairs_exp = ev.merge_system_pairs(s_p_int, s_p_imp=s_p_imp, s_p_w=s_p_w)
counters['system_pairs_all' + exp] = len(system_pairs_exp)
for pair in system_pairs_exp:
if 'confidence' in pair and pair['confidence'] > 0:
counters['system_pairs' + exp] += 1
if predict:
ssc_exp = ev.spansetcoverage_o_p(system_pairs_exp, exptype=exp)
print "system exp - {}:\n{}".format(exp, prf_prettystring(ssc_exp))
else:
ssc_exp = ev.spansetcoverage_o_p(system_pairs_exp, exptype=exp)
print "gold exp - {}:\n{}".format(exp, prf_prettystring(ssc_exp))
system_pairs.extend(system_pairs_exp)
if predict:
ssc = ev.spansetcoverage_o_p(system_pairs)
print "system exp - all:\n", prf_prettystring(ssc)
else:
ssc = ev.spansetcoverage_o_p(system_pairs)
print "gold exp - all: \n", prf_prettystring(ssc)
for k,v in sorted(counters.items(), key=lambda x: x[0]):
print k, v
if isinstance(deprep, basestring):
dump_jsonfile(system_pairs, 'system_pairs-' + deprep + '.json')
return {'stats': stest, 'system_pairs': system_pairs}
def prf_prettystring(ssc=False, p=False, r=False):
if ssc:
p=ssc['p']
r=ssc['r']
return "P: {}\nR: {}\nF: {}\n".format(p, r, fscore(p, r))
def fscore(p, r):
if p + r == 0:
return 0
return 2 * p * r / (p + r)
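# Example: fscore(0.5, 0.25) = 2 * 0.5 * 0.25 / 0.75 = 1/3, the harmonic mean of P and R
# (the zero check above avoids a division by zero when both P and R are 0).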
def create_gates(lst):
# raise Exception
tmp_offset_start = 0
tmp_offset_end = 0
exp_count = 0
for sent in lst:
cur_exp = False
for token in sent:
tmp_offset_end = tmp_offset_start + len(token['form'])
token['slice'] = slice(tmp_offset_start, tmp_offset_end)
if 'label' not in token:
raise Exception
if token['label'] == 'O' or token['label'][0] == 'B':
if cur_exp:
cur_exp['PGATE'][0]['slice'] = slice(last_token['PGATE'][0]['slice_start'], last_token['slice'].stop)
cur_exp = False
token['Pdse'] = False
token['Pese'] = False
token['Pose'] = False
if token['label'][0] == 'B':
exp_count += 1
token['PGATE'] = [{'ann_type': labeltoanntype(token['label']),
'data_type': 'string',
'slice_start': tmp_offset_start,
'line_id': exp_count}]
token['P' + token['label'][2:]] = True
cur_exp = token
if token['label'][0] == 'I':
token['PGATE'] = last_token['PGATE']
token['P' + token['label'][2:]] = True
if 'PGATE' not in token:
token['PGATE'] = []
tmp_offset_start = tmp_offset_end + 1
last_token = token
# cleanup
if cur_exp:
cur_exp['PGATE'][0]['slice'] = slice(last_token['PGATE'][0]['slice_start'], last_token['slice'].stop)
for token in sent:
if 'Pdse' not in token:
print sent
raise
def labeltoanntype(label):
if label == "B-ESE" or label == "I-ESE":
return 'GATE_expressive-subjectivity'
if label == "B-OSE" or label == "I-OSE":
return 'GATE_objective-speech-event'
if label == "B-DSE" or label == "I-DSE":
return 'GATE_direct-subjective'
else:
print "Unknown label: {}".format(label)
raise Exception
def jointestandresult(tlst, rlst):
newlst = []
c = 0
if len(tlst) != len(rlst):
raise ValueError("Lists not equal length ({} / {})".format(len(tlst), len(rlst)))
for tsent,rsent in itertools.izip(tlst, rlst):
if len(tsent) != len(rsent):
raise ValueError("Sents not equal length: {}".format(c))
c += 1
newsent = []
for ttoken, rtoken in itertools.izip(tsent, rsent):
if ttoken['form'] != rtoken['form']:
print c
print "sent: {}\n{}\n "
print ttoken['form']
print rtoken['form']
newtoken = copy.deepcopy(ttoken)
newtoken['PGATE'] = rtoken['PGATE']
newtoken['Pdse'] = rtoken['Pdse']
newtoken['Pese'] = rtoken['Pese']
newtoken['Pose'] = rtoken['Pose']
newtoken['label'] = rtoken['label']
newtoken['label/score'] = rtoken['label/score']
newsent.append(newtoken)
newlst.append(newsent)
return newlst
def featurestats(lst, features='all'):
allfeatures = set(['synt_path', 'ex_head_word', 'ex_head_lemma', 'ex_head_pos', 'cand_head_pos', 'cand_head_word', 'dom_ex_type', 'ex_verb_voice', 'context_r_pos',
'context_r_word', 'context_l_pos', 'context_l_word', 'deprel_to_parent'])
if features == 'all':
features = allfeatures
if isinstance(features, basestring):
features = {features}
examplecount = 0
featurecounter = {}
featurecounters = {}
for exp in EXPTYPES:
featurecounters[exp] = {}
for it in allfeatures:
featurecounter[it] = Counter()
for exp in EXPTYPES:
featurecounters[exp][it] = Counter()
othercounters = Counter()
for i, sent in enumerate(lst):
ex = getexpressions_sent(sent)
holder_dct = getholders_sent_new(sent)
holder_exp_pairs = getholder_exp_pairs_sent(sent, ex, holder_dct)
for pair in holder_exp_pairs:
if pair[1] == 'w':
othercounters['w'] += 1
elif pair[1] == 'implicit':
othercounters['implicit'] += 1
elif isinstance(pair[1], OrderedDict):
othercounters['OrderedDict'] += 1
elif isinstance(pair[1], set):
ex_head = getex_head(pair[0], sent)
cand = getex_head(pair[1], sent)
othercounters['internal holders'] += 1
othercounters['internal holders' + pair[2]] += 1
# 'synt_path'
syntpath = syntactic_path(getex_head(pair[1], sent), ex_head, sent)
featurecounter['synt_path'][syntpath] += 1
featurecounters[pair[2]]['synt_path'][syntpath] += 1
othercounters['synt_path Length (only arrows)'] += syntpath.count(u'↑') + syntpath.count(u'↓')
othercounters['synt_path Length (only arrows)' + pair[2]] += syntpath.count(u'↑') + syntpath.count(u'↓')
# 'ex_head_word'
# 'ex_head_lemma'
# 'ex_head_pos'
featurecounter['ex_head_word'][sent[ex_head-1]['form']] += 1
featurecounter['ex_head_pos'][sent[ex_head-1]['pos']] += 1
featurecounter['ex_head_lemma'][sent[ex_head-1]['lemma']] += 1
# 'cand_head_pos'
## if DEBUG and examplecount < 5:
## if sent[getex_head(pair[1], sent)-1]['pos'] == 'JJ':
## print '\n\n'
## print sent
## print '\n\n'
## examplecount += 1
## featurecounter['cand_head_pos'][sent[getex_head(pair[1], sent)-1]['pos']] += 1
## featurecounters['cand_head_pos'][pair[2]][sent[getex_head(pair[1], sent)-1]['pos']] += 1
# 'cand_head_word'
featurecounter['cand_head_word'][sent[cand-1]['form']] += 1
featurecounter['cand_head_pos'][sent[cand-1]['pos']] += 1
# 'dom_ex_type'
tmp = dom_ex_type(sent, sent[ex_head-1]['head'], transitive=False)
if tmp:
featurecounter['dom_ex_type'][tmp] += 1
# 'ex_verb_voice'
featurecounter['ex_verb_voice'][ex_verb_voice(sent, pair[0])] += 1
# 'context_r_pos'
# 'context_r_word'
# 'context_l_pos'
# 'context_l_word'
if cand > 1:
featurecounter['context_r_word'][sent[cand-2]['form']] += 1
featurecounter['context_r_pos'][sent[cand-2]['pos']] += 1
if cand < len(sent):
featurecounter['context_l_word'][sent[cand]['form']] += 1
featurecounter['context_l_pos'][sent[cand]['pos']] += 1
# 'deprel_to_parent'
if 'deprel_to_parent' in features:
depreltoparent = sent[ex_head-1]['deprel']
featurecounter['deprel_to_parent'][depreltoparent] += 1
featurecounters[pair[2]]['deprel_to_parent'][depreltoparent] += 1
    if 'synt_path' in features:
        othercounters['synt_path ' + 'Average length (only arrows)'] = (
            othercounters['synt_path ' + 'Length (only arrows)'] / othercounters['internal holders'])
        for exp in EXPTYPES:
            othercounters['synt_path ' + 'Average length (only arrows) for ' + exp] = (
                othercounters['synt_path ' + 'Length (only arrows)' + exp]) / othercounters['internal holders' + exp]
return featurecounter, featurecounters, othercounters
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-load", "--load-json-file", dest="load json-file",
help="Load json-file",
metavar="FILE")
parser.add_argument("-save", "--save-linear-training-file",
help="Save training file",
metavar="FILE")
parser.add_argument("-i", "--interactive", dest="interactive",
help="For interactive development",
action='store_true')
parser.add_argument("--pylab", dest="interactive",
help="For interactive development",
action='store_true')
parser.add_argument("--automagic", dest="interactive",
help="For interactive development",
action='store_true')
parser.add_argument("--semantic", dest="semantic",
help="use semantic features",
action="store_true")
parser.add_argument("-predict", "--predict", dest="predict",
help="Use predicted expressions", action='store_true')
parser.add_argument("-train", "--train-file", dest="train",
help="Create conll-file", action='store_true')
parser.add_argument("-test", "--test-file", dest="test",
help="Create conll-file", action='store_true')
parser.add_argument("--held-out", dest="heldout", help="Train on held-out",
action='store_true')
parser.add_argument("-e", "--eval", dest="eval", help="run system and print evaluation",
action='store_true')
parser.add_argument("-jtrain", dest="jtrain", metavar="FILE")
parser.add_argument("-jtest", dest="jtest", metavar="FILE")
parser.add_argument("-ctrain", dest="ctrain", metavar="FILE")
parser.add_argument("-ctest", dest="ctest", metavar="FILE")
parser.add_argument("-lthsrl", dest="lthsrl", action='store_true')
parser.add_argument("-argmaxcxe", help='a value below 0 will include system exp without overlap to a gold exp')
parser.add_argument("-stats", "--stats")
parser.add_argument("-notoverlappingcandidates", dest="notoverlappingcandidates", action='store_true')
    # todo - better name
parser.add_argument("-restrict", default='sameexp', choices=['all', 'sameexp', 'sametype'])
#parser.add_argument("-notsameexp", help="todo", action='store_true')
#parser.add_argument("-restrict_same_exp", help="todo", action='store_true')
#parser.add_argument("-restrict_same_type", help="todo", action='store_true')
parser.add_argument("-iob2", dest="iob2", help="Read output data from opinion expression detection", metavar="FILE")
parser.add_argument("-savejson", dest="savejson", metavar="FILE")
parser.add_argument("-savemodels", dest="savemodels", metavar="FILE")
parser.add_argument("-loadmodels", dest="loadmodels", metavar="FILE")
parser.add_argument("-loadjsonlist", metavar="FILE")
parser.add_argument("-featurestats", choices=[
'all', 'synt_path', 'ex_head_word', 'ex_head_lemma', 'ex_head_pos', 'cand_head_pos', 'cand_head_word', 'dom_ex_type', 'ex_verb_voice', 'context_r_pos',
'context_r_word', 'context_l_pos', 'context_l_word', 'deprel_to_parent'
])
args = parser.parse_args()
print "= ARGS =\n", args
if args.loadjsonlist:
print "= LOAD JSON ="
lst = read_jsonfile(args.loadjsonlist)
if args.featurestats:
for dep in DEPREPS:
if args.featurestats == 'all':
features = ['synt_path', 'ex_head_word', 'ex_head_lemma', 'ex_head_pos', 'cand_head_pos', 'cand_head_word', 'dom_ex_type', 'ex_verb_voice', 'context_r_pos',
'context_r_word', 'context_l_pos', 'context_l_word', 'deprel_to_parent']
else:
features = {args.featurestats}
print "\n= DEPREP: {} =".format(dep)
fs, fss, os = featurestats(lst['train'][dep] + lst['test'][dep], features=args.featurestats)
if 'synt_path' in features:
features.remove('synt_path')
print "\n= synt path ="
for it in fs['synt_path'].most_common(12):
print u"{} {}".format(it[0], it[1]).encode('utf-8')
print "= Number of different features ="
print len(fs['synt_path'])
print "\n= For specific exptypes = "
for exp in EXPTYPES:
print "\n= {} =".format(exp)
for it in fss[exp]['synt_path'].most_common(5):
print u"{} {}".format(it[0], it[1]).encode('utf-8')
print "= Number of different features ="
print len(fss[exp]['synt_path'])
print "= Other counts ="
for k, v in os.items():
print k, v
print "\n= Other features ="
for f in features:
print "\n= {} =".format(f)
print "Number of different features: ", len(fs[f])
print u"Most common feature: {}".format(fs[f].most_common(1)).encode('utf-8')
for exp in EXPTYPES:
print "Number of different features: ", len(fss[exp][f])
print "Most common feature: {}".format(fss[exp][f].most_common(1)).encode('utf-8')
if f == 'dom_ex_type':
for it in fs['dom_ex_type'].most_common():
print u"{} {}".format(it[0], it[1]).encode('utf-8')
if args.train or (args.eval and not (args.jtrain or args.loadmodels) ):
print "= TRAINSET ="
trainsentlst = createfile(opinionexp=False, opinionholder=True,
devset=False if args.heldout else True, testset=False)
        dump_jsonfile(trainsentlst, DATA_PREFIX + '/out/' +
                      ('heldouttrain' if args.heldout else 'devtrain') + ".json")
        trainfilename = writeconll2009(trainsentlst, DATA_PREFIX + "/out/" +
                                       ('heldouttrain' if args.heldout else 'devtrain') + ".conll")
if args.test or (args.eval and not (args.jtest or args.loadmodels)):
print "= TESTSET ="
testsentlst = createfile(opinionexp=False, opinionholder=True,
devset=False if args.heldout else True, testset=True)
        dump_jsonfile(testsentlst, DATA_PREFIX + '/out/' +
                      ('heldouttest' if args.heldout else 'devtest') + ".json")
        testfilename = writeconll2009(testsentlst, DATA_PREFIX + "/out/" +
                                      ('heldouttest' if args.heldout else 'devtest') + ".conll")
if args.jtrain:
print "= READ JSON ="
        trainfilename = DATA_PREFIX + "/out/" + ('heldouttrain.conll' if args.heldout else 'devtrain.conll')
trainsentlst = read_jsonfile(args.jtrain)
if args.jtest:
        testfilename = DATA_PREFIX + "/out/" + ('heldouttest.conll' if args.heldout else 'devtest.conll')
testsentlst = read_jsonfile(args.jtest)
if args.iob2:
print "= READ IOB2 ="
tmp = readiob2(args.iob2)
create_gates(tmp)
testsentlst = jointestandresult(testsentlst,tmp)
if args.eval and not args.lthsrl:
print "= PARSE ="
bohnetnivre = bohnet_nivre.Bohnet_Nivre()
if not args.ctrain:
bohnet_nivre_output = bohnetnivre.run(trainfilename)
if not args.ctest:
bohnet_nivre_output = bohnetnivre.run(testfilename)
if args.eval and args.lthsrl:
print "= PARSE ="
lth_srl = lth_srl.Lth_srl()
if not args.ctrain:
writeconll(trainsentlst, trainfilename)
lth_srl_output = lth_srl.run(trainfilename)
if not args.ctest:
lth_srl_output = lth_srl.run(testfilename)
if args.ctrain:
trainfilename = args.ctrain
if args.ctest:
testfilename = args.ctest
if args.eval or args.stats:
print "= EVAL ="
trainsentlsts = {}
testsentlsts = {}
if args.lthsrl:
trainsentlst = readconlltolst(trainsentlst, trainfilename + ".out")
testsentlst = readconlltolst(testsentlst, testfilename + ".out")
#print trainsentlst[0]
#raise Exception
else:
for dr in DEPREPS:
print "= DEPREP: {} =".format(dr)
trainsentlsts[dr] = readconll2009tolst(trainsentlst, trainfilename + "." + dr)
testsentlsts[dr] = readconll2009tolst(testsentlst, testfilename + "." + dr)
#if args.run:
# print "Not implemented"
if args.stats:
if args.stats == "train":
if trainsentlst:
slst = trainsentlst
slsts = trainsentlsts
elif args.stats == "test":
if testsentlst:
slst = testsentlst
slsts = testsentlsts
if args.lthsrl:
            print_stats(slst, deprep='conll-lthsrl-wo-semantic')
else:
for dr in DEPREPS:
print_stats(slsts[dr], deprep=dr)
if args.eval:
stats = {}
if args.lthsrl:
#dump_jsonfile(testsentlst, 'testsentlistdump.json')
stats['notsem'] = print_eval(trainsentlst, testsentlst, semantic=False, loadmodels=args.loadmodels, savemodels=args.savemodels, deprep='conll-lthsrl-wo-semantic', predict=args.predict)
stats['sem'] = print_eval(trainsentlst, testsentlst, semantic=True, loadmodels=args.loadmodels, savemodels=args.savemodels, deprep='conll-lthsrl-with-semantic', predict=args.predict)
else:
for dr in DEPREPS:
stats[dr] = print_eval(trainsentlsts[dr], testsentlsts[dr], semantic=False, loadmodels=args.loadmodels, savemodels=args.savemodels, deprep=dr, predict=args.predict)
if args.savejson:
print "= SAVE JSON-FILE ="
dump_jsonfile({'train': trainsentlsts, 'test': testsentlsts}, args.savejson)
if stats:
dump_jsonfile(stats, args.savejson + '.stats.json')
if args.interactive:
DEBUG = False
DEBUGNOW = True
print "Interactive"
print args
#test = "database.mpqa.2.0/docs/xbank/wsj_0768" # feil i opinion holder
#test = "/out/eksempler-background.txt"
#a = getopinionholder(test, examplerun=True)
#a_iob2 = readiob2(DATA_PREFIX + '/out/wsj_0768.iob2')
#create_gates(a_iob2)
#3a_j = jointestandresult(a, a_dt)
#a_dt = readconll2009tolst(a_j, DATA_PREFIX + '/out/wsj_0768.conll.dt')
#f,l,s = getfeaturesandlabels(a_dt, semantic=False)
## lst = read_jsonfile(DATA_PREFIX + '/out/dev/gold_exp/goldex-o-new.json')
#lst = lst['test']
#f,l,s = getfeaturesandlabels(foo['train']['sb'], semantic=False)
#minidevresult = readiob2(DATA_PREFIX + '/out/minidevresult.txt')
##minidevtest = createfile(opinionexp=False, opinionholder=True, doclistfile="/config/doclists/minitestset.txt")
##dump_jsonfile(minidevtest, DATA_PREFIX + '/out/minidevtest.txt')
# minidevtest = read_jsonfile(DATA_PREFIX + "/out/minidevtest.txt", object_hook=pickle_object)
# ##minidevtrain = createfile(opinionexp=False, opinionholder=True, doclistfile="/config/doclists/minitrainset.txt")
# #minidevresult_copy = copy.deepcopy(minidevresult)
# #create_gates(minidevresult_copy)
# ##minidevresult_copy_sb = readconll2009tolst(minidevresult_copy, 'minidevtest.conll.sb')
# #tlst = jointestandresult(minidevtest, minidevresult_copy)
# minidevtrain = read_jsonfile(DATA_PREFIX + "/out/minidevtrain.json", object_hook=json_slice)
# minidevtrain_sb = readconll2009tolst(minidevtrain, DATA_PREFIX + '/out/minidevtrain.conll.sb')
# ##minidevtrain_dt = readconll2009tolst(minidevtrain, 'minidevtrain.conll.dt')
# ##minidevtrain_conll = readconll2009tolst(minidevtrain, 'minidevtrain.conll.conll')
# #minidevtest_sb = readconll2009tolst(tlst, 'minidevtest.conll.sb')
# minidevtest_sb = readconll2009tolst(minidevtest, DATA_PREFIX + '/out/minidevtest.conll.sb')
#minidevtest_sb = readconll2009tolst(minidevtest, 'minidevtest.conll.sb')
#print_stats(minidevtest_sb, deprep='sb')
#cleanupnonespanexpressions(testset)
#cleanholdercandidates(testset)
#cleanholders(testset)
#cleanupnonespanexpressions(minidevtrain_sb)
#cleanholdercandidates(minidevtrain_sb)
#cleanholders(minidevtrain_sb)
#
#f,l,s = getfeaturesandlabels(minidevtrain_sb, semantic=False)
#sent = minidevtest_sb[6]
#sent = minidevtest[6]
#ex = getexpressions_sent(sent)
#ex = getexpressions_sent(sent, predict=False)
#pex = getexpressions_sent(sent, predict=True)
#holders = getholders_sent_new(sent)
#hep = getholder_exp_pairs_sent(sent, ex, holders)
#x = extolst(pex)
#tf,tl,ts = getfeaturesandlabels(minidevtest_sb[0:10], semantic=False)
#tf,tl,ts = getfeaturesandlabels(minidevtest_sb, semantic=False)
#print_eval(minidevtrain_sb, minidevtest_sb, semantic=False)
#print_eval(minidevtrain_sb, minidevtest_sb, semantic=False, predict=False)
#trlst = read_jsonfile(DATA_PREFIX + '/out/holder/devtrain.json')
#trlst_sb = readconll2009tolst(trlst, DATA_PREFIX + '/out/holder/devtrain.conll.sb')
#telst = read_jsonfile(DATA_PREFIX + '/out/holder/devtest.json', object_hook=json_slice)
#telst_sb = readconll2009tolst(telst, DATA_PREFIX + '/out/holder/devtest.conll.sb')
#telst_dt = readconll2009tolst(telst, DATA_PREFIX + '/out/holder/devtest.conll.dt')
#telst_conll = readconll2009tolst(telst, DATA_PREFIX + '/out/holder/devtest.conll.conll')
#telst_srl = readconlltolst(telst, DATA_PREFIX + '/out/devtest.conll.out')
#print_eval(trlst_sb, telst_sb, semantic=False)
# lth = lth_srl.Lth_srl()
#conlloutput = lth.run(DATA_PREFIX + "/out/tmp2.conll")
#a = read_jsonfile(DATA_PREFIX + "/out/holder-trening.json")
#a = [c[3]]
#a = trainsentlst_conll
#a = trainsentlst_dt
#a = minidevtrain_dt
##ev = evaluate()
#print_eval(trainsentlst_dt, testsentlst_dt, exptypes=['dse'], semantic=False)
#print_eval(trainsentlst_dt, testsentlst_dt[0:10], exptypes=['ese'], semantic=False)
#print_eval(trainsentlst_dt, testsentlst_dt[0:10], exptypes=['ose'], semantic=False)
#a = copy.deepcopy(a_dt)
#cleanupnonespanexpressions(telst_sb)
#cleanholdercandidates(telst_sb)
#cleanholders(telst_sb)
#######b = a[3500]
#semantic=False
########features, labels, stats = getfeaturesandlabels([b], exptype='ose', transitive=True)
#print_eval(a, devtestset, semantic=False)
#features, labels, stats = getfeaturesandlabels(a, transitive=True, semantic=semantic, predict=False)
#vec, X, y = create_matrix(features['dse'], labels['dse'])
#clf = create_model(X, y)
###
##devtestset = read_jsonfile(DATA_PREFIX + "/out/holder-test.json")
##devtestset = testsentlst_conll
##devtestset = testsentlst_dt
#devtestset = a_dt
#cleanupnonespanexpressions(devtestset)
#cleanholdercandidates(devtestset)
#cleanholders(devtestset)
#ftest, ltest, stest = getfeaturesandlabels([devtestset[10]], transitive=True, semantic=semantic)
#ftest, ltest, stest = getfeaturesandlabels(devtestset, transitive=True, semantic=semantic)
# Xt, yt = transform_to_matrix(ftest['ese'], ltest['ese'], vec)
# ###results = clf.predict(Xt)
# #results = clf.predict_log_proba(Xt)
# results = clf.predict_proba(Xt)
# ##ev = evaluate_hc_tokens(results, yt)
# #
# ev = evaluate()
# system_pairs = []
# system_pairs.extend(ev.get_system_pairs(stest['positions']['ese'], results))
# #for exp in EXPTYPES:
# # system_pairs.extend(eval.get_system_pairs(stest['positions'][exp], results))
# ssc = ev.spansetcoverage_o_p(system_pairs, exptype='ese')
# print "ssc: ", ssc
#daughterlists_sent(b)
#ex = getexpressions_sent(a[3500])
#########restr = getholdercandidatesrestrictionset(ex)
#holder_candidates = tagholdercandidates_sent(a[3500], ex)
#count = 0
#print len(telst_sb)
#for sent in telst_sb:
# ex = getexpressions_sent(sent)
# holder_dct = getholders_sent(sent)
# holder_exp_pairs = getholder_exp_pairs_sent(sent, ex, holder_dct)
# if holder_exp_pairs:
# #print holder_exp_pairs
# count += 1
#g = getgraph_sent(b)
#paths = getpaths_sent(g)
##daughterlists_sent(d)
##g2 = getgraph_sent(d)
##paths2 = getpaths_sent(g2)
#find_ex_sent(a)
#for i, t in enumerate(a[5364]): print i+1, t['form'], t['pos'], t['head'], 'DSE' if t['dse'] else ''
#sent = a[5364]
#SRI
test = "database.mpqa.2.0/docs/20020510/21.50.13-28912" # SRI
a = getopinionholder(test)
# ###b = writeconll(a, DATA_PREFIX + "/out/tmp2.conll")
# ### lth = lth_srl.Lth_srl()
# ###conlloutput = lth.run(DATA_PREFIX + "/out/tmp2.conll")
conlloutput = DATA_PREFIX + '/out/tmp2.conll.out'
c = readconlltolst(a, conlloutput)
sent = c[3]
# ####foo = getfeaturesandlabels([a[5364]])
daughterlists_sent(sent)
ex = getexpressions_sent(sent)
# tagholdercandidates_sent(sent, transitive=True) #False)
# candidates = getholdercandidates_list_sent(sent)
# ####print candidates
holder_dct = getholders_sent_new(sent)
# ####try:
holder_exp_pairs = getholder_exp_pairs_sent(sent, ex, holder_dct)
#print_tikzdep(sent)
#s = ["Google is n't universally loved by newspaper execs and other content providers . ".split(),
#"The bathroom was clean according to my husband .".split()]
#f = io.open(DATA_PREFIX + "/out/eksempler-background.conll", 'w')
#for sent in sents:
# for i, w in enumerate(sent):
# f.write(u"{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(
# i+1, # word id
# w, # word form
# u"_", #token['lemma'], # gold lemma #
# u"_", # pred lemma
# u"_", #token['pos'], # gold pos #
# u"_", # pred pos
# u"_", # gold feat
# u"_", # pred feat
# u"_", # gold head
# u"_", # pred head
# u"_", # gold label
# u"_", # pred label
# u"_" # arg
# ))
# f.write(u"\n")
#f.close()
#sents = readconll2009(DATA_PREFIX + '/out/eksempler-background.conll.dt')
#sents = readconll2009(DATA_PREFIX + '/out/eksempler-background.conll.sb')
#print_tikzdep(sents[0])
#a = [({4}, {1, 2, 3}, 'dse'), ({5}, 'implicit', 'dse')]
#b = [({4}, {1, 2, 3}, 'dse'), ({4,5}, 'implicit', 'dse')]
#c = [({4}, {1, 2, 3}, 'dse'), ({4}, 'implicit', 'dse'), ({6}, {1, 2, 3}, 'dse'), ({5, 6}, 'implicit', 'dse')]
#count_gold(a)
#count_gold(b)
#count_gold(c)
#print counters
"""
Out[99]: [({4}, {1, 2, 3}, 'dse'), ({5}, 'implicit', 'dse')]
In [76]: Interactive
1 Mugabe NNP 3
2 's POS 1
3 government NN 4
4 dismissed VBD 0 DSE
5 criticism NN 4 DSE
6 of IN 5
7 the DT 8
8 election NN 6
9 . . 4
"""
# He said that he liked ...
    # See which span has the largest overlap
| mit |
ghislainp/iris | docs/iris/src/userguide/regridding_plots/interpolate_column.py | 12 | 2039 |
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import iris
import iris.quickplot as qplt
import iris.analysis
import matplotlib.pyplot as plt
import numpy as np
fname = iris.sample_data_path('hybrid_height.nc')
column = iris.load_cube(fname)[:, 0, 0]
alt_coord = column.coord('altitude')
# Interpolate the "perfect" linear interpolation. Really this is just
# a high number of interpolation points, in this case 1000 of them.
altitude_points = [('altitude', np.linspace(400, 1250, 1000))]
scheme = iris.analysis.Linear(extrapolation_mode='mask')
linear_column = column.interpolate(altitude_points, scheme)
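# Note (added for clarity): extrapolation_mode='mask' masks requested altitudes that fall
# outside the range of the source altitude coordinate instead of extrapolating them.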
# Now interpolate the data onto 10 evenly spaced altitude levels,
# as we did in the example.
altitude_points = [('altitude', np.linspace(400, 1250, 10))]
scheme = iris.analysis.Linear()
new_column = column.interpolate(altitude_points, scheme)
plt.figure(figsize=(5, 4), dpi=100)
# Plot the black markers for the original data.
qplt.plot(column, column.coord('altitude'),
marker='o', color='black', linestyle='', markersize=3,
label='Original values', zorder=2)
# Plot the gray line to display the linear interpolation.
qplt.plot(linear_column, linear_column.coord('altitude'),
color='gray',
label='Linear interpolation', zorder=0)
# Plot the red markers for the new data.
qplt.plot(new_column, new_column.coord('altitude'),
marker='D', color='red', linestyle='',
label='Interpolated values', zorder=1)
ax = plt.gca()
# Space the plot such that the labels appear correctly.
plt.subplots_adjust(left=0.17, bottom=0.14)
# Limit the plot to a maximum of 5 ticks.
ax.xaxis.get_major_locator().set_params(nbins=5)
# Prevent matplotlib from using "offset" notation on the xaxis.
ax.xaxis.get_major_formatter().set_useOffset(False)
# Put some space between the line and the axes.
ax.margins(0.05)
# Place gridlines and a legend.
ax.grid()
plt.legend(loc='lower right')
plt.show()
| gpl-3.0 |
emon10005/sympy | doc/ext/docscrape_sphinx.py | 52 | 7983 | import re
import inspect
import textwrap
import pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
## Lines that are commented out are used to make the
## autosummary:: table. Since SymPy does not use the
## autosummary:: functionality, it is easiest to just comment it
## out.
#autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
#if not self._obj or hasattr(self._obj, param):
# autosum += [" %s%s" % (prefix, param)]
#else:
others.append((param, param_type, desc))
#if autosum:
# out += ['.. autosummary::', ' :toctree:', '']
# out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for s in self._other_keys:
out += self._str_section(s)
out += self._str_member_list('Attributes')
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
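# A minimal usage sketch (added for illustration; `some_function` is a hypothetical object):
#   doc = get_doc_object(some_function)   # functions/methods dispatch to SphinxFunctionDoc
#   rst = str(doc)                        # reST-formatted numpydoc sections for Sphinx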
| bsd-3-clause |
slimpotatoes/STEM_Moire_GPA | src/mask.py | 1 | 2214 | # Mask Module
import numpy as np
def mask_classic(center, r, shape):
"""Return the mask function in the image space I defined by shape (see MIS). The classic mask takes the center of a
    circle center = (xc, yc) and its radius r and puts 1 where the image space is inside the circle and 0 outside. In
    addition, the center of the mask is used to return g_0, which corresponds to a first estimate of the unstrained
    reference."""
"""Do not forget event coordinate (x,y) from matplotlib should be switched compared to numpy array indexing"""
g_0 = np.array([(center[1] - 0.5 * shape[0]) / shape[0] * np.ones(shape),
(center[0] - 0.5 * shape[1]) / shape[1] * np.ones(shape)])
mask = np.ndarray(shape=shape)
"""Do not forget event coordinate (x,y) from matplotlib should be switched compared to numpy array indexing"""
for i in range(0, shape[1]):
for j in range(0, shape[0]):
if ((i - center[1]) ** 2 + (j - center[0]) ** 2) < (r ** 2):
mask[i, j] = 1
else:
mask[i, j] = 0
return mask, g_0
def mask_gaussian(center, r, shape):
"""Return the mask function in the image space I defined by shape (see MIS). The Gaussian mask takes the center of a
    circle center = (xc, yc) and its radius r to generate a 2D Gaussian function centered on the circle. In
    addition, the center of the mask is used to return g_0, which corresponds to the unstrained reference."""
"""Do not forget event coordinate (x,y) from matplotlib should be switched compared to numpy array indexing"""
g_0 = np.array([(center[1] - 0.5 * shape[0]) / shape[0] * np.ones(shape),
(center[0] - 0.5 * shape[1]) / shape[1] * np.ones(shape)])
"""Do not forget event coordinate (x,y) from matplotlib should be switched compared to numpy array indexing
- r corresponds to 3 * sigma => 99% gaussian mask included in circle"""
const = 1 / (2 * (r / 3) ** 2)
mesh_x, mesh_y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]))
delta_x = (mesh_x - center[0]) ** 2
delta_y = (mesh_y - center[1]) ** 2
mask = np.exp(-(delta_x + delta_y) * const)
return mask, g_0
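# Minimal usage sketch (illustrative only; the grid size, centre and radius below are arbitrary
# values chosen for this example, not ones used elsewhere in the GPA workflow).
if __name__ == "__main__":
    shape = (128, 128)
    center = (40, 70)  # (x, y) in the matplotlib event convention described in the docstrings
    radius = 10
    classic_mask, g0_classic = mask_classic(center, radius, shape)
    gaussian_mask, g0_gaussian = mask_gaussian(center, radius, shape)
    # The classic mask is 1 inside the circle and 0 outside; the Gaussian mask peaks at 1 at the centre.
    print(classic_mask.sum(), gaussian_mask.max())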
| bsd-3-clause |
xyguo/scikit-learn | examples/plot_isotonic_regression.py | 303 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
| bsd-3-clause |
pvillela/ServerSim | other_minibatch_impls.py | 1 | 2054 | from typing import TYPE_CHECKING, Sequence, Tuple
import functools as ft
import pandas as pd
from livestats import livestats
if TYPE_CHECKING:
from serversim import UserGroup
def minibatch_resp_times_pandas1(time_resolution, grp):
# type: (float, UserGroup) -> Tuple[Sequence[float], Sequence[float], Sequence[float], Sequence[float], Sequence[float], Sequence[float]]
xys = (((svc_req.time_dict["submitted"]//time_resolution) * time_resolution,
svc_req.time_dict["completed"] - svc_req.time_dict["submitted"])
for (_, svc_req) in grp.svc_req_log
if svc_req.is_completed)
df = pd.DataFrame(xys, columns=["time", "resp_time"])
grouped = df.groupby("time")["resp_time"]
counts_ser = grouped.count()
ts = counts_ser.index.values
counts = counts_ser.values
means = grouped.mean().values
q_50 = grouped.quantile(.50).values
q_95 = grouped.quantile(.95).values
q_99 = grouped.quantile(.99).values
return ts, counts, means, q_50, q_95, q_99
def minibatch_resp_times_without_pandas(time_resolution, grp):
# type: (float, UserGroup) -> Tuple[Sequence[float], Sequence[float], Sequence[float], Sequence[float], Sequence[float], Sequence[float]]
quantiles = [0.5, 0.95, 0.99]
xys = ((int(svc_req.time_dict["submitted"]/time_resolution),
svc_req.time_dict["completed"] - svc_req.time_dict["submitted"])
for (_, svc_req) in grp.svc_req_log
if svc_req.is_completed)
def ffold(map_, p):
x, y = p
if x not in map_:
map_[x] = livestats.LiveStats(quantiles)
map_[x].add(y)
return map_
xlvs = ft.reduce(ffold, xys, dict())
    xs = sorted(xlvs.keys())
ts = [x*time_resolution for x in xs]
counts = [xlvs[x].count for x in xs]
means = [xlvs[x].average for x in xs]
q_50 = [xlvs[x].quantiles()[0] for x in xs]
q_95 = [xlvs[x].quantiles()[1] for x in xs]
q_99 = [xlvs[x].quantiles()[2] for x in xs]
return ts, counts, means, q_50, q_95, q_99
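# Both variants above bucket completed requests by minibatch start time,
# floor(submitted / time_resolution) * time_resolution, then reduce each bucket to
# count / mean / 50th / 95th / 99th percentile of response time. The pandas version groups on
# the rescaled time directly, while the livestats version groups on the integer bucket index
# and rescales when building `ts`; e.g. with time_resolution = 5.0, a request submitted at
# t = 12.3 lands in the bucket labelled 10.0 in either implementation.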
| mit |
zonemercy/Kaggle | quora/pyfm/generate_len.py | 1 | 3008 | import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder,LabelEncoder,StandardScaler
from sklearn.decomposition import TruncatedSVD,PCA
from sklearn.metrics.pairwise import cosine_similarity, pairwise_distances
from sklearn.feature_extraction.text import TfidfVectorizer
import distance
from nltk.corpus import stopwords
import nltk
SEED = 2048
np.random.seed(SEED)
PATH = os.path.expanduser("~") + "/data/quora/"
train = pd.read_csv(PATH+"train_porter.csv")#, nrows=5000).astype(str)
test = pd.read_csv(PATH+"test_porter.csv")#, nrows=5000).astype(str)
def str_abs_diff_len(str1, str2):
return abs(len(str1)-len(str2))
def str_len(str1):
return len(str(str1))
def char_len(str1):
str1_list = set(str(str1).replace(' ',''))
return len(str1_list)
def word_len(str1):
    str1_list = str(str1).split(' ')
return len(str1_list)
stop_words = stopwords.words('english')
def word_match_share(row):
q1words = {}
q2words = {}
for word in str(row['question1']).lower().split():
if word not in stop_words:
q1words[word] = 1
for word in str(row['question2']).lower().split():
if word not in stop_words:
q2words[word] = 1
if len(q1words) == 0 or len(q2words) == 0:
# The computer-generated chaff includes a few questions that are nothing but stopwords
return 0
shared_words_in_q1 = [w for w in q1words.keys() if w in q2words]
shared_words_in_q2 = [w for w in q2words.keys() if w in q1words]
return (len(shared_words_in_q1) + len(shared_words_in_q2))*1.0/(len(q1words) + len(q2words))
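# Illustrative example (hypothetical questions, assuming the standard NLTK English stopword
# list): for "How do I learn Python?" vs "Where can I learn Python?", the non-stopword tokens
# on both sides are {"learn", "python?"} (punctuation is kept, so "python?" and "python" would
# not match), giving word_match_share = (2 + 2) / (2 + 2) = 1.0.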
print('Generate len')
feats = []
train['abs_diff_len'] = train.apply(lambda x:str_abs_diff_len(x['question1'],x['question2']),axis=1)
test['abs_diff_len']= test.apply(lambda x:str_abs_diff_len(x['question1'],x['question2']),axis=1)
feats.append('abs_diff_len')
train['R']=train.apply(word_match_share, axis=1, raw=True)
test['R']=test.apply(word_match_share, axis=1, raw=True)
feats.append('R')
train['common_words'] = train.apply(lambda x: len(set(str(x['question1'])
.lower().split()).intersection(set(str(x['question2']).lower().split()))), axis=1)
test['common_words'] = test.apply(lambda x: len(set(str(x['question1'])
.lower().split()).intersection(set(str(x['question2']).lower().split()))), axis=1)
feats.append('common_words')
for c in ['question1','question2']:
train['%s_char_len'%c] = train[c].apply(lambda x:char_len(x))
test['%s_char_len'%c] = test[c].apply(lambda x:char_len(x))
feats.append('%s_char_len'%c)
train['%s_str_len'%c] = train[c].apply(lambda x:str_len(x))
test['%s_str_len'%c] = test[c].apply(lambda x:str_len(x))
feats.append('%s_str_len'%c)
train['%s_word_len'%c] = train[c].apply(lambda x:word_len(x))
test['%s_word_len'%c] = test[c].apply(lambda x:word_len(x))
feats.append('%s_word_len'%c)
pd.to_pickle(train[feats].values,PATH+"train_len.pkl")
pd.to_pickle(test[feats].values,PATH+"test_len.pkl")
| mit |
erdc-cm/pygrib | test/animate.py | 1 | 1302 | import pygrib, time
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.basemap import Basemap
import matplotlib.animation as animation
# animation example.
grbs = pygrib.open('../sampledata/safrica.grib2')
# grab all "brightness temp" grib messages.
btemps = [grb for grb in grbs if grb['name']=='Brightness temperature']
grb = btemps[0]
lats, lons = grb.latlons()
projd = grb.projparams
grbs.close()
# create a map projection for the domain, plot 1st image on it.
m =\
Basemap(projection=projd['proj'],lat_ts=projd['lat_ts'],lon_0=projd['lon_0'],\
lat_0=projd['lat_0'],rsphere=(projd['a'],projd['b']),\
llcrnrlat=lats[0,0],urcrnrlat=lats[-1,-1],\
llcrnrlon=lons[0,0],urcrnrlon=lons[-1,-1],resolution='i')
fig = plt.figure(figsize=(8,7))
m.drawcoastlines()
m.drawcountries()
grb = btemps[0]
im = m.imshow(grb['values'],interpolation='nearest',vmin=230,vmax=310)
plt.colorbar(orientation='horizontal')
m.drawparallels(np.arange(-80,10,10),labels=[1,0,0,0])
m.drawmeridians(np.arange(-80,81,20),labels=[0,0,0,1])
txt = plt.title(grb,fontsize=8)
def updatefig(nt):
    global im, txt, btemps
grb = btemps[nt]
im.set_data(grb['values'])
txt.set_text(repr(grb))
ani = animation.FuncAnimation(fig, updatefig, frames=len(btemps))
plt.show()
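# To write the animation to disk instead of only displaying it, matplotlib's Animation.save
# could be used here (requires an external writer such as ffmpeg), e.g.:
#   ani.save('brightness_temp.mp4', writer='ffmpeg', fps=2)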
| isc |
naturali/tensorflow | tensorflow/contrib/learn/python/learn/estimators/rnn.py | 6 | 10199 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Recurrent Neural Network estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators.base import TensorFlowEstimator
def null_input_op_fn(x):
"""This function does no transformation on the inputs, used as default."""
return x
class TensorFlowRNNClassifier(TensorFlowEstimator, _sklearn.ClassifierMixin):
"""TensorFlow RNN Classifier model."""
def __init__(self,
rnn_size,
n_classes,
cell_type='gru',
num_layers=1,
input_op_fn=null_input_op_fn,
initial_state=None,
bidirectional=False,
sequence_length=None,
attn_length=None,
attn_size=None,
attn_vec_size=None,
batch_size=32,
steps=50,
optimizer='Adagrad',
learning_rate=0.1,
class_weight=None,
clip_gradients=5.0,
continue_training=False,
config=None,
verbose=1):
"""Initializes a TensorFlowRNNClassifier instance.
Args:
rnn_size: The size for rnn cell, e.g. size of your word embeddings.
cell_type: The type of rnn cell, including rnn, gru, and lstm.
num_layers: The number of layers of the rnn model.
input_op_fn: Function that will transform the input tensor, such as
creating word embeddings, byte list, etc. This takes
an argument x for input and returns transformed x.
bidirectional: boolean, Whether this is a bidirectional rnn.
sequence_length: If sequence_length is provided, dynamic calculation
is performed. This saves computational time when unrolling past max
sequence length.
initial_state: An initial state for the RNN. This must be a tensor of
appropriate type and shape [batch_size x cell.state_size].
attn_length: integer, the size of attention vector attached to rnn cells.
attn_size: integer, the size of an attention window attached to rnn cells.
attn_vec_size: integer, the number of convolutional features calculated on
attention state and the size of the hidden layer built from base cell state.
n_classes: Number of classes in the target.
batch_size: Mini batch size.
steps: Number of steps to run over data.
optimizer: Optimizer name (or class), for example "SGD", "Adam",
"Adagrad".
learning_rate: If this is constant float value, no decay function is
used. Instead, a customized decay function can be passed that accepts
global_step as parameter and returns a Tensor.
e.g. exponential decay function:
````python
def exp_decay(global_step):
return tf.train.exponential_decay(
learning_rate=0.1, global_step,
decay_steps=2, decay_rate=0.001)
````
class_weight: None or list of n_classes floats. Weight associated with
classes for loss computation. If not given, all classes are
supposed to have weight one.
continue_training: when continue_training is True, once initialized
        model will be continually trained on every call of fit.
config: RunConfig object that controls the configurations of the session,
e.g. num_cores, gpu_memory_fraction, etc.
"""
self.rnn_size = rnn_size
self.cell_type = cell_type
self.input_op_fn = input_op_fn
self.bidirectional = bidirectional
self.num_layers = num_layers
self.sequence_length = sequence_length
self.initial_state = initial_state
self.attn_length = attn_length
self.attn_size = attn_size
self.attn_vec_size = attn_vec_size
super(TensorFlowRNNClassifier, self).__init__(
model_fn=self._model_fn,
n_classes=n_classes,
batch_size=batch_size,
steps=steps,
optimizer=optimizer,
learning_rate=learning_rate,
class_weight=class_weight,
clip_gradients=clip_gradients,
continue_training=continue_training,
config=config,
verbose=verbose)
def _model_fn(self, x, y):
return models.get_rnn_model(self.rnn_size, self.cell_type, self.num_layers,
self.input_op_fn, self.bidirectional,
models.logistic_regression,
self.sequence_length, self.initial_state,
self.attn_length, self.attn_size,
self.attn_vec_size)(x, y)
@property
def bias_(self):
"""Returns bias of the rnn layer."""
return self.get_variable_value('logistic_regression/bias')
@property
def weights_(self):
"""Returns weights of the rnn layer."""
return self.get_variable_value('logistic_regression/weights')
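# --- Usage sketch (illustrative only, not part of the original API docs) ---
# A minimal example of how this classifier might be driven. The data shapes,
# the helper ``rnn_input_op_fn`` and the use of ``tf.unstack`` are assumptions
# made for illustration; in practice input_op_fn has to turn each batch into
# the per-timestep representation the chosen cell expects.
#
#   import numpy as np
#   import tensorflow as tf
#
#   def rnn_input_op_fn(x):
#       # split a [batch, time, features] tensor into a list of time steps
#       return tf.unstack(x, axis=1)
#
#   X = np.random.rand(64, 10, 8).astype(np.float32)  # hypothetical data
#   y = np.random.randint(0, 2, 64)
#   clf = TensorFlowRNNClassifier(rnn_size=16, n_classes=2, cell_type='lstm',
#                                 input_op_fn=rnn_input_op_fn, steps=200)
#   clf.fit(X, y)
#   y_pred = clf.predict(X)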
class TensorFlowRNNRegressor(TensorFlowEstimator, _sklearn.RegressorMixin):
"""TensorFlow RNN Regressor model."""
def __init__(self,
rnn_size,
cell_type='gru',
num_layers=1,
input_op_fn=null_input_op_fn,
initial_state=None,
bidirectional=False,
sequence_length=None,
attn_length=None,
attn_size=None,
attn_vec_size=None,
n_classes=0,
batch_size=32,
steps=50,
optimizer='Adagrad',
learning_rate=0.1,
clip_gradients=5.0,
continue_training=False,
config=None,
verbose=1):
"""Initializes a TensorFlowRNNRegressor instance.
Args:
rnn_size: The size for rnn cell, e.g. size of your word embeddings.
cell_type: The type of rnn cell, including rnn, gru, and lstm.
num_layers: The number of layers of the rnn model.
input_op_fn: Function that will transform the input tensor, such as
creating word embeddings, byte list, etc. This takes
an argument x for input and returns transformed x.
bidirectional: boolean, whether this is a bidirectional rnn.
sequence_length: If sequence_length is provided, dynamic calculation
is performed. This saves computational time when unrolling past max
sequence length.
attn_length: integer, the size of attention vector attached to rnn cells.
attn_size: integer, the size of an attention window attached to rnn cells.
attn_vec_size: integer, the number of convolutional features calculated on
attention state and the size of the hidden layer built from base cell state.
initial_state: An initial state for the RNN. This must be a tensor of
appropriate type and shape [batch_size x cell.state_size].
batch_size: Mini batch size.
steps: Number of steps to run over data.
optimizer: Optimizer name (or class), for example "SGD", "Adam",
"Adagrad".
learning_rate: If this is a constant float value, no decay function is
used. Alternatively, a customized decay function can be passed that
accepts global_step as a parameter and returns a Tensor,
e.g. an exponential decay function:
````python
def exp_decay(global_step):
return tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=2, decay_rate=0.001)
````
continue_training: when continue_training is True, the model, once
initialized, is trained further on every call of fit.
config: RunConfig object that controls the configurations of the
session, e.g. num_cores, gpu_memory_fraction, etc.
verbose: Controls the verbosity, possible values:
* 0: the algorithm and debug information is muted.
* 1: trainer prints the progress.
* 2: log device placement is printed.
"""
self.rnn_size = rnn_size
self.cell_type = cell_type
self.input_op_fn = input_op_fn
self.bidirectional = bidirectional
self.num_layers = num_layers
self.sequence_length = sequence_length
self.initial_state = initial_state
self.attn_length = attn_length
self.attn_size = attn_size
self.attn_vec_size = attn_vec_size
super(TensorFlowRNNRegressor, self).__init__(
model_fn=self._model_fn,
n_classes=n_classes,
batch_size=batch_size,
steps=steps,
optimizer=optimizer,
learning_rate=learning_rate,
clip_gradients=clip_gradients,
continue_training=continue_training,
config=config,
verbose=verbose)
def _model_fn(self, x, y):
return models.get_rnn_model(self.rnn_size, self.cell_type, self.num_layers,
self.input_op_fn, self.bidirectional,
models.linear_regression, self.sequence_length,
self.initial_state, self.attn_length,
self.attn_size, self.attn_vec_size)(x, y)
@property
def bias_(self):
"""Returns bias of the rnn layer."""
return self.get_variable_value('linear_regression/bias')
@property
def weights_(self):
"""Returns weights of the rnn layer."""
return self.get_variable_value('linear_regression/weights')
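# --- Usage sketch (illustrative only) ---
# Analogous to the classifier sketch above; the target is continuous and
# ``n_classes`` stays at its default of 0 for regression. ``y_continuous`` and
# the reuse of ``rnn_input_op_fn`` are assumptions for illustration.
#
#   reg = TensorFlowRNNRegressor(rnn_size=16, cell_type='gru',
#                                input_op_fn=rnn_input_op_fn, steps=200)
#   reg.fit(X, y_continuous)
#   y_hat = reg.predict(X)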
| apache-2.0 |
Titan-C/scikit-learn | setup.py | 9 | 10275 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
from pkg_resources import parse_version
import traceback
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = 'amueller@ais.uni-bonn.de'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
SCIPY_MIN_VERSION = '0.13.3'
NUMPY_MIN_VERSION = '1.8.2'
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
extras_require={
'alldeps': (
'numpy >= {0}'.format(NUMPY_MIN_VERSION),
'scipy >= {0}'.format(SCIPY_MIN_VERSION),
),
},
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
# Remove c files if we are not within a sdist package
cwd = os.path.abspath(os.path.dirname(__file__))
remove_c_files = not os.path.exists(os.path.join(cwd, 'PKG-INFO'))
if remove_c_files:
print('Will remove generated .c files')
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if any(filename.endswith(suffix) for suffix in
(".so", ".pyd", ".dll", ".pyc")):
os.unlink(os.path.join(dirpath, filename))
continue
extension = os.path.splitext(filename)[1]
if remove_c_files and extension in ['.c', '.cpp']:
pyx_file = str.replace(filename, extension, '.pyx')
if os.path.exists(os.path.join(dirpath, pyx_file)):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
def get_scipy_status():
"""
Returns a dictionary containing a boolean specifying whether SciPy
is up-to-date, along with the version string (empty string if
not installed).
"""
scipy_status = {}
try:
import scipy
scipy_version = scipy.__version__
scipy_status['up_to_date'] = parse_version(
scipy_version) >= parse_version(SCIPY_MIN_VERSION)
scipy_status['version'] = scipy_version
except ImportError:
traceback.print_exc()
scipy_status['up_to_date'] = False
scipy_status['version'] = ""
return scipy_status
def get_numpy_status():
"""
Returns a dictionary containing a boolean specifying whether NumPy
is up-to-date, along with the version string (empty string if
not installed).
"""
numpy_status = {}
try:
import numpy
numpy_version = numpy.__version__
numpy_status['up_to_date'] = parse_version(
numpy_version) >= parse_version(NUMPY_MIN_VERSION)
numpy_status['version'] = numpy_version
except ImportError:
traceback.print_exc()
numpy_status['up_to_date'] = False
numpy_status['version'] = ""
return numpy_status
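# Illustration only (not part of the build flow): the status dicts returned by
# the two helpers above can be inspected directly, e.g.
#
#   numpy_status = get_numpy_status()
#   if not numpy_status['up_to_date']:
#       print("Found NumPy %s, need >= %s"
#             % (numpy_status['version'], NUMPY_MIN_VERSION))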
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if len(sys.argv) == 1 or (
len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg_info',
'--version',
'clean'))):
# For these actions, NumPy is not required
#
# They are required to succeed without Numpy for example when
# pip is used to install Scikit-learn when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
numpy_status = get_numpy_status()
numpy_req_str = "scikit-learn requires NumPy >= {0}.\n".format(
NUMPY_MIN_VERSION)
scipy_status = get_scipy_status()
scipy_req_str = "scikit-learn requires SciPy >= {0}.\n".format(
SCIPY_MIN_VERSION)
instructions = ("Installation instructions are available on the "
"scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if numpy_status['up_to_date'] is False:
if numpy_status['version']:
raise ImportError("Your installation of Numerical Python "
"(NumPy) {0} is out-of-date.\n{1}{2}"
.format(numpy_status['version'],
numpy_req_str, instructions))
else:
raise ImportError("Numerical Python (NumPy) is not "
"installed.\n{0}{1}"
.format(numpy_req_str, instructions))
if scipy_status['up_to_date'] is False:
if scipy_status['version']:
raise ImportError("Your installation of Scientific Python "
"(SciPy) {0} is out-of-date.\n{1}{2}"
.format(scipy_status['version'],
scipy_req_str, instructions))
else:
raise ImportError("Scientific Python (SciPy) is not "
"installed.\n{0}{1}"
.format(scipy_req_str, instructions))
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
kjung/scikit-learn | sklearn/model_selection/_split.py | 2 | 57489 | """
The :mod:`sklearn.model_selection._split` module includes classes and
functions to split the data based on a preset strategy.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>,
# Raghav R V <rvraghav93@gmail.com>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
import inspect
from itertools import chain, combinations
from collections import Iterable
from math import ceil, floor
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.misc import comb
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.validation import _num_samples, column_or_1d
from ..utils.multiclass import type_of_target
from ..externals.six import with_metaclass
from ..externals.six.moves import zip
from ..utils.fixes import bincount
from ..utils.fixes import signature
from ..base import _pprint
from ..gaussian_process.kernels import Kernel as GPKernel
__all__ = ['BaseCrossValidator',
'KFold',
'LabelKFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'LabelShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'train_test_split',
'check_cv']
class BaseCrossValidator(with_metaclass(ABCMeta)):
"""Base class for all cross-validators
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
"""
def __init__(self):
# We need this for the build_repr to work properly in py2.7
# see #6304
pass
def split(self, X, y=None, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, of length n_samples
The target variable for supervised learning problems.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, labels = indexable(X, y, labels)
indices = np.arange(_num_samples(X))
for test_index in self._iter_test_masks(X, y, labels):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self, X=None, y=None, labels=None):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices(X, y, labels)
"""
for test_index in self._iter_test_indices(X, y, labels):
test_mask = np.zeros(_num_samples(X), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, X=None, y=None, labels=None):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
@abstractmethod
def get_n_splits(self, X=None, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator"""
def __repr__(self):
return _build_repr(self)
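# A minimal sketch of a custom cross-validator built on the base class above
# (illustrative only; ``EveryOtherSample`` is a made-up name):
#
#   class EveryOtherSample(BaseCrossValidator):
#       """Two splits: even-indexed samples, then odd-indexed samples."""
#       def _iter_test_indices(self, X, y=None, labels=None):
#           n_samples = _num_samples(X)
#           yield np.arange(0, n_samples, 2)
#           yield np.arange(1, n_samples, 2)
#       def get_n_splits(self, X=None, y=None, labels=None):
#           return 2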
class LeaveOneOut(BaseCrossValidator):
"""Leave-One-Out cross-validator
Provides train/test indices to split data in train/test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_folds=n)`` and
``LeavePOut(p=1)`` where ``n`` is the number of samples.
Due to the high number of test sets (which is the same as the
number of samples) this cross-validation method can be very costly.
For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit`
or :class:`StratifiedKFold`.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneOut
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = LeaveOneOut()
>>> loo.get_n_splits(X)
2
>>> print(loo)
LeaveOneOut()
>>> for train_index, test_index in loo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut
For splitting the data according to explicit, domain-specific
stratification of the dataset.
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def _iter_test_indices(self, X, y=None, labels=None):
return range(_num_samples(X))
def get_n_splits(self, X, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return _num_samples(X)
class LeavePOut(BaseCrossValidator):
"""Leave-P-Out cross-validator
Provides train/test indices to split data in train/test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(p)`` is NOT equivalent to
``KFold(n_folds=n_samples // p)`` which creates non-overlapping test sets.
Due to the high number of iterations, which grows combinatorially with the
number of samples, this cross-validation method can be very costly. For
large datasets one should favor :class:`KFold`, :class:`StratifiedKFold`
or :class:`ShuffleSplit`.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
p : int
Size of the test sets.
Examples
--------
>>> from sklearn.model_selection import LeavePOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = LeavePOut(2)
>>> lpo.get_n_splits(X)
6
>>> print(lpo)
LeavePOut(p=2)
>>> for train_index, test_index in lpo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, p):
self.p = p
def _iter_test_indices(self, X, y=None, labels=None):
for combination in combinations(range(_num_samples(X)), self.p):
yield np.array(combination)
def get_n_splits(self, X, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return int(comb(_num_samples(X), self.p, exact=True))
class _BaseKFold(with_metaclass(ABCMeta, BaseCrossValidator)):
"""Base class for KFold and StratifiedKFold"""
@abstractmethod
def __init__(self, n_folds, shuffle, random_state):
if not isinstance(n_folds, numbers.Integral):
raise ValueError('The number of folds must be of Integral type. '
'%s of type %s was passed.'
% (n_folds, type(n_folds)))
n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross-validation requires at least one"
" train/test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.n_folds = n_folds
self.shuffle = shuffle
self.random_state = random_state
def split(self, X, y=None, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, labels = indexable(X, y, labels)
n_samples = _num_samples(X)
if self.n_folds > n_samples:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(self.n_folds,
n_samples))
for train, test in super(_BaseKFold, self).split(X, y, labels):
yield train, test
def get_n_splits(self, X=None, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_folds
class KFold(_BaseKFold):
"""K-Folds cross-validator
Provides train/test indices to split data in train/test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used once as a validation while the k - 1 remaining
folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(n_folds=2)
>>> kf.get_n_splits(X)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
KFold(n_folds=2, random_state=None, shuffle=False)
>>> for train_index, test_index in kf.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first ``n_samples % n_folds`` folds have size
``n_samples // n_folds + 1``, other folds have size
``n_samples // n_folds``, where ``n_samples`` is the number of samples.
See also
--------
StratifiedKFold
Takes label information into account to avoid building folds with
imbalanced class distributions (for binary or multiclass
classification tasks).
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n_folds, shuffle, random_state)
self.shuffle = shuffle
def _iter_test_indices(self, X, y=None, labels=None):
n_samples = _num_samples(X)
indices = np.arange(n_samples)
if self.shuffle:
check_random_state(self.random_state).shuffle(indices)
n_folds = self.n_folds
fold_sizes = (n_samples // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n_samples % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
class LabelKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping labels.
The same label will not appear in two different folds (the number of
distinct labels has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct labels is approximately the same in each fold.
Parameters
----------
n_folds : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.model_selection import LabelKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> labels = np.array([0, 0, 2, 2])
>>> label_kfold = LabelKFold(n_folds=2)
>>> label_kfold.get_n_splits(X, y, labels)
2
>>> print(label_kfold)
LabelKFold(n_folds=2)
>>> for train_index, test_index in label_kfold.split(X, y, labels):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneLabelOut
For splitting the data according to explicit domain-specific
stratification of the dataset.
"""
def __init__(self, n_folds=3):
super(LabelKFold, self).__init__(n_folds, shuffle=False,
random_state=None)
def _iter_test_indices(self, X, y, labels):
if labels is None:
raise ValueError("The labels parameter should not be None")
unique_labels, labels = np.unique(labels, return_inverse=True)
n_labels = len(unique_labels)
if self.n_folds > n_labels:
raise ValueError("Cannot have number of folds n_folds=%d greater"
" than the number of labels: %d."
% (self.n_folds, n_labels))
# Weight labels by their number of occurrences
n_samples_per_label = np.bincount(labels)
# Distribute the most frequent labels first
indices = np.argsort(n_samples_per_label)[::-1]
n_samples_per_label = n_samples_per_label[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(self.n_folds)
# Mapping from label index to fold index
label_to_fold = np.zeros(len(unique_labels))
# Distribute samples by adding the largest weight to the lightest fold
for label_index, weight in enumerate(n_samples_per_label):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
label_to_fold[indices[label_index]] = lightest_fold
indices = label_to_fold[labels]
for f in range(self.n_folds):
yield np.where(indices == f)[0]
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a variation of KFold that returns
stratified folds. The folds are made by preserving the percentage of
samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(n_folds=2)
>>> skf.get_n_splits(X, y)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
StratifiedKFold(n_folds=2, random_state=None, shuffle=False)
>>> for train_index, test_index in skf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
All the folds have size ``trunc(n_samples / n_folds)``; the last one
contains the remainder.
"""
def __init__(self, n_folds=3, shuffle=False, random_state=None):
super(StratifiedKFold, self).__init__(n_folds, shuffle, random_state)
self.shuffle = shuffle
def _make_test_folds(self, X, y=None, labels=None):
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
y = np.asarray(y)
n_samples = y.shape[0]
unique_y, y_inversed = np.unique(y, return_inverse=True)
y_counts = bincount(y_inversed)
min_labels = np.min(y_counts)
if np.all(self.n_folds > y_counts):
raise ValueError("All the n_labels for individual classes"
" are less than %d folds."
% (self.n_folds))
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each class so as to respect the balance of
# classes
# NOTE: Passing the data corresponding to ith class say X[y==class_i]
# will break when the data is not 100% stratifiable for all classes.
# So we pass np.zeros(max(c, n_folds)) as data to the KFold
per_cls_cvs = [
KFold(self.n_folds, shuffle=self.shuffle,
random_state=rng).split(np.zeros(max(count, self.n_folds)))
for count in y_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
for cls, (_, test_split) in zip(unique_y, per_cls_splits):
cls_test_folds = test_folds[y == cls]
# the test split can be too big because we used
# KFold(...).split(X[:max(c, n_folds)]) when data is not 100%
# stratifiable for all the classes
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(cls_test_folds)]
cls_test_folds[test_split] = test_fold_indices
test_folds[y == cls] = cls_test_folds
return test_folds
def _iter_test_masks(self, X, y=None, labels=None):
test_folds = self._make_test_folds(X, y)
for i in range(self.n_folds):
yield test_folds == i
def split(self, X, y, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
return super(StratifiedKFold, self).split(X, y, labels)
class LeaveOneLabelOut(BaseCrossValidator):
"""Leave One Label Out cross-validator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneLabelOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = LeaveOneLabelOut()
>>> lol.get_n_splits(X, y, labels)
2
>>> print(lol)
LeaveOneLabelOut()
>>> for train_index, test_index in lol.split(X, y, labels):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def _iter_test_masks(self, X, y, labels):
if labels is None:
raise ValueError("The labels parameter should not be None")
# We make a copy of labels to avoid side-effects during iteration
labels = np.array(labels, copy=True)
unique_labels = np.unique(labels)
for i in unique_labels:
yield labels == i
def get_n_splits(self, X, y, labels):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if labels is None:
raise ValueError("The labels parameter should not be None")
return len(np.unique(labels))
class LeavePLabelOut(BaseCrossValidator):
"""Leave P Labels Out cross-validator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same labels.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_labels : int
Number of labels (``p``) to leave out in the test split.
Examples
--------
>>> from sklearn.model_selection import LeavePLabelOut
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = LeavePLabelOut(n_labels=2)
>>> lpl.get_n_splits(X, y, labels)
3
>>> print(lpl)
LeavePLabelOut(n_labels=2)
>>> for train_index, test_index in lpl.split(X, y, labels):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, n_labels):
self.n_labels = n_labels
def _iter_test_masks(self, X, y, labels):
if labels is None:
raise ValueError("The labels parameter should not be None")
labels = np.array(labels, copy=True)
unique_labels = np.unique(labels)
combi = combinations(range(len(unique_labels)), self.n_labels)
for indices in combi:
test_index = np.zeros(_num_samples(X), dtype=np.bool)
for l in unique_labels[np.array(indices)]:
test_index[labels == l] = True
yield test_index
def get_n_splits(self, X, y, labels):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if labels is None:
raise ValueError("The labels parameter should not be None")
return int(comb(len(np.unique(labels)), self.n_labels, exact=True))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
_validate_shuffle_split_init(test_size, train_size)
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
def split(self, X, y=None, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, labels = indexable(X, y, labels)
for train, test in self._iter_indices(X, y, labels):
yield train, test
@abstractmethod
def _iter_indices(self, X, y=None, labels=None):
"""Generate (train, test) indices"""
def get_n_splits(self, X=None, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_iter
def __repr__(self):
return _build_repr(self)
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validator
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float, int, or None, default 0.1
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import ShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> rs = ShuffleSplit(n_iter=3, test_size=.25, random_state=0)
>>> rs.get_n_splits(X)
3
>>> print(rs)
ShuffleSplit(n_iter=3, random_state=0, test_size=0.25, train_size=None)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = ShuffleSplit(n_iter=3, train_size=0.5, test_size=.25,
... random_state=0)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self, X, y=None, labels=None):
n_samples = _num_samples(X)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test:(n_test + n_train)]
yield ind_train, ind_test
class LabelShuffleSplit(ShuffleSplit):
'''Shuffle-Labels-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided label. This label information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LabelShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique labels,
whereas LabelShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique labels.
For example, a less computationally intensive alternative to
``LeavePLabelOut(p=10)`` would be
``LabelShuffleSplit(test_size=10, n_iter=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to labels, and
not to samples, as in ShuffleSplit.
Parameters
----------
n_iter : int (default 5)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the test split. If
int, represents the absolute number of test labels. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the train split. If
int, represents the absolute number of train labels. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
'''
def __init__(self, n_iter=5, test_size=0.2, train_size=None,
random_state=None):
super(LabelShuffleSplit, self).__init__(
n_iter=n_iter,
test_size=test_size,
train_size=train_size,
random_state=random_state)
def _iter_indices(self, X, y, labels):
if labels is None:
raise ValueError("The labels parameter should not be None")
classes, label_indices = np.unique(labels, return_inverse=True)
for label_train, label_test in super(
LabelShuffleSplit, self)._iter_indices(X=classes):
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(label_indices, label_train))
test = np.flatnonzero(np.in1d(label_indices, label_test))
yield train, test
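# Usage sketch (illustrative only): train and test labels never overlap, and
# ``test_size`` counts labels rather than samples.
#
#   X = np.ones((8, 2))
#   y = np.zeros(8)
#   labels = np.array([1, 1, 2, 2, 3, 3, 4, 4])
#   lss = LabelShuffleSplit(n_iter=4, test_size=0.5, random_state=0)
#   for train_index, test_index in lss.split(X, y, labels):
#       assert set(labels[train_index]).isdisjoint(labels[test_index])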
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(n_iter=3, test_size=0.5, random_state=0)
>>> sss.get_n_splits(X, y)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(n_iter=3, random_state=0, ...)
>>> for train_index, test_index in sss.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
n_iter, test_size, train_size, random_state)
def _iter_indices(self, X, y, labels=None):
n_samples = _num_samples(X)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
classes, y_indices = np.unique(y, return_inverse=True)
n_classes = classes.shape[0]
class_counts = bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if n_train < n_classes:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_train, n_classes))
if n_test < n_classes:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_test, n_classes))
rng = check_random_state(self.random_state)
p_i = class_counts / float(n_samples)
n_i = np.round(n_train * p_i).astype(int)
t_i = np.minimum(class_counts - n_i,
np.round(n_test * p_i).astype(int))
for _ in range(self.n_iter):
train = []
test = []
for i, class_i in enumerate(classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = np.where((y == class_i))[0][permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
# divisors of the number of elements per class), we may end
# up here with fewer samples in train and test than asked for.
if len(train) < n_train or len(test) < n_test:
# We complete by randomly assigning the missing indexes
missing_indices = np.where(bincount(train + test,
minlength=len(y)) == 0)[0]
missing_indices = rng.permutation(missing_indices)
train.extend(missing_indices[:(n_train - len(train))])
test.extend(missing_indices[-(n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def split(self, X, y, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
return super(StratifiedShuffleSplit, self).split(X, y, labels)
def _validate_shuffle_split_init(test_size, train_size):
"""Validation helper to check the test_size and train_size at init
NOTE: This does not take into account the number of samples, which is
known only at split time
"""
if test_size is None and train_size is None:
raise ValueError('test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif (np.asarray(test_size).dtype.kind == 'f' and
(train_size + test_size) > 1.):
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for train_size: %r" % train_size)
def _validate_shuffle_split(n_samples, test_size, train_size):
"""
Validation helper to check if the train/test sizes are meaningful with
respect to the size of the data (n_samples)
"""
if (test_size is not None and np.asarray(test_size).dtype.kind == 'i'
and test_size >= n_samples):
raise ValueError('test_size=%d should be smaller than the number of '
'samples %d' % (test_size, n_samples))
if (train_size is not None and np.asarray(train_size).dtype.kind == 'i'
and train_size >= n_samples):
raise ValueError("train_size=%d should be smaller than the number of"
" samples %d" % (train_size, n_samples))
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n_samples)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n_samples - n_test
elif np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n_samples)
else:
n_train = float(train_size)
if test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n_samples))
return int(n_train), int(n_test)
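# Worked example of the arithmetic above (illustrative): with n_samples=10,
# test_size=0.25 and train_size=None, n_test = ceil(2.5) = 3 and
# n_train = 10 - 3 = 7, so the function returns (7, 3).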
class PredefinedSplit(BaseCrossValidator):
"""Predefined split cross-validator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> test_fold = [0, 1, -1, 1]
>>> ps = PredefinedSplit(test_fold)
>>> ps.get_n_splits()
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
PredefinedSplit(test_fold=array([ 0, 1, -1, 1]))
>>> for train_index, test_index in ps.split():
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def split(self, X=None, y=None, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
ind = np.arange(len(self.test_fold))
for test_index in self._iter_test_masks():
train_index = ind[np.logical_not(test_index)]
test_index = ind[test_index]
yield train_index, test_index
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets."""
for f in self.unique_folds:
test_index = np.where(self.test_fold == f)[0]
test_mask = np.zeros(len(self.test_fold), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def get_n_splits(self, X=None, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.unique_folds)
class _CVIterableWrapper(BaseCrossValidator):
"""Wrapper class for old style cv objects and iterables."""
def __init__(self, cv):
self.cv = cv
def get_n_splits(self, X=None, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.cv) # Both iterables and old-cv objects support len
def split(self, X=None, y=None, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
for train, test in self.cv:
yield train, test
def check_cv(cv=3, y=None, classifier=False):
"""Input checker utility for building a cross-validator
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if classifier is True and ``y`` is either
binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
y : array-like, optional
The target variable for supervised learning problems.
classifier : boolean, optional, default False
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validator instance.
The return value is a cross-validator which generates the train/test
splits via the ``split`` method.
"""
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if (classifier and (y is not None) and
(type_of_target(y) in ('binary', 'multiclass'))):
return StratifiedKFold(cv)
else:
return KFold(cv)
if not hasattr(cv, 'split') or isinstance(cv, str):
if not isinstance(cv, Iterable) or isinstance(cv, str):
raise ValueError("Expected cv as an integer, cross-validation "
"object (from sklearn.model_selection) "
"or an iterable. Got %s." % cv)
return _CVIterableWrapper(cv)
return cv # New style cv objects are passed without any modification
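# Usage sketch (illustrative only; assumes ``check_cv`` is imported from
# ``sklearn.model_selection`` as listed in ``__all__`` above):
#
#   cv = check_cv(5, y=[0, 1, 0, 1, 0, 1], classifier=True)  # StratifiedKFold(5)
#   cv = check_cv(4)                                         # KFold(4)
#   cv = check_cv([(np.array([0, 1]), np.array([2]))])       # wrapped iterable
#   n_splits = cv.get_n_splits()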
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(ShuffleSplit().split(X, y))`` and application to input data
into a single call for splitting (and optionally subsampling) data in a
one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
.. versionadded:: 0.16
preserves input type instead of always casting to numpy array.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
Returns
-------
splitting : list, length=2 * len(arrays)
List containing train-test split of inputs.
.. versionadded:: 0.16
Output type is the same as the input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
CVClass = StratifiedShuffleSplit
else:
CVClass = ShuffleSplit
cv = CVClass(test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(cv.split(X=arrays[0], y=stratify))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
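# Illustrative sketch (not library code) of the ``stratify`` option documented above:
# passing ``stratify=y`` preserves the class proportions of ``y`` on both sides of the
# split. The function name and the toy arrays below are assumptions for illustration.
def _example_stratified_split():
    X = np.arange(20).reshape(10, 2)
    y = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1])  # imbalanced labels (8 vs 2)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.5, random_state=0, stratify=y)
    # Each half keeps the 4:1 ratio of class 0 to class 1.
    return np.bincount(y_train), np.bincount(y_test)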
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if (hasattr(estimator, 'kernel') and callable(estimator.kernel) and
not isinstance(estimator.kernel, GPKernel)):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[index] for index in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
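# Illustrative sketch (not library code) of the ``np.ix_`` indexing that ``_safe_split``
# uses for pairwise estimators: rows select the evaluation samples and columns select
# the training samples of a precomputed kernel matrix. Variable names are invented here.
def _example_pairwise_kernel_subset():
    K = np.arange(16).reshape(4, 4)              # stand-in for a 4x4 precomputed kernel
    train_idx = np.array([0, 1, 2])
    test_idx = np.array([3])
    K_fit = K[np.ix_(train_idx, train_idx)]      # 3x3 block used for fitting
    K_predict = K[np.ix_(test_idx, train_idx)]   # 1x3 block used for prediction
    return K_fit.shape, K_predict.shape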
def _build_repr(self):
# XXX This is copied from BaseEstimator's get_params
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
# Ignore varargs, kw and default values and pop self
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
if init is object.__init__:
args = []
else:
args = sorted([p.name for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD])
class_name = self.__class__.__name__
params = dict()
for key in args:
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
params[key] = value
return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))
| bsd-3-clause |
fermiPy/lcpipe | fpLC_one.py | 1 | 2608 | ## script to plot the LC output
## from fermipy, given a .npy file
## Sara Buson, Oct. 2017
## very basics, more coming
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
import sys
def plotLC(lc, f_scale=1e-8, save=False):
plt.rcParams['legend.handlelength'] = 2.4
plt.rcParams['legend.numpoints'] = 1
plt.rcParams['legend.handletextpad']=0.9
plt.rcParams['legend.markerscale']=0
#plt.rcParams['lines.linewidth']=0
left = 0.075 # the left side of the subplots of the figure
right = 0.975 # the right side of the subplots of the figure
bottom = 0.06 # the bottom of the subplots of the figure
top = 0.95 # the top of the subplots of the figure
wspace = 0.08 # the amount of width reserved for blank space between subplots
hspace = 0.3 # the amount of height reserved for white space between subplots
grid_size = (1, 1)
fig, axs = plt.subplots(nrows=1, ncols=1, sharex=False,figsize=(12,8))
""" FERMIPY LC """
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
""" -- reading the LC output --- """
s=lc.split('/')
src=s[-1].split('_lightcurve.npy')[0]
o = np.load(lc).flat[0]
ts = o['ts']
mjd=o['tmin_mjd']
mjd_width = mjd[1]-mjd[0]
mjd_middle=mjd+mjd_width
flux=o['flux']/f_scale
flux_err=o['flux_err']/f_scale
flux_ul = o['flux_ul95']/f_scale
f_scale_lab=str(f_scale).split('-0')[-1]
ax0 =plt.subplot2grid(grid_size, (0, 0), rowspan=1, colspan=1) ## <<--------
ax0.set_ylabel('[$10^{-%s} ph cm^{-2} s^{-1}$]'%f_scale_lab)
ax0.set_xlabel('Time [MJD]')
ax0.grid()
ts_mask = ts>4
ts_mask = np.asarray(ts_mask)
plt.errorbar(mjd_middle[ts_mask],flux[ts_mask], xerr=mjd_width, yerr=flux_err[ts_mask],
color='orange',marker='o',markersize=4,ls='none',label='%s (%i-day binning)'%(src,mjd_width))#,xnewF, F_(xnewF),'-',xnewF1, F_(xnewF1),'-',lw=2,label='LAT',color='green')#, xnew, f2(xnew), '--')
plt.plot(mjd_middle[~ts_mask],flux_ul[~ts_mask],color='grey',marker='v', ls='none',label='95% upper limits, if TS<4')
## coming..
## to be included if the Sun is within the ROI
## plt. plot([timeSun0,timeSun0],[0,5], label='SUN',ls='dashed', c='red',linewidth=2.0)
leg0 = ax0.legend()
plt.legend(loc='upper left')
ax0.axes.get_xaxis().set_visible(True)
plt.subplots_adjust(left=left, bottom=bottom, right=right, top=top,
wspace=wspace, hspace=hspace)
if save==True: plt.savefig('%s_LC.pdf'%src,transparent=True)
plt.show()
if __name__ == "__main__":
#try:
lcfile=sys.argv[1]
plotLC(lcfile,save=True)
#except: print 'usage:: python LC_file.npy'
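## Example invocation (illustrative only; the light-curve file name below is an
## assumption that follows the "<source>_lightcurve.npy" convention used above):
##   python fpLC_one.py 3C279_lightcurve.npy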
| bsd-3-clause |
peastman/msmbuilder | msmbuilder/project_templates/dihedrals/featurize-plot.py | 9 | 1427 | """Plot diagnostic feature info
{{header}}
"""
# ? include "plot_header.template"
# ? from "plot_macros.template" import xdg_open with context
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
from msmbuilder.io import load_trajs
sns.set_style('ticks')
colors = sns.color_palette()
## Load
meta, ftrajs = load_trajs('ftrajs')
# (stride by 100 for memory concerns)
fxx = np.concatenate([fx[::100] for fx in ftrajs.values()])
## Box and whisker plot
def plot_box(ax):
n_feats_plot = min(fxx.shape[1], 100)
ax.boxplot(fxx[:, :100],
boxprops={'color': colors[0]},
whiskerprops={'color': colors[0]},
capprops={'color': colors[0]},
medianprops={'color': colors[2]},
)
if fxx.shape[1] > 100:
ax.annotate("(Only showing the first 100 features)",
xy=(0.05, 0.95),
xycoords='axes fraction',
fontsize=14,
va='top',
)
ax.set_xlabel("Feature Index", fontsize=16)
xx = np.arange(0, n_feats_plot, 10)
ax.set_xticks(xx)
ax.set_xticklabels([str(x) for x in xx])
ax.set_xlim((0, n_feats_plot + 1))
ax.set_ylabel("Feature Value", fontsize=16)
## Plot
fig, ax = plt.subplots(figsize=(15, 5))
plot_box(ax)
fig.tight_layout()
fig.savefig("ftrajs-box.pdf")
# {{ xdg_open('ftrajs-box.pdf') }}
| lgpl-2.1 |
jorik041/scikit-learn | sklearn/tests/test_kernel_approximation.py | 244 | 7588 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
    # abbreviations to make the formula below easier to read
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
c = 0.03
    # abbreviations to make the formula below easier to read
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array is n_samples_x x n_samples_y big x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
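# Illustrative sketch, not an actual test: RBFSampler is typically consumed as an
# approximate feature map in front of a linear model. This only assumes the public
# make_pipeline and SGDClassifier APIs; the labels are an arbitrary threshold on the
# module-level X defined above, chosen purely for illustration.
def _example_rbf_sampler_pipeline():
    from sklearn.pipeline import make_pipeline
    from sklearn.linear_model import SGDClassifier
    y_toy = (X[:, 0] > X[:, 0].mean()).astype(int)  # arbitrary binary target
    clf = make_pipeline(RBFSampler(gamma=1., n_components=100, random_state=0),
                        SGDClassifier(random_state=0))
    clf.fit(X, y_toy)
    return clf.score(X, y_toy)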
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
    assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
eriksonJAguiar/TCC-UENP-Codigos | My_codes/implementacao-bigdataset/sent_classification_module.py | 1 | 15014 | # Classification models
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn import tree
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn import svm
from sklearn.model_selection import GridSearchCV
# Metrics
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import mean_squared_error
from sklearn.metrics import roc_curve,auc,roc_auc_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import confusion_matrix
# Other sklearn utilities
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_predict, KFold,GroupKFold
from sklearn.preprocessing import label_binarize
from sklearn.feature_extraction.text import TfidfTransformer
import matplotlib.pyplot as plt
import itertools
import numpy as np
import pandas as pd
import nltk
#nltk.download('stopwords')
import re
import csv
import json
import sys
import statistics
import math
from datetime import datetime
from class_roc import Roc
from unicodedata import normalize
class SentClassifiers():
def mread_csv(self,file):
df1 = None
try:
df1 = pd.read_csv('datasets/%s'%(file),sep=';',index_col=0,encoding ='ISO-8859-1',engine='python')
except:
df1 = pd.read_csv('datasets/%s'%(file),sep='\t',index_col=0,encoding ='ISO-8859-1',engine='python')
df1 = df1.reset_index()
return df1
def write_csv(self,data,file):
df = pd.DataFrame(data)
df.to_csv(file+'.csv', mode='a', sep=';',index=False, header=False)
def getSTrain():
tweets = db['sentiment_train'].find({},{'_id':0, 'index':0})
return tweets
def convert_df(self,df):
new_df = []
for d in df:
if d == 'Positivo' or d =='Positive':
new_df.append(1)
elif d == 'Neutro' or d =='Neutral':
new_df.append(0)
elif d == 'Negativo' or d == 'Negative':
new_df.append(-1)
return new_df
def clear(self,dataframe):
new_df = []
stem_pt = nltk.stem.SnowballStemmer('portuguese')
for df in dataframe:
expr = re.sub(r"http\S+", "", df)
expr = re.sub(r"[@#]\S+","",expr)
#expr = normalize('NFKD',expr).encode('ASCII','ignore').decode('ASCII')
filtrado = [w for w in nltk.regexp_tokenize(expr.lower(),"[\S]+") if not w in nltk.corpus.stopwords.words('portuguese')]
filtrado_steam = []
for f in filtrado:
filtrado_steam.append(stem_pt.stem(f))
frase = ""
for f in filtrado:
frase += f + " "
new_df.append(frase)
return new_df
def initial(self,file):
dataframe = self.mread_csv(file)
dataframe = dataframe.dropna()
new_df = pd.DataFrame()
new_df['opiniao'] = self.convert_df(dataframe['opiniao'])
new_df['tweet'] = self.clear(dataframe['tweet'])
new_df = new_df.reset_index()
return new_df
    # constructor
def __init__(self,file=None,dataframe=None):
if dataframe is None:
self.train_df = self.initial(file)
self.array_train = self.train_df['tweet'].values
self.target_train = self.train_df['opiniao'].values
self.classifiers = []
self.df_pred = pd.DataFrame()
elif file is None:
dataframe['tweet'] = self.clear(dataframe['tweet'])
self.array_train = dataframe['tweet'].values
self.target_train = dataframe['sentiment'].values
self.classifiers = []
self.df_pred = pd.DataFrame()
else:
print('parametro incorreto')
def find_tweet(self):
pos = self.mread_csv('freq_pos3')['pt'].values
neu = self.mread_csv('freq_neu3')['pt'].values
neg = self.mread_csv('freq_neg3')['pt'].values
df = pd.DataFrame()
#self.array_train,self.target_train
tupla = zip(neg,neu,pos)
X = []
y = []
tweets = self.array_train
for (ng,n,p) in tupla:
for index in range(len(tweets)):
text = self.array_train[index]
target = self.target_train[index]
if not(text.find(ng) == -1):
X.append(text)
y.append(target)
#print('Text: %s, targ: %s'%(text,target))
if not(text.find(n) == -1):
X.append(text)
y.append(target)
#print('Text: %s, targ: %s'%(text,target))
if not(text.find(p) == -1):
X.append(text)
y.append(target)
#print('Text: %s, targ: %s'%(text,target))
return X,y
def validation_words(self,model,train,target):
X_mod,y_mod = self.find_tweet()
count_vect = CountVectorizer()
X_train = count_vect.fit_transform(train)
X_mod = count_vect.transform(X_mod)
ac_v = []
cm_v = []
p_v = []
r_v = []
f1_v = []
e_v = []
fpr = []
tpr = []
roc_auc_ = []
for i in range(5):
model.fit(X_mod,y_mod)
pred = model.predict(X_train)
ac = accuracy_score(target, pred)
p = precision_score(target, pred,average='weighted')
r = recall_score(target, pred,average='weighted')
f1 = (2*p*r)/(p+r)
e = mean_squared_error(target, pred)
cm = confusion_matrix(target,pred)
cm_v.append(cm)
ac_v.append(ac)
p_v.append(p)
r_v.append(r)
f1_v.append(f1)
e_v.append(e)
ac = statistics.median(ac_v)
p = statistics.median(p_v)
f1 = statistics.median(f1_v)
r = statistics.median(r_v)
e = statistics.median(e_v)
cm_median = self.matrix_confuse_median(cm_v)
return ac,ac_v,p,r,f1,e,cm_median
def cross_apply(self,model,train,target):
count_vect = CountVectorizer()
X = count_vect.fit_transform(train)
kf = KFold(10, shuffle=True, random_state=1)
ac_v = []
cm_v = []
p_v = []
r_v = []
f1_v = []
e_v = []
fpr = []
tpr = []
roc_auc_ = []
predicts = []
for train_index,teste_index in kf.split(X,target):
X_train, X_test = X[train_index],X[teste_index]
y_train, y_test = target[train_index], target[teste_index]
model.fit(X_train,y_train)
pred = model.predict(X_test)
ac = accuracy_score(y_test, pred)
p = precision_score(y_test, pred,average='weighted')
r = recall_score(y_test, pred,average='weighted')
f1 = (2*p*r)/(p+r)
e = mean_squared_error(y_test, pred)
cm = confusion_matrix(y_test,pred)
cm_v.append(cm)
ac_v.append(ac)
p_v.append(p)
r_v.append(r)
f1_v.append(f1)
e_v.append(e)
ac = statistics.median(ac_v)
p = statistics.median(p_v)
f1 = statistics.median(f1_v)
r = statistics.median(r_v)
e = statistics.median(e_v)
cm_median = self.matrix_confuse_median(ac_v,cm_v)
return predicts,ac,ac_v,p,r,f1,e,cm_median
def matrix_confuse_median(self,acc,cm):
for j in range(len(acc)):
for i in range(len(acc)-1):
if acc[i] > acc[i+1]:
aux = acc[i+1]
acc[i+1] = acc[i]
acc[i] = aux
aux2 = cm[i+1]
cm[i+1] = cm[i]
cm[i] = aux2
acc_median = (acc[4]+ acc[5])/2
cm_median = cm[5]
return cm_median
tab_aux = []
for i in range(len(tab_pred[md[0]][0])):
values = []
for m in md:
values.append(tab_pred[m][0][i])
tab_aux.append(values)
tab = dict()
for m in md:
tab[m] = []
for tb in tab_aux:
j = 0
for m in md:
tab[m].append(tb[j])
j += 1
return tab
def roc(self,cm):
n_classes = 3
#roc_auc = []
fpr = [0,1]
tpr = [0,1]
for c in cm:
re = []
esp = []
tp = 0
sm = 0
            # sensitivity
for i in range(n_classes):
tp = cm[i,i]
for j in range(n_classes):
sm += cm[i,j]
s = tp/sm
re.append(s)
fpr.append(s)
tn = 0
smn = 0
            # specificity
for i in range(n_classes):
tn = cm[i,i]
for j in range(n_classes):
smn += cm[j,i]
e = 1-(tn/smn)
esp.append(e)
tpr.append(e)
roc = Roc()
fpr,tpr = np.array(fpr),np.array(tpr)
roc.set_fpr(np.sort(fpr))
roc.set_tpr(np.sort(tpr))
roc.set_auc(auc(roc.get_fpr(),roc.get_tpr()))
return roc
def calc_weigth(self,acc):
ac = []
soma = sum(acc)
for i in range(len(acc)):
ac.append(acc[i]/soma)
return ac
def plot_roc(self,fpr,tpr,roc_auc,color,label):
plt.figure()
lw = 2
plt.plot(fpr,tpr,color='red',lw=lw,label='UAC(%s = %0.2f)' % (label,roc_auc))
plt.plot([0, 1], [0, 1], color='black', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('Taxa de Falso Positivo')
plt.ylabel('Taxa de Verdadeiro Positivo')
plt.title('Grafico ROC')
plt.legend(loc="lower right")
plt.savefig('Figuras/roc.png')
#plt.show()
def plot_roc_all(self,fpr,tpr,roc_auc,label):
plt.figure()
lw = 2
tam = len(fpr)
color = ['red','blue','yellow','green','purple','orange']
for i in range(tam):
plt.plot(fpr[i],tpr[i],color=color[i],lw=lw,label='UAC(%s = %0.2f)' % (label[i],roc_auc[i]))
#plt.plot(fpr[1],tpr[1],color='blue',lw=lw,label='UAC(%s = %0.2f)' % (label[1],roc_auc[1]))
#plt.plot(fpr[2],tpr[2],color='yellow',lw=lw,label='UAC(%s = %0.2f)' % (label[2],roc_auc[2]))
#plt.plot(fpr[3],tpr[3],color='green',lw=lw,label='UAC(%s = %0.2f)' % (label[3],roc_auc[3]))
#plt.plot(fpr[4],tpr[4],color='purple',lw=lw,label='UAC(%s = %0.2f)' % (label[4],roc_auc[4]))
#plt.plot(fpr[5],tpr[5],color='orange',lw=lw,label='UAC(%s = %0.2f)' % (label[5],roc_auc[5]))
plt.plot([0, 1], [0, 1], color='black', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('Taxa de Falso Positivo')
plt.ylabel('Taxa de Verdadeiro Positivo')
plt.title('Grafico ROC')
plt.legend(loc="lower right")
plt.savefig('Figuras/roc.png')
#plt.show()
def plot_confuse_matrix(self,cm,title,file_name):
labels = ['Negativo', 'Neutro','Positivo']
cm = np.ceil(cm)
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(cm)
plt.title(title)
fig.colorbar(cax)
ax.set_xticklabels([''] + labels)
ax.set_yticklabels([''] + labels)
thresh = cm.max()/2
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],horizontalalignment="center",color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.xlabel('Predito')
plt.ylabel('Verdadeiro')
plt.savefig('Figuras/%s.png'%(file_name))
#plt.show()
def box_plot(self,results,names,title,file):
fig = plt.figure()
fig.suptitle(title)
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.savefig('Figuras/%s.png'%(file))
#plt.show()
def CMultinomialNV(self):
parameters = {'alpha':[0.000001,0.00001,0.0001,0.001,0.1,1.0],'fit_prior':[True,False]}
grid_nb = GridSearchCV(MultinomialNB(),parameters)
#nb = MultinomialNB(alpha=0.000001)
self.classifiers.append(grid_nb)
#ac,ac_v,p,r,f1,e,cm = self.validation_words(grid_nb,self.array_train,self.target_train)
pred,ac,ac_v,p,r,f1,e,cm = self.cross_apply(grid_nb,self.array_train,self.target_train)
roc_ = Roc()
roc_ = self.roc(cm)
self.df_pred['nv'] = pred
return ac,ac_v,p,r,f1,e,cm,roc_
def CDecisionTree(self):
parameters = {'criterion':('gini','entropy'),'splitter':('best','random'),'max_features':('auto','log2','sqrt')}
grid_dt = GridSearchCV(tree.DecisionTreeClassifier(),parameters)
#dt = tree.DecisionTreeClassifier(criterion='gini')
self.classifiers.append(grid_dt)
pred,ac,ac_v,p,r,f1,e,cm = self.cross_apply(grid_dt,self.array_train,self.target_train)
roc_ = Roc()
roc_ = self.roc(cm)
self.df_pred['dt'] = pred
return ac,ac_v,p,r,f1,e,cm,roc_
def CSuportVectorMachine(self):
#parameters = {'kernel':('linear', 'rbf'), 'C':[10, 100]}
parameters = {'kernel': ['rbf','linear'], 'gamma': [1e-3, 1e-4],'C': [1, 10, 100, 1000],'decision_function_shape':['ovr','mutinomial']}
grid_svm = GridSearchCV(svm.SVC(),parameters)
#csvm = svm.SVC(kernel='linear',gamma=0.001,C=100,decision_function_shape='ovr')
self.classifiers.append(grid_svm)
pred,ac,ac_v,p,r,f1,e,cm = self.cross_apply(grid_svm,self.array_train,self.target_train)
roc_ = Roc()
roc_ = self.roc(cm)
self.df_pred['svm'] = pred
return ac,ac_v,p,r,f1,e,cm,roc_
def CRandomForest(self):
parameters = {'n_estimators':[1,5,10,20,30],'criterion':('gini','entropy')}
grid_rf = GridSearchCV(RandomForestClassifier(),parameters)
#rf = RandomForestClassifier(n_estimators=5,criterion='gini')
self.classifiers.append(grid_rf)
pred,ac,ac_v,p,r,f1,e,cm = self.cross_apply(grid_rf,self.array_train,self.target_train)
roc_ = Roc()
roc_ = self.roc(cm)
self.df_pred['rf'] = pred
return ac,ac_v,p,r,f1,e,cm,roc_
def CLogistRegression(self):
parameters = {'penalty':['l2'],'C':[0.000001,0.00001,0.0001,0.001,0.1,1.0],'solver':['newton-cg','lbfgs','sag'],'multi_class':['ovr']}
        # available solvers: 'newton-cg', 'sag', 'saga' and 'lbfgs'
#'penalty':('l1'),'C':[0.000001,0.00001,0.0001,0.001,0.1,1.0],'solver':['lbfgs', 'liblinear', 'sag', 'saga']
grid_lr = GridSearchCV(LogisticRegression(),parameters)
#lr = LogisticRegression(penalty='l2',multi_class='ovr')
self.classifiers.append(grid_lr)
pred,ac,ac_v,p,r,f1,e,cm = self.cross_apply(grid_lr,self.array_train,self.target_train)
roc_ = Roc()
roc_ = self.roc(cm)
self.df_pred['lr'] = pred
return ac,ac_v,p,r,f1,e,cm,roc_
def gradienteDesc(self):
parameters = {'loss':['hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron','squared_loss', 'huber', 'epsilon_insensitive','squared_epsilon_insensitive'],
'penalty':['l1','l2'],'alpha':[0.000001,0.00001,0.0001,0.001,0.1,1.0],'learning_rate':['constant','optimal','invscaling'],'eta0':[0.01,0.1,1.0]}
grid_sgd = GridSearchCV(SGDClassifier(),parameters)
pred,ac,ac_v,p,r,f1,e,cm = self.cross_apply(grid_sgd,self.array_train,self.target_train)
roc_ = Roc()
roc_ = self.roc(cm)
return ac,ac_v,p,r,f1,e,cm,roc_
def committee(self,pesos):
model = VotingClassifier(estimators=[('nv', self.classifiers[0]), ('svm',self.classifiers[1]), ('dt',self.classifiers[2]) ,('rf', self.classifiers[3]), ('lr',self.classifiers[4])], weights=pesos,voting='hard')
pred,ac,ac_v,p,r,f1,e,cm_median = self.cross_apply(model,self.array_train,self.target_train)
roc_ = Roc()
roc_ = self.roc(cm_median)
self.df_pred['cm'] = pred
return ac,ac_v,p,r,f1,e,cm_median,roc_
def pred_texts(self,dataset):
test = self.clear(dataset)
count_vect = CountVectorizer()
X = count_vect.fit_transform(test)
train = count_vect.transform(self.array_train)
parameters = {'loss':['hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron','squared_loss', 'huber', 'epsilon_insensitive','squared_epsilon_insensitive'],
'penalty':['l1','l2'],'alpha':[0.000001,0.00001,0.0001,0.001,0.1,1.0],'learning_rate':['constant','optimal','invscaling'],'eta0':[0.01,0.1,1.0]}
sgd = GridSearchCV(SGDClassifier(),parameters)
sgd.fit(train,self.target_train)
pred = sgd.predict(X)
df = pd.DataFrame()
df['tweet'] = dataset
df['sentiment'] = pred
return df
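# Illustrative usage sketch (not part of the original module): the class above appears to
# be designed so that each base model is trained first, the resulting accuracies are
# turned into normalised voting weights with calc_weigth, and those weights are passed to
# committee(). The CSV file name is an assumption; any dataset the constructor accepts
# would work. The call order matters because committee() indexes self.classifiers in the
# order nv, svm, dt, rf, lr.
def _example_committee_flow():
    clf = SentClassifiers(file='example_dataset.csv')  # hypothetical input file
    accuracies = []
    for train_method in (clf.CMultinomialNV, clf.CSuportVectorMachine,
                         clf.CDecisionTree, clf.CRandomForest, clf.CLogistRegression):
        ac = train_method()[0]              # first returned value is the median accuracy
        accuracies.append(ac)
    weights = clf.calc_weigth(accuracies)   # normalise so the weights sum to 1
    return clf.committee(weights)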
| gpl-3.0 |
cloudera/ibis | ibis/backends/pandas/tests/test_client.py | 1 | 2936 | import numpy as np
import pandas as pd
import pandas.testing as tm
import pytest
from pytest import param
import ibis
from .. import connect
from ..client import PandasTable # noqa: E402
pytestmark = pytest.mark.pandas
@pytest.fixture
def client():
return connect(
{
'df': pd.DataFrame({'a': [1, 2, 3], 'b': list('abc')}),
'df_unknown': pd.DataFrame(
{'array_of_strings': [['a', 'b'], [], ['c']]}
),
}
)
@pytest.fixture
def table(client):
return client.table('df')
@pytest.fixture
def test_data():
    test_data = pd.DataFrame(
{"A": [1, 2, 3, 4, 5], "B": list("abcde")}
)
return test_data
def test_client_table(table):
assert isinstance(table.op(), ibis.expr.operations.DatabaseTable)
assert isinstance(table.op(), PandasTable)
def test_client_table_repr(table):
assert 'PandasTable' in repr(table)
def test_load_data(client, test_data):
client.load_data('testing', test_data)
assert client.exists_table('testing')
assert client.get_schema('testing')
def test_create_table(client, test_data):
client.create_table('testing', obj=test_data)
assert client.exists_table('testing')
client.create_table('testingschema', schema=client.get_schema('testing'))
assert client.exists_table('testingschema')
def test_literal(client):
lit = ibis.literal(1)
result = client.execute(lit)
assert result == 1
def test_list_tables(client):
assert client.list_tables(like='df_unknown')
assert not client.list_tables(like='not_in_the_database')
assert client.list_tables()
def test_read_with_undiscoverable_type(client):
with pytest.raises(TypeError):
client.table('df_unknown')
def test_drop(table):
table = table.mutate(c=table.a)
expr = table.drop(['a'])
result = expr.execute()
expected = table[['b', 'c']].execute()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
'unit',
[
param('Y', marks=pytest.mark.xfail(raises=TypeError)),
param('M', marks=pytest.mark.xfail(raises=TypeError)),
param('D', marks=pytest.mark.xfail(raises=TypeError)),
param('h', marks=pytest.mark.xfail(raises=TypeError)),
param('m', marks=pytest.mark.xfail(raises=TypeError)),
param('s', marks=pytest.mark.xfail(raises=TypeError)),
param('ms', marks=pytest.mark.xfail(raises=TypeError)),
param('us', marks=pytest.mark.xfail(raises=TypeError)),
'ns',
param('ps', marks=pytest.mark.xfail(raises=TypeError)),
param('fs', marks=pytest.mark.xfail(raises=TypeError)),
param('as', marks=pytest.mark.xfail(raises=TypeError)),
],
)
def test_datetime64_infer(client, unit):
value = np.datetime64('2018-01-02', unit)
expr = ibis.literal(value, type='timestamp')
result = client.execute(expr)
assert result == value
| apache-2.0 |
tonyroberts/mdf | mdf/tests/test_regression.py | 3 | 1840 | """
Unit tests for regression testing
"""
import unittest
import os
import pandas as pa
from datetime import datetime
import mdf.regression
from mdf import evalnode
@evalnode
def pid_test():
return os.getpid()
# used in test_regression_remote_server_init_func
startup_data = {"cfg":{"paramA":"A"}}
def remote_server_init_func(startup_data):
"""
startup_data is a dict constructed by _start_pyro_subprocess
which will be passed to this callback function on the remote process.
    startup_data will contain any additional startup data passed to mdf.regression.[get_contexts|run]
"""
_cfg = startup_data["cfg"]
assert _cfg["paramA"], "A"
class RemoteTest(unittest.TestCase):
def test_regression_contexts(self):
"""
simple test that creates two subprocesses and checks the
pids are different
"""
lhs, rhs = mdf.regression.get_contexts(None, None)
# test the pids for the two contexts are different
lhs_pid = lhs.get_value(pid_test)
rhs_pid = rhs.get_value(pid_test)
self.assertNotEqual(lhs_pid, rhs_pid)
    def test_regression_remote_server_init_func(self):
        """
        simple test that passes an init_func and startup_data through to the
        remote subprocess contexts
        """
lhs, rhs = mdf.regression.get_contexts(None, None,
init_func=remote_server_init_func,
startup_data=startup_data)
def test_df_differ(self):
"""
tests the DataFrameDiffer
"""
date_range = pa.bdate_range(datetime.now(), periods=10)
df_differ = mdf.regression.DataFrameDiffer([pid_test])
diffs = mdf.regression.run(date_range, [df_differ], lhs=None, rhs=None)
self.assertTrue(diffs[0][0])
| mit |
hugobowne/scikit-learn | sklearn/metrics/tests/test_classification.py | 15 | 54365 | from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import MockDataFrame
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.exceptions import UndefinedMetricWarning
from scipy.spatial.distance import hamming as sp_hamming
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
assert_dep_warning = partial(assert_warns, DeprecationWarning)
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings),
({'average': 'micro'}, assert_dep_warning)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F1 score behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert_equal(1., precision_score([1, 1], [1, 1]))
assert_equal(1., recall_score([1, 1], [1, 1]))
assert_equal(1., f1_score([1, 1], [1, 1]))
assert_equal(0., precision_score([-1, -1], [-1, -1]))
assert_equal(0., recall_score([-1, -1], [-1, -1]))
assert_equal(0., f1_score([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
# Test handling of explicit additional (not in input) labels to PRF
y_true = [1, 3, 3, 2]
y_pred = [1, 1, 3, 2]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
# No average: zeros in array
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average=None)
assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
# Macro average is changed
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average='macro')
assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
        # No effect otherwise
for average in ['micro', 'weighted', 'samples']:
if average == 'samples' and i == 0:
continue
assert_almost_equal(recall_score(y_true, y_pred,
labels=[0, 1, 2, 3, 4],
average=average),
recall_score(y_true, y_pred, labels=None,
average=average))
# Error when introducing invalid label in multilabel case
# (although it would only affect performance if average='macro'/None)
for average in [None, 'macro', 'micro', 'samples']:
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(6), average=average)
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
# Test a subset of labels may be requested for PRF
y_true = [1, 1, 2, 3]
y_pred = [1, 3, 3, 3]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
recall_all = partial(recall_score, y_true, y_pred, labels=None)
assert_array_almost_equal([.5, 1.], recall_13(average=None))
assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
assert_almost_equal((.5 * 2 + 1. * 1) / 3,
recall_13(average='weighted'))
assert_almost_equal(2. / 3, recall_13(average='micro'))
# ensure the above were meaningful tests:
for average in ['macro', 'weighted', 'micro']:
assert_not_equal(recall_13(average=average),
recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
# Test that average_precision_score function returns an error when trying
# to compute average_precision_score for multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
# Duplicate values with precision-recall require a different
# processing than when computing the AUC of a ROC, because the
# precision-recall curve is a decreasing curve
# The following situation corresponds to a perfect
# test statistic, the average_precision_score should be 1
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
# Here if we go from left to right in y_true, the 0 values are
# are separated from the 1 values, so it appears that we've
# Correctly sorted our classifications. But in fact the first two
# values have the same score (0.5) and so the first two values
# could be swapped around, creating an imperfect sorting. This
# imperfection should come through in the end score, making it less
# than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='macro')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
def test_cohen_kappa():
# These label vectors reproduce the contingency matrix from Artstein and
# Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
y1 = np.array([0] * 40 + [1] * 60)
y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
kappa = cohen_kappa_score(y1, y2)
assert_almost_equal(kappa, .348, decimal=3)
assert_equal(kappa, cohen_kappa_score(y2, y1))
# Add spurious labels and ignore them.
y1 = np.append(y1, [2] * 4)
y2 = np.append(y2, [2] * 4)
assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)
assert_almost_equal(cohen_kappa_score(y1, y1), 1.)
# Multiclass example: Artstein and Poesio, Table 4.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
@ignore_warnings
def test_matthews_corrcoef_nan():
assert_equal(matthews_corrcoef([0], [1]), 0.0)
assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_matthews_corrcoef_against_numpy_corrcoef():
rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=20)
y_pred = rng.randint(0, 2, size=20)
assert_almost_equal(matthews_corrcoef(y_true, y_pred),
np.corrcoef(y_true, y_pred)[0, 1], 10)
def test_matthews_corrcoef():
rng = np.random.RandomState(0)
y_true = ["a" if i == 0 else "b" for i in rng.randint(0, 2, size=20)]
# corrcoef of same vectors must be 1
assert_almost_equal(matthews_corrcoef(y_true, y_true), 1.0)
# corrcoef, when the two vectors are opposites of each other, should be -1
y_true_inv = ["b" if i == "a" else "a" for i in y_true]
assert_almost_equal(matthews_corrcoef(y_true, y_true_inv), -1)
y_true_inv2 = label_binarize(y_true, ["a", "b"]) * -1
assert_almost_equal(matthews_corrcoef(y_true, y_true_inv2), -1)
# For the zero vector case, the corrcoef cannot be calculated and should
# result in a RuntimeWarning
mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered',
matthews_corrcoef, [0, 0, 0, 0], [0, 0, 0, 0])
# But will output 0
assert_almost_equal(mcc, 0.)
# And also for any other vector with 0 variance
mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered',
matthews_corrcoef, y_true,
rng.randint(-100, 100) * np.ones(20, dtype=int))
# But will output 0
assert_almost_equal(mcc, 0.)
# These two vectors have 0 correlation and hence mcc should be 0
y_1 = [1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1]
y_2 = [1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1]
assert_almost_equal(matthews_corrcoef(y_1, y_2), 0.)
# Check that sample weight is able to selectively exclude
mask = [1] * 10 + [0] * 10
# Now the first half of the vector elements are alone given a weight of 1
# and hence the mcc will not be a perfect 0 as in the previous case
assert_raises(AssertionError, assert_almost_equal,
matthews_corrcoef(y_1, y_2, sample_weight=mask), 0.)
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
assert_raises(ValueError, precision_score, y_true, y_pred,
average="samples")
assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
beta=0.5)
    # same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
def test_precision_recall_f1_score_multilabel_unordered_labels():
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
for average in ['samples', 'micro', 'macro', 'weighted', None]:
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_multiclass_pos_label_none():
# Test Precision Recall and F1 Score for multiclass classification task
# GH Issue #1296
# initialize data
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
pos_label=None,
average='weighted')
def test_zero_precision_recall():
# Check that pathological cases do not bring NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='weighted'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='weighted'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
# Test confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 4, 1],
[4, 3, 24],
[0, 2, 18]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = confusion_matrix(y_true,
y_pred,
labels=labels)
assert_array_equal(cm, [[19, 1, 4],
[0, 18, 2],
[4, 24, 3]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_confusion_matrix_sample_weight():
"""Test confusion matrix - case with sample_weight"""
y_true, y_pred, _ = make_prediction(binary=False)
weights = [.1] * 25 + [.2] * 25 + [.3] * 25
cm = confusion_matrix(y_true, y_pred, sample_weight=weights)
true_cm = (.1 * confusion_matrix(y_true[:25], y_pred[:25]) +
.2 * confusion_matrix(y_true[25:50], y_pred[25:50]) +
.3 * confusion_matrix(y_true[50:], y_pred[50:]))
assert_array_almost_equal(cm, true_cm)
assert_raises(
ValueError, confusion_matrix, y_true, y_pred,
sample_weight=weights[:-1])
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
if np_version[:3] < (1, 7, 0):
expected_message = ("NumPy < 1.7.0 does not implement"
" searchsorted on unicode data correctly.")
assert_raise_message(RuntimeError, expected_message,
classification_report, y_true, y_pred)
else:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_long_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array(["blue", "green"*5, "red"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
greengreengreengreengreen 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
_, y_true = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=0)
_, y_pred = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=1)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
w = np.array([1, 3])
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, 1 - y2), 1)
assert_equal(hamming_loss(y1, 1 - y1), 1)
assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
assert_equal(hamming_loss(y1, y2, sample_weight=w), 1. / 12)
assert_equal(hamming_loss(y1, 1-y2, sample_weight=w), 11. / 12)
assert_equal(hamming_loss(y1, np.zeros_like(y1), sample_weight=w), 2. / 3)
# sp_hamming only works with 1-D arrays
assert_equal(hamming_loss(y1[0], y2[0]), sp_hamming(y1[0], y2[0]))
def test_multilabel_jaccard_similarity_score():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]])
y_pred = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 1, 0]])
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
# tp = [0, 1, 1, 0]
# fn = [1, 0, 0, 1]
# fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
# Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
# Check samples
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"),
0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 1], [0, 0, 0, 1], [1, 1, 0, 0]])
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# Check samples
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
y_true = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 1, 0]])
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 1.5 / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 3 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.333, 2)
def test_precision_recall_f1_no_labels():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
for beta in [1]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=None, beta=beta)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred, beta=beta, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
for average in ["macro", "micro", "weighted", "samples"]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=average,
beta=beta)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert_equal(s, None)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred,
beta=beta, average=average)
assert_almost_equal(fbeta, 0)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
my_assert = assert_warns_message
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.')
my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.')
my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.')
my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.')
my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
    # single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_precision_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
def test_fscore_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no predicted samples.')
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_prf_average_compat():
# Ensure warning if f1_score et al.'s average is implicit for multiclass
y_true = [1, 2, 3, 3]
y_pred = [1, 2, 3, 1]
y_true_bin = [0, 1, 1]
y_pred_bin = [0, 1, 0]
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
score = assert_warns(DeprecationWarning, metric, y_true, y_pred)
score_weighted = assert_no_warnings(metric, y_true, y_pred,
average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default')
# check binary passes without warning
assert_no_warnings(metric, y_true_bin, y_pred_bin)
# but binary with pos_label=None should behave like multiclass
score = assert_warns(DeprecationWarning, metric,
y_true_bin, y_pred_bin, pos_label=None)
score_weighted = assert_no_warnings(metric, y_true_bin, y_pred_bin,
pos_label=None, average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default with '
'binary data and pos_label=None')
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
assert_raises(ValueError, _check_targets, y1, y2)
if type1 != type2:
assert_raise_message(
ValueError,
"Can't handle mix of {0} and {1}".format(type1, type2),
_check_targets, y1, y2)
else:
if type1 not in (BIN, MC, IND):
assert_raise_message(ValueError,
"{0} is not supported".format(type1),
_check_targets, y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert_equal(merged_type, expected)
if merged_type.startswith('multilabel'):
assert_equal(y1out.format, 'csr')
assert_equal(y2out.format, 'csr')
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
assert_raises(ValueError, _check_targets, y1[:-1], y2)
# Make sure seq of seq is not supported
y1 = [(1, 2,), (0, 2, 3)]
y2 = [(2,), (0, 2,)]
msg = ('You appear to be using a legacy multi-label data representation. '
'Sequence of sequences are no longer supported; use a binary array'
' or sparse matrix instead.')
assert_raise_message(ValueError, msg, _check_targets, y1, y2)
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
def test_hinge_loss_multiclass():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
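    # Each term below is the multiclass hinge margin
    # 1 - decision[true label] + max(decision[other labels]),
    # clipped at zero further down before being averaged.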
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[+1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, +0.24],
[-2.36, -0.79, -0.27, +0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
assert_raise_message(ValueError,
error_message,
hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
# Currently, invariance of string and integer labels cannot be tested
# in common invariance tests because invariance tests for multiclass
    # decision functions are not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
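    # log_loss is the mean negative log of the probability assigned to the
    # true class; here those probabilities are
    # [0.5, 0.1, 0.01, 0.1, 0.25, 0.999], which gives ~1.88.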
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
# raise error if number of classes are not equal.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
assert_raises(ValueError, log_loss, y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_log_loss_pandas_input():
# case when input is a pandas series and dataframe gh-5715
y_tr = np.array(["ham", "spam", "spam", "ham"])
y_pr = np.array([[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]])
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TrueInputType, PredInputType in types:
# y_pred dataframe, y_true series
y_true, y_pred = TrueInputType(y_tr), PredInputType(y_pr)
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
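    # i.e. the mean squared difference between the predicted probability and
    # the 0/1 outcome.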
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
| bsd-3-clause |
smunix/ns-3-rfid | src/flow-monitor/examples/wifi-olsr-flowmon.py | 27 | 7354 | # -*- Mode: Python; -*-
# Copyright (c) 2009 INESC Porto
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Gustavo Carneiro <gjc@inescporto.pt>
import sys
import ns.applications
import ns.core
import ns.flow_monitor
import ns.internet
import ns.mobility
import ns.network
import ns.olsr
import ns.wifi
DISTANCE = 100 # (m)
NUM_NODES_SIDE = 3
def main(argv):
cmd = ns.core.CommandLine()
cmd.NumNodesSide = None
cmd.AddValue("NumNodesSide", "Grid side number of nodes (total number of nodes will be this number squared)")
cmd.Results = None
cmd.AddValue("Results", "Write XML results to file")
cmd.Plot = None
cmd.AddValue("Plot", "Plot the results using the matplotlib python module")
cmd.Parse(argv)
wifi = ns.wifi.WifiHelper.Default()
wifiMac = ns.wifi.NqosWifiMacHelper.Default()
wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
wifiPhy.SetChannel(wifiChannel.Create())
ssid = ns.wifi.Ssid("wifi-default")
wifi.SetRemoteStationManager("ns3::ArfWifiManager")
wifiMac.SetType ("ns3::AdhocWifiMac",
"Ssid", ns.wifi.SsidValue(ssid))
internet = ns.internet.InternetStackHelper()
list_routing = ns.internet.Ipv4ListRoutingHelper()
olsr_routing = ns.olsr.OlsrHelper()
static_routing = ns.internet.Ipv4StaticRoutingHelper()
list_routing.Add(static_routing, 0)
list_routing.Add(olsr_routing, 100)
internet.SetRoutingHelper(list_routing)
ipv4Addresses = ns.internet.Ipv4AddressHelper()
ipv4Addresses.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
port = 9 # Discard port(RFC 863)
onOffHelper = ns.applications.OnOffHelper("ns3::UdpSocketFactory",
ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.0.0.1"), port)))
onOffHelper.SetAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate("100kbps")))
onOffHelper.SetAttribute("OnTime", ns.core.RandomVariableValue(ns.core.ConstantVariable(1)))
onOffHelper.SetAttribute("OffTime", ns.core.RandomVariableValue(ns.core.ConstantVariable(0)))
addresses = []
nodes = []
if cmd.NumNodesSide is None:
num_nodes_side = NUM_NODES_SIDE
else:
num_nodes_side = int(cmd.NumNodesSide)
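    # Lay the nodes out on a num_nodes_side x num_nodes_side grid, DISTANCE
    # meters apart; each node gets an ad-hoc WiFi device and an IPv4 address
    # from the 10.0.0.0/24 block configured above.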
for xi in range(num_nodes_side):
for yi in range(num_nodes_side):
node = ns.network.Node()
nodes.append(node)
internet.Install(ns.network.NodeContainer(node))
mobility = ns.mobility.ConstantPositionMobilityModel()
mobility.SetPosition(ns.core.Vector(xi*DISTANCE, yi*DISTANCE, 0))
node.AggregateObject(mobility)
devices = wifi.Install(wifiPhy, wifiMac, node)
ipv4_interfaces = ipv4Addresses.Assign(devices)
addresses.append(ipv4_interfaces.GetAddress(0))
for i, node in enumerate(nodes):
destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)]
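        # i.e. node i streams its on/off UDP traffic towards the node at the
        # mirrored index (N - 1 - i).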
#print i, destaddr
onOffHelper.SetAttribute("Remote", ns.network.AddressValue(ns.network.InetSocketAddress(destaddr, port)))
app = onOffHelper.Install(ns.network.NodeContainer(node))
app.Start(ns.core.Seconds(ns.core.UniformVariable(20, 30).GetValue()))
#internet.EnablePcapAll("wifi-olsr")
flowmon_helper = ns.flow_monitor.FlowMonitorHelper()
#flowmon_helper.SetMonitorAttribute("StartTime", ns.core.TimeValue(ns.core.Seconds(31)))
monitor = flowmon_helper.InstallAll()
monitor = flowmon_helper.GetMonitor()
monitor.SetAttribute("DelayBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("JitterBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("PacketSizeBinWidth", ns.core.DoubleValue(20))
ns.core.Simulator.Stop(ns.core.Seconds(44.0))
ns.core.Simulator.Run()
def print_stats(os, st):
print >> os, " Tx Bytes: ", st.txBytes
print >> os, " Rx Bytes: ", st.rxBytes
print >> os, " Tx Packets: ", st.txPackets
print >> os, " Rx Packets: ", st.rxPackets
print >> os, " Lost Packets: ", st.lostPackets
if st.rxPackets > 0:
print >> os, " Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets)
print >> os, " Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1))
print >> os, " Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1
if 0:
print >> os, "Delay Histogram"
for i in range(st.delayHistogram.GetNBins () ):
print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \
st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i)
print >> os, "Jitter Histogram"
for i in range(st.jitterHistogram.GetNBins () ):
print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \
st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i)
print >> os, "PacketSize Histogram"
for i in range(st.packetSizeHistogram.GetNBins () ):
print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \
st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i)
for reason, drops in enumerate(st.packetsDropped):
print " Packets dropped by reason %i: %i" % (reason, drops)
#for reason, drops in enumerate(st.bytesDropped):
# print "Bytes dropped by reason %i: %i" % (reason, drops)
monitor.CheckForLostPackets()
classifier = flowmon_helper.GetClassifier()
if cmd.Results is None:
for flow_id, flow_stats in monitor.GetFlowStats():
t = classifier.FindFlow(flow_id)
proto = {6: 'TCP', 17: 'UDP'} [t.protocol]
print "FlowID: %i (%s %s/%s --> %s/%i)" % \
(flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort)
print_stats(sys.stdout, flow_stats)
else:
print monitor.SerializeToXmlFile(cmd.Results, True, True)
if cmd.Plot is not None:
import pylab
delays = []
for flow_id, flow_stats in monitor.GetFlowStats():
tupl = classifier.FindFlow(flow_id)
if tupl.protocol == 17 and tupl.sourcePort == 698:
continue
delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets)
pylab.hist(delays, 20)
pylab.xlabel("Delay (s)")
pylab.ylabel("Number of Flows")
pylab.show()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| gpl-2.0 |
astrofrog/numpy | numpy/lib/twodim_base.py | 10 | 23635 | """ Basic functions for manipulating 2d arrays
"""
__all__ = ['diag','diagflat','eye','fliplr','flipud','rot90','tri','triu',
'tril','vander','histogram2d','mask_indices',
'tril_indices','tril_indices_from','triu_indices','triu_indices_from',
]
from numpy.core.numeric import asanyarray, equal, subtract, arange, \
zeros, greater_equal, multiply, ones, asarray, alltrue, where, \
empty, diagonal
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Does not require the array to be
two-dimensional.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1,...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
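    # One counter-clockwise quarter turn is a left-right flip followed by a
    # transpose; two turns flip both axes; three turns transpose first and
    # then flip left-right.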
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0,1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0,1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
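    # Filling the flattened array with a stride of M+1 steps one row down and
    # one column right at a time, so starting at flat offset i this writes
    # ones along the k-th diagonal; slicing to the first M-k rows keeps the
    # ones from wrapping past the last column.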
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
    tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n,n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return v.diagonal(k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n,n), v.dtype)
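    # The flat index of entry (row, col) is row * n + col; the k-th diagonal
    # consists of entries (i, i + k) for k >= 0 and (i - k, i) for k < 0,
    # which is what fi computes below.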
if (k >= 0):
i = arange(0,n-k)
fi = i+k+i*n
else:
i = arange(0,n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
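    # subtract.outer builds the matrix of row_index - column_index; comparing
    # it against -k marks exactly the positions with column <= row + k.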
m = greater_equal(subtract.outer(arange(N), arange(M)),-k)
return m.astype(dtype)
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
out = multiply(tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype),m)
return out
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
out = multiply((1 - tri(m.shape[0], m.shape[1], k - 1, dtype=m.dtype)), m)
return out
# borrowed from John Hunter and matplotlib
def vander(x, N=None):
"""
    Generate a Vandermonde matrix.
The columns of the output matrix are decreasing powers of the input
vector. Specifically, the `i`-th output column is the input vector
raised element-wise to the power of ``N - i - 1``. Such a matrix with
a geometric progression in each row is named for Alexandre-Theophile
Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Order of (number of columns in) the output. If `N` is not specified,
a square array is returned (``N = len(x)``).
Returns
-------
out : ndarray
        Vandermonde matrix of order `N`. The first column is ``x^(N-1)``,
the second ``x^(N-2)`` and so forth.
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if N is None:
N=len(x)
X = ones( (len(x),N), x.dtype)
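    # Column i holds x raised to the power N-1-i; the last column is left as
    # the ones from the initialization above, i.e. x**0.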
for i in range(N - 1):
X[:,i] = x**(N - i - 1)
return X
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape(N,)
A sequence of values to be histogrammed along the first dimension.
y : array_like, shape(M,)
A sequence of values to be histogrammed along the second dimension.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If [int, int], the number of bins in each dimension (nx, ny = bins).
* If array_like, the bin edges for the two dimensions (x_edges=y_edges=bins).
* If [array, array], the bin edges in each dimension (x_edges, y_edges = bins).
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True, returns
the bin density, i.e. the bin count divided by the bin area.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. Weights
are normalized to 1 if `normed` is True. If `normed` is False, the
values of the returned histogram are equal to the sum of the weights
belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram: 1D histogram
histogramdd: Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample density,
defined such that:
.. math::
\\sum_{i=0}^{nx-1} \\sum_{j=0}^{ny-1} H_{i,j} \\Delta x_i \\Delta y_j = 1
where `H` is the histogram array and :math:`\\Delta x_i \\Delta y_i`
the area of bin `{i,j}`.
Please note that the histogram does not follow the Cartesian convention
    where `x` values are on the abscissa and `y` values on the ordinate axis.
Rather, `x` is histogrammed along the first dimension of the array
(vertical), and `y` along the second dimension of the array (horizontal).
This ensures compatibility with `histogramdd`.
Examples
--------
>>> x, y = np.random.randn(2, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(5, 8))
>>> H.shape, xedges.shape, yedges.shape
((5, 8), (6,), (9,))
    We can now use Matplotlib to visualize this 2-dimensional histogram:
>>> extent = [yedges[0], yedges[-1], xedges[-1], xedges[0]]
>>> import matplotlib.pyplot as plt
>>> plt.imshow(H, extent=extent, interpolation='nearest')
<matplotlib.image.AxesImage object at ...>
>>> plt.colorbar()
<matplotlib.colorbar.Colorbar instance at ...>
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x,y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n,n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0):
"""
Return the indices for the lower-triangle of an (n, n) array.
Parameters
----------
n : int
The row dimension of the square arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return mask_indices(n, tril, k)
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]):
raise ValueError("input array must be 2-d and square")
return tril_indices(arr.shape[0], k)
def triu_indices(n, k=0):
"""
Return the indices for the upper-triangle of an (n, n) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return mask_indices(n, triu, k)
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of a (N, N) array.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]):
raise ValueError("input array must be 2-d and square")
return triu_indices(arr.shape[0],k)
| bsd-3-clause |
stoqs/stoqs | stoqs/contrib/parquet/parquet2csv.py | 2 | 1105 | #!/usr/bin/env python
'''
Convert STOQS Measured Parameter Data Access .parquet output to CSV format.
'''
import argparse
import pandas as pd
import sys
instructions = f'''
Can be run in an Anaconda environment thusly...
First time - install necessary packages:
conda create --name stoqs-parquet python=3.8
conda activate stoqs-parquet
pip install pandas pyarrow fastparquet
Thereafter:
conda activate stoqs-parquet
{sys.argv[0]} --url ...
'''
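# A typical invocation might look like the following (the URL is purely
# illustrative, not a real STOQS endpoint):
# python parquet2csv.py --url https://stoqs.example.org/mpdata.parquet --out mpdata.csv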
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
epilog=instructions)
parser.add_argument('--url', action='store', help="The .parquet URL from STOQS",
required=True)
parser.add_argument('--out', action='store', help=("Optional output file name"
" - if not specified then send to stdout"))
args = parser.parse_args()
df = pd.read_parquet(args.url)
if args.out:
fh = open(args.out, 'w')
df.to_csv(fh)
fh.close()
else:
print(df.to_csv())
| gpl-3.0 |
shikhardb/scikit-learn | examples/datasets/plot_random_multilabel_dataset.py | 93 | 3460 | """
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the `datasets.make_multilabel_classification` dataset
generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
===== ===== ===== ======
1 2 3 Color
===== ===== ===== ======
Y N N Red
N Y N Blue
N N Y Yellow
Y Y N Purple
Y N Y Orange
Y Y N Green
Y Y Y Brown
===== ===== ===== ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
COLORS = np.array(['!',
'#FF3333', # red
'#0198E1', # blue
'#BF5FFF', # purple
'#FCD116', # yellow
'#FF7216', # orange
'#4DBD33', # green
'#87421F' # brown
])
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
n_classes=n_classes, n_labels=n_labels,
length=length, allow_unlabeled=False,
return_indicator=True,
return_distributions=True,
random_state=RANDOM_SEED)
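    # (Y * [1, 2, 4]).sum(axis=1) turns each row of the label indicator matrix
    # into an integer between 1 and 7 (unlabeled samples are disallowed),
    # which selects the matching entry of COLORS per the table in the module
    # docstring.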
ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]
).sum(axis=1)),
marker='.')
ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
marker='*', linewidth=.5, edgecolor='black',
s=20 + 1500 * p_c ** 2,
color=COLORS.take([1, 2, 4]))
ax.set_xlabel('Feature 0 count')
return p_c, p_w_c
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
| bsd-3-clause |
nan86150/ImageFusion | lib/python2.7/site-packages/matplotlib/pyplot.py | 10 | 120496 | # Note: The first part of this file can be modified in place, but the latter
# part is autogenerated by the boilerplate.py script.
"""
Provides a MATLAB-like plotting framework.
:mod:`~matplotlib.pylab` combines pyplot with numpy into a single namespace.
This is convenient for interactive work, but for programming it
is recommended that the namespaces be kept separate, e.g.::
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(0, 5, 0.1);
y = np.sin(x)
plt.plot(x, y)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import sys
import warnings
import matplotlib
import matplotlib.colorbar
from matplotlib import style
from matplotlib import _pylab_helpers, interactive
from matplotlib.cbook import dedent, silent_list, is_string_like, is_numlike
from matplotlib.cbook import _string_to_bool
from matplotlib import docstring
from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.figure import Figure, figaspect
from matplotlib.gridspec import GridSpec
from matplotlib.image import imread as _imread
from matplotlib.image import imsave as _imsave
from matplotlib import rcParams, rcParamsDefault, get_backend
from matplotlib import rc_context
from matplotlib.rcsetup import interactive_bk as _interactive_bk
from matplotlib.artist import getp, get, Artist
from matplotlib.artist import setp as _setp
from matplotlib.axes import Axes, Subplot
from matplotlib.projections import PolarAxes
from matplotlib import mlab # for csv2rec, detrend_none, window_hanning
from matplotlib.scale import get_scale_docs, get_scale_names
from matplotlib import cm
from matplotlib.cm import get_cmap, register_cmap
import numpy as np
# We may not need the following imports here:
from matplotlib.colors import Normalize
from matplotlib.colors import normalize # for backwards compat.
from matplotlib.lines import Line2D
from matplotlib.text import Text, Annotation
from matplotlib.patches import Polygon, Rectangle, Circle, Arrow
from matplotlib.widgets import SubplotTool, Button, Slider, Widget
from .ticker import TickHelper, Formatter, FixedFormatter, NullFormatter,\
FuncFormatter, FormatStrFormatter, ScalarFormatter,\
LogFormatter, LogFormatterExponent, LogFormatterMathtext,\
Locator, IndexLocator, FixedLocator, NullLocator,\
LinearLocator, LogLocator, AutoLocator, MultipleLocator,\
MaxNLocator
## Backend detection ##
def _backend_selection():
""" If rcParams['backend_fallback'] is true, check to see if the
current backend is compatible with the current running event
loop, and if not switches to a compatible one.
"""
backend = rcParams['backend']
if not rcParams['backend_fallback'] or \
backend not in _interactive_bk:
return
is_agg_backend = rcParams['backend'].endswith('Agg')
if 'wx' in sys.modules and not backend in ('WX', 'WXAgg'):
import wx
if wx.App.IsMainLoopRunning():
rcParams['backend'] = 'wx' + 'Agg' * is_agg_backend
elif 'PyQt4.QtCore' in sys.modules and not backend == 'Qt4Agg':
import PyQt4.QtGui
if not PyQt4.QtGui.qApp.startingUp():
# The mainloop is running.
rcParams['backend'] = 'qt4Agg'
elif 'PyQt5.QtCore' in sys.modules and not backend == 'Qt5Agg':
import PyQt5.QtWidgets
if not PyQt5.QtWidgets.qApp.startingUp():
# The mainloop is running.
rcParams['backend'] = 'qt5Agg'
elif ('gtk' in sys.modules
and backend not in ('GTK', 'GTKAgg', 'GTKCairo')
and 'gi.repository.GObject' not in sys.modules):
import gobject
if gobject.MainLoop().is_running():
rcParams['backend'] = 'gtk' + 'Agg' * is_agg_backend
elif 'Tkinter' in sys.modules and not backend == 'TkAgg':
# import Tkinter
pass # what if anything do we need to do for tkinter?
_backend_selection()
## Global ##
from matplotlib.backends import pylab_setup
_backend_mod, new_figure_manager, draw_if_interactive, _show = pylab_setup()
@docstring.copy_dedent(Artist.findobj)
def findobj(o=None, match=None, include_self=True):
if o is None:
o = gcf()
return o.findobj(match, include_self=include_self)
def switch_backend(newbackend):
"""
Switch the default backend. This feature is **experimental**, and
is only expected to work switching to an image backend. e.g., if
you have a bunch of PostScript scripts that you want to run from
an interactive ipython session, you may want to switch to the PS
backend before running them to avoid having a bunch of GUI windows
popup. If you try to interactively switch from one GUI backend to
another, you will explode.
Calling this command will close all open windows.
"""
close('all')
global _backend_mod, new_figure_manager, draw_if_interactive, _show
matplotlib.use(newbackend, warn=False, force=True)
from matplotlib.backends import pylab_setup
_backend_mod, new_figure_manager, draw_if_interactive, _show = pylab_setup()
def show(*args, **kw):
"""
Display a figure.
When running in ipython with its pylab mode, display all
figures and return to the ipython prompt.
In non-interactive mode, display all figures and block until
the figures have been closed; in interactive mode it has no
effect unless figures were created prior to a change from
non-interactive to interactive mode (not recommended). In
that case it displays the figures but does not block.
A single experimental keyword argument, *block*, may be
set to True or False to override the blocking behavior
described above.
"""
global _show
return _show(*args, **kw)
def isinteractive():
"""
Return status of interactive mode.
"""
return matplotlib.is_interactive()
def ioff():
'Turn interactive mode off.'
matplotlib.interactive(False)
def ion():
'Turn interactive mode on.'
matplotlib.interactive(True)
def pause(interval):
"""
Pause for *interval* seconds.
If there is an active figure it will be updated and displayed,
and the GUI event loop will run during the pause.
If there is no active figure, or if a non-interactive backend
is in use, this executes time.sleep(interval).
This can be used for crude animation. For more complex
animation, see :mod:`matplotlib.animation`.
This function is experimental; its behavior may be changed
or extended in a future release.
"""
backend = rcParams['backend']
if backend in _interactive_bk:
figManager = _pylab_helpers.Gcf.get_active()
if figManager is not None:
canvas = figManager.canvas
canvas.draw()
show(block=False)
canvas.start_event_loop(interval)
return
# No on-screen figure is active, so sleep() is all we need.
import time
time.sleep(interval)
@docstring.copy_dedent(matplotlib.rc)
def rc(*args, **kwargs):
matplotlib.rc(*args, **kwargs)
@docstring.copy_dedent(matplotlib.rc_context)
def rc_context(rc=None, fname=None):
return matplotlib.rc_context(rc, fname)
@docstring.copy_dedent(matplotlib.rcdefaults)
def rcdefaults():
matplotlib.rcdefaults()
draw_if_interactive()
# The current "image" (ScalarMappable) is retrieved or set
# only via the pyplot interface using the following two
# functions:
def gci():
"""
Get the current colorable artist. Specifically, returns the
current :class:`~matplotlib.cm.ScalarMappable` instance (image or
patch collection), or *None* if no images or patch collections
have been defined. The commands :func:`~matplotlib.pyplot.imshow`
and :func:`~matplotlib.pyplot.figimage` create
:class:`~matplotlib.image.Image` instances, and the commands
:func:`~matplotlib.pyplot.pcolor` and
:func:`~matplotlib.pyplot.scatter` create
:class:`~matplotlib.collections.Collection` instances. The
current image is an attribute of the current axes, or the nearest
earlier axes in the current figure that contains an image.
"""
return gcf()._gci()
def sci(im):
"""
Set the current image. This image will be the target of colormap
commands like :func:`~matplotlib.pyplot.jet`,
:func:`~matplotlib.pyplot.hot` or
:func:`~matplotlib.pyplot.clim`). The current image is an
attribute of the current axes.
"""
gca()._sci(im)
## Any Artist ##
# (getp is simply imported)
@docstring.copy(_setp)
def setp(*args, **kwargs):
ret = _setp(*args, **kwargs)
draw_if_interactive()
return ret
def xkcd(scale=1, length=100, randomness=2):
"""
Turns on `xkcd <http://xkcd.com/>`_ sketch-style drawing mode.
This will only have effect on things drawn after this function is
called.
For best results, the "Humor Sans" font should be installed: it is
not included with matplotlib.
Parameters
----------
scale : float, optional
The amplitude of the wiggle perpendicular to the source line.
length : float, optional
The length of the wiggle along the line.
randomness : float, optional
The scale factor by which the length is shrunken or expanded.
Notes
-----
This function works by setting a number of rcParams, so it will probably
override others you have set before.
If you want the effects of this function to be temporary, it can
be used as a context manager, for example::
with plt.xkcd():
# This figure will be in XKCD-style
fig1 = plt.figure()
# ...
# This figure will be in regular style
fig2 = plt.figure()
"""
if rcParams['text.usetex']:
raise RuntimeError(
"xkcd mode is not compatible with text.usetex = True")
from matplotlib import patheffects
context = rc_context()
try:
rcParams['font.family'] = ['Humor Sans', 'Comic Sans MS']
rcParams['font.size'] = 14.0
rcParams['path.sketch'] = (scale, length, randomness)
rcParams['path.effects'] = [
patheffects.withStroke(linewidth=4, foreground="w")]
rcParams['axes.linewidth'] = 1.5
rcParams['lines.linewidth'] = 2.0
rcParams['figure.facecolor'] = 'white'
rcParams['grid.linewidth'] = 0.0
rcParams['axes.unicode_minus'] = False
rcParams['axes.color_cycle'] = ['b', 'r', 'c', 'm']
rcParams['xtick.major.size'] = 8
rcParams['xtick.major.width'] = 3
rcParams['ytick.major.size'] = 8
rcParams['ytick.major.width'] = 3
except:
context.__exit__(*sys.exc_info())
raise
return context
## Figures ##
def figure(num=None, # autoincrement if None, else integer from 1-N
figsize=None, # defaults to rc figure.figsize
dpi=None, # defaults to rc figure.dpi
facecolor=None, # defaults to rc figure.facecolor
edgecolor=None, # defaults to rc figure.edgecolor
frameon=True,
FigureClass=Figure,
**kwargs
):
"""
Creates a new figure.
Parameters
----------
num : integer or string, optional, default: none
If not provided, a new figure will be created, and the figure number
will be incremented. The figure object holds this number in a `number`
attribute.
If num is provided and a figure with this id already exists, it is made
active and a reference to it is returned. If this figure does not
exist, it is created and returned.
If num is a string, the window title will be set to this figure's
`num`.
figsize : tuple of integers, optional, default: None
width, height in inches. If not provided, defaults to rc
figure.figsize.
dpi : integer, optional, default: None
resolution of the figure. If not provided, defaults to rc figure.dpi.
facecolor :
the background color. If not provided, defaults to rc figure.facecolor
edgecolor :
the border color. If not provided, defaults to rc figure.edgecolor
Returns
-------
figure : Figure
The Figure instance returned will also be passed to new_figure_manager
in the backends, which allows hooking custom Figure classes into the
pylab interface. Additional kwargs will be passed to the figure init
function.
Notes
-----
If you are creating many figures, make sure you explicitly call "close"
on the figures you are not using, because this will enable pylab
to properly clean up the memory.
rcParams defines the default values, which can be modified in the
matplotlibrc file
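A short usage sketch showing how figures are reused by label::

import matplotlib.pyplot as plt
fig1 = plt.figure()           # figure 1
fig2 = plt.figure('results')  # new figure labelled 'results'
assert plt.figure('results') is fig2   # existing labelled figure is reused
plt.close('all')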
"""
if figsize is None:
figsize = rcParams['figure.figsize']
if dpi is None:
dpi = rcParams['figure.dpi']
if facecolor is None:
facecolor = rcParams['figure.facecolor']
if edgecolor is None:
edgecolor = rcParams['figure.edgecolor']
allnums = get_fignums()
next_num = max(allnums) + 1 if allnums else 1
figLabel = ''
if num is None:
num = next_num
elif is_string_like(num):
figLabel = num
allLabels = get_figlabels()
if figLabel not in allLabels:
if figLabel == 'all':
warnings.warn("close('all') closes all existing figures")
num = next_num
else:
inum = allLabels.index(figLabel)
num = allnums[inum]
else:
num = int(num) # crude validation of num argument
figManager = _pylab_helpers.Gcf.get_fig_manager(num)
if figManager is None:
max_open_warning = rcParams['figure.max_open_warning']
if (max_open_warning >= 1 and
len(allnums) >= max_open_warning):
warnings.warn(
"More than %d figures have been opened. Figures "
"created through the pyplot interface "
"(`matplotlib.pyplot.figure`) are retained until "
"explicitly closed and may consume too much memory. "
"(To control this warning, see the rcParam "
"`figure.max_open_warning`)." %
max_open_warning, RuntimeWarning)
if get_backend().lower() == 'ps':
dpi = 72
figManager = new_figure_manager(num, figsize=figsize,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
frameon=frameon,
FigureClass=FigureClass,
**kwargs)
if figLabel:
figManager.set_window_title(figLabel)
figManager.canvas.figure.set_label(figLabel)
# make this figure current on button press event
def make_active(event):
_pylab_helpers.Gcf.set_active(figManager)
cid = figManager.canvas.mpl_connect('button_press_event', make_active)
figManager._cidgcf = cid
_pylab_helpers.Gcf.set_active(figManager)
figManager.canvas.figure.number = num
draw_if_interactive()
return figManager.canvas.figure
def gcf():
"Get a reference to the current figure."
figManager = _pylab_helpers.Gcf.get_active()
if figManager is not None:
return figManager.canvas.figure
else:
return figure()
fignum_exists = _pylab_helpers.Gcf.has_fignum
def get_fignums():
"""Return a list of existing figure numbers."""
fignums = list(six.iterkeys(_pylab_helpers.Gcf.figs))
fignums.sort()
return fignums
def get_figlabels():
"Return a list of existing figure labels."
figManagers = _pylab_helpers.Gcf.get_all_fig_managers()
figManagers.sort(key=lambda m: m.num)
return [m.canvas.figure.get_label() for m in figManagers]
def get_current_fig_manager():
figManager = _pylab_helpers.Gcf.get_active()
if figManager is None:
gcf() # creates an active figure as a side effect
figManager = _pylab_helpers.Gcf.get_active()
return figManager
@docstring.copy_dedent(FigureCanvasBase.mpl_connect)
def connect(s, func):
return get_current_fig_manager().canvas.mpl_connect(s, func)
@docstring.copy_dedent(FigureCanvasBase.mpl_disconnect)
def disconnect(cid):
return get_current_fig_manager().canvas.mpl_disconnect(cid)
def close(*args):
"""
Close a figure window.
``close()`` by itself closes the current figure
``close(h)`` where *h* is a :class:`Figure` instance, closes that figure
``close(num)`` closes figure number *num*
``close(name)`` where *name* is a string, closes figure with that label
``close('all')`` closes all the figure windows
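A short usage sketch::

fig = figure('scratch')   # create a figure labelled 'scratch'
close('scratch')          # close it again by its label
close('all')              # close any remaining figures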
"""
if len(args) == 0:
figManager = _pylab_helpers.Gcf.get_active()
if figManager is None:
return
else:
_pylab_helpers.Gcf.destroy(figManager.num)
elif len(args) == 1:
arg = args[0]
if arg == 'all':
_pylab_helpers.Gcf.destroy_all()
elif isinstance(arg, six.integer_types):
_pylab_helpers.Gcf.destroy(arg)
elif hasattr(arg, 'int'):
# if we are dealing with a type UUID, we
# can use its integer representation
_pylab_helpers.Gcf.destroy(arg.int)
elif is_string_like(arg):
allLabels = get_figlabels()
if arg in allLabels:
num = get_fignums()[allLabels.index(arg)]
_pylab_helpers.Gcf.destroy(num)
elif isinstance(arg, Figure):
_pylab_helpers.Gcf.destroy_fig(arg)
else:
raise TypeError('Unrecognized argument type %s to close' % type(arg))
else:
raise TypeError('close takes 0 or 1 arguments')
def clf():
"""
Clear the current figure.
"""
gcf().clf()
draw_if_interactive()
def draw():
"""
Redraw the current figure.
This is used in interactive mode to update a figure that
has been altered using one or more plot object method calls;
it is not needed if figure modification is done entirely
with pyplot functions, if a sequence of modifications ends
with a pyplot function, or if matplotlib is in non-interactive
mode and the sequence of modifications ends with :func:`show` or
:func:`savefig`.
A more object-oriented alternative, given any
:class:`~matplotlib.figure.Figure` instance, :attr:`fig`, that
was created using a :mod:`~matplotlib.pyplot` function, is::
fig.canvas.draw()
"""
get_current_fig_manager().canvas.draw()
@docstring.copy_dedent(Figure.savefig)
def savefig(*args, **kwargs):
fig = gcf()
res = fig.savefig(*args, **kwargs)
draw() # need this if 'transparent=True' to reset colors
return res
@docstring.copy_dedent(Figure.ginput)
def ginput(*args, **kwargs):
"""
Blocking call to interact with the figure.
This will wait for *n* clicks from the user and return a list of the
coordinates of each click.
If *timeout* is negative, does not timeout.
"""
return gcf().ginput(*args, **kwargs)
@docstring.copy_dedent(Figure.waitforbuttonpress)
def waitforbuttonpress(*args, **kwargs):
"""
Blocking call to interact with the figure.
This will wait for *n* key or mouse clicks from the user and
return a list containing True's for keyboard clicks and False's
for mouse clicks.
If *timeout* is negative, does not timeout.
"""
return gcf().waitforbuttonpress(*args, **kwargs)
# Putting things in figures
@docstring.copy_dedent(Figure.text)
def figtext(*args, **kwargs):
ret = gcf().text(*args, **kwargs)
draw_if_interactive()
return ret
@docstring.copy_dedent(Figure.suptitle)
def suptitle(*args, **kwargs):
ret = gcf().suptitle(*args, **kwargs)
draw_if_interactive()
return ret
@docstring.Appender("Addition kwargs: hold = [True|False] overrides default hold state", "\n")
@docstring.copy_dedent(Figure.figimage)
def figimage(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
ret = gcf().figimage(*args, **kwargs)
draw_if_interactive()
#sci(ret) # JDH figimage should not set current image -- it is not mappable, etc
return ret
def figlegend(handles, labels, loc, **kwargs):
"""
Place a legend in the figure.
*labels*
a sequence of strings
*handles*
a sequence of :class:`~matplotlib.lines.Line2D` or
:class:`~matplotlib.patches.Patch` instances
*loc*
can be a string or an integer specifying the legend
location
A :class:`matplotlib.legend.Legend` instance is returned.
Example::
figlegend( (line1, line2, line3),
('label1', 'label2', 'label3'),
'upper right' )
.. seealso::
:func:`~matplotlib.pyplot.legend`
"""
l = gcf().legend(handles, labels, loc, **kwargs)
draw_if_interactive()
return l
## Figure and Axes hybrid ##
def hold(b=None):
"""
Set the hold state. If *b* is None (default), toggle the
hold state, else set the hold state to boolean value *b*::
hold() # toggle hold
hold(True) # hold is on
hold(False) # hold is off
When *hold* is *True*, subsequent plot commands will be added to
the current axes. When *hold* is *False*, the current axes and
figure will be cleared on the next plot command.
"""
fig = gcf()
ax = fig.gca()
fig.hold(b)
ax.hold(b)
# b=None toggles the hold state, so let's get the current hold
# state; but should pyplot hold toggle the rc setting - me thinks
# not
b = ax.ishold()
rc('axes', hold=b)
def ishold():
"""
Return the hold status of the current axes.
"""
return gca().ishold()
def over(func, *args, **kwargs):
"""
Call a function with hold(True).
Calls::
func(*args, **kwargs)
with ``hold(True)`` and then restores the hold state.
"""
h = ishold()
hold(True)
func(*args, **kwargs)
hold(h)
## Axes ##
def axes(*args, **kwargs):
"""
Add an axes to the figure.
The axes is added at position *rect* specified by:
- ``axes()`` by itself creates a default full ``subplot(111)`` window axis.
- ``axes(rect, axisbg='w')`` where *rect* = [left, bottom, width,
height] in normalized (0, 1) units. *axisbg* is the background
color for the axis, default white.
- ``axes(h)`` where *h* is an axes instance makes *h* the current
axis. An :class:`~matplotlib.axes.Axes` instance is returned.
======= ============== ==============================================
kwarg Accepts Description
======= ============== ==============================================
axisbg color the axes background color
frameon [True|False] display the frame?
sharex otherax current axes shares xaxis attribute
with otherax
sharey otherax current axes shares yaxis attribute
with otherax
polar [True|False] use a polar axes?
aspect [str | num] ['equal', 'auto'] or a number. If a number
the ratio of x-unit/y-unit in screen-space.
Also see
:meth:`~matplotlib.axes.Axes.set_aspect`.
======= ============== ==============================================
Examples:
* :file:`examples/pylab_examples/axes_demo.py` places custom axes.
* :file:`examples/pylab_examples/shared_axis_demo.py` uses
*sharex* and *sharey*.
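A short usage sketch placing an inset axes inside the default one::

import matplotlib.pyplot as plt
ax_main = plt.axes()                         # full-window axes
ax_inset = plt.axes([0.65, 0.65, 0.2, 0.2])  # small axes in the upper right
ax_inset.plot([1, 2, 3])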
"""
nargs = len(args)
if len(args) == 0:
return subplot(111, **kwargs)
if nargs > 1:
raise TypeError('Only one non keyword arg to axes allowed')
arg = args[0]
if isinstance(arg, Axes):
a = gcf().sca(arg)
else:
rect = arg
a = gcf().add_axes(rect, **kwargs)
draw_if_interactive()
return a
def delaxes(*args):
"""
Remove an axes from the current figure. If *ax*
doesn't exist, an error will be raised.
``delaxes()``: delete the current axes
"""
if not len(args):
ax = gca()
else:
ax = args[0]
ret = gcf().delaxes(ax)
draw_if_interactive()
return ret
def sca(ax):
"""
Set the current Axes instance to *ax*.
The current Figure is updated to the parent of *ax*.
"""
managers = _pylab_helpers.Gcf.get_all_fig_managers()
for m in managers:
if ax in m.canvas.figure.axes:
_pylab_helpers.Gcf.set_active(m)
m.canvas.figure.sca(ax)
return
raise ValueError("Axes instance argument was not found in a figure.")
def gca(**kwargs):
"""
Get the current :class:`~matplotlib.axes.Axes` instance on the
current figure matching the given keyword args, or create one.
Examples
---------
To get the current polar axes on the current figure::
plt.gca(projection='polar')
If the current axes doesn't exist, or isn't a polar one, the appropriate
axes will be created and then returned.
See Also
--------
matplotlib.figure.Figure.gca : The figure's gca method.
"""
ax = gcf().gca(**kwargs)
return ax
# More ways of creating axes:
def subplot(*args, **kwargs):
"""
Return a subplot axes positioned by the given grid definition.
Typical call signature::
subplot(nrows, ncols, plot_number)
Where *nrows* and *ncols* are used to notionally split the figure
into ``nrows * ncols`` sub-axes, and *plot_number* is used to identify
the particular subplot that this function is to create within the notional
grid. *plot_number* starts at 1, increments across rows first and has a
maximum of ``nrows * ncols``.
In the case when *nrows*, *ncols* and *plot_number* are all less than 10,
a convenience exists, such that a 3-digit number can be given instead,
where the hundreds represent *nrows*, the tens represent *ncols* and the
units represent *plot_number*. For instance::
subplot(211)
produces a subaxes in a figure which represents the top plot (i.e. the
first) in a 2 row by 1 column notional grid (no grid actually exists,
but conceptually this is how the returned subplot has been positioned).
.. note::
Creating a new subplot with a position which is entirely inside a
pre-existing axes will trigger the larger axes to be deleted::
import matplotlib.pyplot as plt
# plot a line, implicitly creating a subplot(111)
plt.plot([1,2,3])
# now create a subplot which represents the top plot of a grid
# with 2 rows and 1 column. Since this subplot will overlap the
# first, the plot (and its axes) previously created, will be removed
plt.subplot(211)
plt.plot(range(12))
plt.subplot(212, axisbg='y') # creates 2nd subplot with yellow background
If you do not want this behavior, use the
:meth:`~matplotlib.figure.Figure.add_subplot` method or the
:func:`~matplotlib.pyplot.axes` function instead.
Keyword arguments:
*axisbg*:
The background color of the subplot, which can be any valid
color specifier. See :mod:`matplotlib.colors` for more
information.
*polar*:
A boolean flag indicating whether the subplot should be
a polar projection. Defaults to *False*.
*projection*:
A string giving the name of a custom projection to be used
for the subplot. This projection must have been previously
registered. See :mod:`matplotlib.projections`.
.. seealso::
:func:`~matplotlib.pyplot.axes`
For additional information on :func:`axes` and
:func:`subplot` keyword arguments.
:file:`examples/pie_and_polar_charts/polar_scatter_demo.py`
For an example
**Example:**
.. plot:: mpl_examples/subplots_axes_and_figures/subplot_demo.py
"""
# if subplot called without arguments, create subplot(1,1,1)
if len(args)==0:
args=(1,1,1)
# This check was added because it is very easy to type
# subplot(1, 2, False) when subplots(1, 2, False) was intended
# (sharex=False, that is). In most cases, no error will
# ever occur, but mysterious behavior can result because what was
# intended to be the sharex argument is instead treated as a
# subplot index for subplot()
if len(args) >= 3 and isinstance(args[2], bool) :
warnings.warn("The subplot index argument to subplot() appears"
" to be a boolean. Did you intend to use subplots()?")
fig = gcf()
a = fig.add_subplot(*args, **kwargs)
bbox = a.bbox
byebye = []
for other in fig.axes:
if other==a: continue
if bbox.fully_overlaps(other.bbox):
byebye.append(other)
for ax in byebye: delaxes(ax)
draw_if_interactive()
return a
def subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True,
subplot_kw=None, gridspec_kw=None, **fig_kw):
"""
Create a figure with a set of subplots already made.
This utility wrapper makes it convenient to create common layouts of
subplots, including the enclosing figure object, in a single call.
Keyword arguments:
*nrows* : int
Number of rows of the subplot grid. Defaults to 1.
*ncols* : int
Number of columns of the subplot grid. Defaults to 1.
*sharex* : string or bool
If *True*, the X axis will be shared amongst all subplots. If
*True* and you have multiple rows, the x tick labels on all but
the last row of plots will have visible set to *False*
If a string must be one of "row", "col", "all", or "none".
"all" has the same effect as *True*, "none" has the same effect
as *False*.
If "row", each subplot row will share a X axis.
If "col", each subplot column will share a X axis and the x tick
labels on all but the last row will have visible set to *False*.
*sharey* : string or bool
If *True*, the Y axis will be shared amongst all subplots. If
*True* and you have multiple columns, the y tick labels on all but
the first column of plots will have visible set to *False*
If a string must be one of "row", "col", "all", or "none".
"all" has the same effect as *True*, "none" has the same effect
as *False*.
If "row", each subplot row will share a Y axis and the y tick
labels on all but the first column will have visible set to *False*.
If "col", each subplot column will share a Y axis.
*squeeze* : bool
If *True*, extra dimensions are squeezed out from the
returned axis object:
- if only one subplot is constructed (nrows=ncols=1), the
resulting single Axis object is returned as a scalar.
- for Nx1 or 1xN subplots, the returned object is a 1-d numpy
object array of Axes objects.
- for NxM subplots with N>1 and M>1, the returned object is a
2-d numpy object array of Axes objects.
If *False*, no squeezing at all is done: the returned axis
object is always a 2-d array containing Axis instances, even if it
ends up being 1x1.
*subplot_kw* : dict
Dict with keywords passed to the
:meth:`~matplotlib.figure.Figure.add_subplot` call used to
create each subplots.
*gridspec_kw* : dict
Dict with keywords passed to the
:class:`~matplotlib.gridspec.GridSpec` constructor used to create
the grid the subplots are placed on.
*fig_kw* : dict
Dict with keywords passed to the :func:`figure` call. Note that all
keywords not recognized above will be automatically included here.
Returns:
fig, ax : tuple
- *fig* is the :class:`matplotlib.figure.Figure` object
- *ax* can be either a single axis object or an array of axis
objects if more than one subplot was created. The dimensions
of the resulting array can be controlled with the squeeze
keyword, see above.
Examples::
x = np.linspace(0, 2*np.pi, 400)
y = np.sin(x**2)
# Just a figure and one subplot
f, ax = plt.subplots()
ax.plot(x, y)
ax.set_title('Simple plot')
# Two subplots, unpack the output array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y)
ax1.set_title('Sharing Y axis')
ax2.scatter(x, y)
# Four polar axes
plt.subplots(2, 2, subplot_kw=dict(polar=True))
# Share a X axis with each column of subplots
plt.subplots(2, 2, sharex='col')
# Share a Y axis with each row of subplots
plt.subplots(2, 2, sharey='row')
# Share a X and Y axis with all subplots
plt.subplots(2, 2, sharex='all', sharey='all')
# same as
plt.subplots(2, 2, sharex=True, sharey=True)
"""
# for backwards compatibility
if isinstance(sharex, bool):
if sharex:
sharex = "all"
else:
sharex = "none"
if isinstance(sharey, bool):
if sharey:
sharey = "all"
else:
sharey = "none"
share_values = ["all", "row", "col", "none"]
if sharex not in share_values:
# This check was added because it is very easy to type subplots(1, 2, 1)
# when subplot(1, 2, 1) was intended. In most cases, no error will
# ever occur, but mysterious behavior will result because what was
# intended to be the subplot index is instead treated as a bool for
# sharex.
if isinstance(sharex, int):
warnings.warn("sharex argument to subplots() was an integer."
" Did you intend to use subplot() (without 's')?")
raise ValueError("sharex [%s] must be one of %s" % \
(sharex, share_values))
if sharey not in share_values:
raise ValueError("sharey [%s] must be one of %s" % \
(sharey, share_values))
if subplot_kw is None:
subplot_kw = {}
if gridspec_kw is None:
gridspec_kw = {}
fig = figure(**fig_kw)
gs = GridSpec(nrows, ncols, **gridspec_kw)
# Create empty object array to hold all axes. It's easiest to make it 1-d
# so we can just append subplots upon creation, and then reshape at the end.
nplots = nrows*ncols
axarr = np.empty(nplots, dtype=object)
# Create first subplot separately, so we can share it if requested
ax0 = fig.add_subplot(gs[0, 0], **subplot_kw)
#if sharex:
# subplot_kw['sharex'] = ax0
#if sharey:
# subplot_kw['sharey'] = ax0
axarr[0] = ax0
r, c = np.mgrid[:nrows, :ncols]
r = r.flatten() * ncols
c = c.flatten()
lookup = {
"none": np.arange(nplots),
"all": np.zeros(nplots, dtype=int),
"row": r,
"col": c,
}
sxs = lookup[sharex]
sys = lookup[sharey]
# Note off-by-one counting because add_subplot uses the MATLAB 1-based
# convention.
for i in range(1, nplots):
if sxs[i] == i:
subplot_kw['sharex'] = None
else:
subplot_kw['sharex'] = axarr[sxs[i]]
if sys[i] == i:
subplot_kw['sharey'] = None
else:
subplot_kw['sharey'] = axarr[sys[i]]
axarr[i] = fig.add_subplot(gs[i // ncols, i % ncols], **subplot_kw)
# returned axis array will be always 2-d, even if nrows=ncols=1
axarr = axarr.reshape(nrows, ncols)
# turn off redundant tick labeling
if sharex in ["col", "all"] and nrows > 1:
#if sharex and nrows>1:
# turn off all but the bottom row
for ax in axarr[:-1, :].flat:
for label in ax.get_xticklabels():
label.set_visible(False)
ax.xaxis.offsetText.set_visible(False)
if sharey in ["row", "all"] and ncols > 1:
#if sharey and ncols>1:
# turn off all but the first column
for ax in axarr[:, 1:].flat:
for label in ax.get_yticklabels():
label.set_visible(False)
ax.yaxis.offsetText.set_visible(False)
if squeeze:
# Reshape the array to have the final desired dimension (nrow,ncol),
# though discarding unneeded dimensions that equal 1. If we only have
# one subplot, just return it instead of a 1-element array.
if nplots==1:
ret = fig, axarr[0,0]
else:
ret = fig, axarr.squeeze()
else:
# returned axis array will be always 2-d, even if nrows=ncols=1
ret = fig, axarr.reshape(nrows, ncols)
return ret
def subplot2grid(shape, loc, rowspan=1, colspan=1, **kwargs):
"""
Create a subplot in a grid. The grid is specified by *shape*, at
location of *loc*, spanning *rowspan*, *colspan* cells in each
direction. The index for loc is 0-based. ::
subplot2grid(shape, loc, rowspan=1, colspan=1)
is identical to ::
gridspec=GridSpec(shape[0], shape[1])
subplotspec=gridspec.new_subplotspec(loc, rowspan, colspan)
subplot(subplotspec)
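A short usage sketch with the first cell spanning two columns::

import matplotlib.pyplot as plt
ax1 = plt.subplot2grid((2, 2), (0, 0), colspan=2)  # top row, full width
ax2 = plt.subplot2grid((2, 2), (1, 0))             # bottom left
ax3 = plt.subplot2grid((2, 2), (1, 1))             # bottom right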
"""
fig = gcf()
s1, s2 = shape
subplotspec = GridSpec(s1, s2).new_subplotspec(loc,
rowspan=rowspan,
colspan=colspan)
a = fig.add_subplot(subplotspec, **kwargs)
bbox = a.bbox
byebye = []
for other in fig.axes:
if other==a: continue
if bbox.fully_overlaps(other.bbox):
byebye.append(other)
for ax in byebye: delaxes(ax)
draw_if_interactive()
return a
def twinx(ax=None):
"""
Make a second axes that shares the *x*-axis. The new axes will
overlay *ax* (or the current axes if *ax* is *None*). The ticks
for *ax2* will be placed on the right, and the *ax2* instance is
returned.
.. seealso::
:file:`examples/api_examples/two_scales.py`
For an example
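A short usage sketch::

import matplotlib.pyplot as plt
ax1 = plt.gca()
ax1.plot([1, 2, 3], color='b')
ax2 = plt.twinx()                 # shares the x-axis with ax1
ax2.plot([30, 20, 10], color='r')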
"""
if ax is None:
ax=gca()
ax1 = ax.twinx()
draw_if_interactive()
return ax1
def twiny(ax=None):
"""
Make a second axes that shares the *y*-axis. The new axis will
overlay *ax* (or the current axes if *ax* is *None*). The ticks
for *ax2* will be placed on the top, and the *ax2* instance is
returned.
"""
if ax is None:
ax=gca()
ax1 = ax.twiny()
draw_if_interactive()
return ax1
def subplots_adjust(*args, **kwargs):
"""
Tune the subplot layout.
call signature::
subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None)
The parameter meanings (and suggested defaults) are::
left = 0.125 # the left side of the subplots of the figure
right = 0.9 # the right side of the subplots of the figure
bottom = 0.1 # the bottom of the subplots of the figure
top = 0.9 # the top of the subplots of the figure
wspace = 0.2 # the amount of width reserved for blank space between subplots
hspace = 0.2 # the amount of height reserved for white space between subplots
The actual defaults are controlled by the rc file
"""
fig = gcf()
fig.subplots_adjust(*args, **kwargs)
draw_if_interactive()
def subplot_tool(targetfig=None):
"""
Launch a subplot tool window for a figure.
A :class:`matplotlib.widgets.SubplotTool` instance is returned.
"""
tbar = rcParams['toolbar'] # turn off the navigation toolbar for the toolfig
rcParams['toolbar'] = 'None'
if targetfig is None:
manager = get_current_fig_manager()
targetfig = manager.canvas.figure
else:
# find the manager for this figure
for manager in _pylab_helpers.Gcf._activeQue:
if manager.canvas.figure==targetfig: break
else: raise RuntimeError('Could not find manager for targetfig')
toolfig = figure(figsize=(6,3))
toolfig.subplots_adjust(top=0.9)
ret = SubplotTool(targetfig, toolfig)
rcParams['toolbar'] = tbar
_pylab_helpers.Gcf.set_active(manager) # restore the current figure
return ret
def tight_layout(pad=1.08, h_pad=None, w_pad=None, rect=None):
"""
Automatically adjust subplot parameters to give specified padding.
Parameters:
pad : float
padding between the figure edge and the edges of subplots, as a fraction of the font-size.
h_pad, w_pad : float
padding (height/width) between edges of adjacent subplots.
Defaults to `pad_inches`.
rect : if rect is given, it is interpreted as a rectangle
(left, bottom, right, top) in the normalized figure
coordinate that the whole subplots area (including
labels) will fit into. Default is (0, 0, 1, 1).
"""
fig = gcf()
fig.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)
draw_if_interactive()
def box(on=None):
"""
Turn the axes box on or off. *on* may be a boolean or a string,
'on' or 'off'.
If *on* is *None*, toggle state.
"""
ax = gca()
on = _string_to_bool(on)
if on is None:
on = not ax.get_frame_on()
ax.set_frame_on(on)
draw_if_interactive()
def title(s, *args, **kwargs):
"""
Set a title of the current axes.
Set one of the three available axes titles. The available titles are
positioned above the axes in the center, flush with the left edge,
and flush with the right edge.
.. seealso::
See :func:`~matplotlib.pyplot.text` for adding text
to the current axes
Parameters
----------
label : str
Text to use for the title
fontdict : dict
A dictionary controlling the appearance of the title text,
the default `fontdict` is:
{'fontsize': rcParams['axes.titlesize'],
'fontweight' : rcParams['axes.titleweight'],
'verticalalignment': 'baseline',
'horizontalalignment': loc}
loc : {'center', 'left', 'right'}, str, optional
Which title to set, defaults to 'center'
Returns
-------
text : :class:`~matplotlib.text.Text`
The matplotlib text instance representing the title
Other parameters
----------------
kwargs : text properties
Other keyword arguments are text properties, see
:class:`~matplotlib.text.Text` for a list of valid text
properties.
"""
l = gca().set_title(s, *args, **kwargs)
draw_if_interactive()
return l
## Axis ##
def axis(*v, **kwargs):
"""
Convenience method to get or set axis properties.
Calling with no arguments::
>>> axis()
returns the current axes limits ``[xmin, xmax, ymin, ymax]``.::
>>> axis(v)
sets the min and max of the x and y axes, with
``v = [xmin, xmax, ymin, ymax]``.::
>>> axis('off')
turns off the axis lines and labels.::
>>> axis('equal')
changes limits of *x* or *y* axis so that equal increments of *x*
and *y* have the same length; a circle is circular.::
>>> axis('scaled')
achieves the same result by changing the dimensions of the plot box instead
of the axis data limits.::
>>> axis('tight')
changes *x* and *y* axis limits such that all data is shown. If
all data is already shown, it will move it to the center of the
figure without modifying (*xmax* - *xmin*) or (*ymax* -
*ymin*). Note this is slightly different than in MATLAB.::
>>> axis('image')
is 'scaled' with the axis limits equal to the data limits.::
>>> axis('auto')
and::
>>> axis('normal')
are deprecated. They restore default behavior; axis limits are automatically
scaled to make the data fit comfortably within the plot box.
if ``len(*v)==0``, you can pass in *xmin*, *xmax*, *ymin*, *ymax*
as kwargs selectively to alter just those limits without changing
the others.
The xmin, xmax, ymin, ymax tuple is returned
.. seealso::
:func:`xlim`, :func:`ylim`
For setting the x- and y-limits individually.
"""
ax = gca()
v = ax.axis(*v, **kwargs)
draw_if_interactive()
return v
def xlabel(s, *args, **kwargs):
"""
Set the *x* axis label of the current axis.
Default override is::
override = {
'fontsize' : 'small',
'verticalalignment' : 'top',
'horizontalalignment' : 'center'
}
.. seealso::
:func:`~matplotlib.pyplot.text`
For information on how override and the optional args work
"""
l = gca().set_xlabel(s, *args, **kwargs)
draw_if_interactive()
return l
def ylabel(s, *args, **kwargs):
"""
Set the *y* axis label of the current axis.
Default override is::
override = {
'fontsize' : 'small',
'verticalalignment' : 'center',
'horizontalalignment' : 'right',
'rotation' : 'vertical' }
.. seealso::
:func:`~matplotlib.pyplot.text`
For information on how override and the optional args
work.
"""
l = gca().set_ylabel(s, *args, **kwargs)
draw_if_interactive()
return l
def xlim(*args, **kwargs):
"""
Get or set the *x* limits of the current axes.
::
xmin, xmax = xlim() # return the current xlim
xlim( (xmin, xmax) ) # set the xlim to xmin, xmax
xlim( xmin, xmax ) # set the xlim to xmin, xmax
If you do not specify args, you can pass the xmin and xmax as
kwargs, e.g.::
xlim(xmax=3) # adjust the max leaving min unchanged
xlim(xmin=1) # adjust the min leaving max unchanged
Setting limits turns autoscaling off for the x-axis.
The new axis limits are returned as a length 2 tuple.
"""
ax = gca()
if not args and not kwargs:
return ax.get_xlim()
ret = ax.set_xlim(*args, **kwargs)
draw_if_interactive()
return ret
def ylim(*args, **kwargs):
"""
Get or set the *y*-limits of the current axes.
::
ymin, ymax = ylim() # return the current ylim
ylim( (ymin, ymax) ) # set the ylim to ymin, ymax
ylim( ymin, ymax ) # set the ylim to ymin, ymax
If you do not specify args, you can pass the *ymin* and *ymax* as
kwargs, e.g.::
ylim(ymax=3) # adjust the max leaving min unchanged
ylim(ymin=1) # adjust the min leaving max unchanged
Setting limits turns autoscaling off for the y-axis.
The new axis limits are returned as a length 2 tuple.
"""
ax = gca()
if not args and not kwargs:
return ax.get_ylim()
ret = ax.set_ylim(*args, **kwargs)
draw_if_interactive()
return ret
@docstring.dedent_interpd
def xscale(*args, **kwargs):
"""
Set the scaling of the *x*-axis.
call signature::
xscale(scale, **kwargs)
The available scales are: %(scale)s
Different keywords may be accepted, depending on the scale:
%(scale_docs)s
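For example, a minimal sketch switching the current axes to a
logarithmic x scale::

import matplotlib.pyplot as plt
plt.plot([1, 10, 100], [1, 2, 3])
plt.xscale('log')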
"""
ax = gca()
ax.set_xscale(*args, **kwargs)
draw_if_interactive()
@docstring.dedent_interpd
def yscale(*args, **kwargs):
"""
Set the scaling of the *y*-axis.
call signature::
yscale(scale, **kwargs)
The available scales are: %(scale)s
Different keywords may be accepted, depending on the scale:
%(scale_docs)s
"""
ax = gca()
ax.set_yscale(*args, **kwargs)
draw_if_interactive()
def xticks(*args, **kwargs):
"""
Get or set the *x*-limits of the current tick locations and labels.
::
# return locs, labels where locs is an array of tick locations and
# labels is an array of tick labels.
locs, labels = xticks()
# set the locations of the xticks
xticks( arange(6) )
# set the locations and labels of the xticks
xticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )
The keyword args, if any, are :class:`~matplotlib.text.Text`
properties. For example, to rotate long labels::
xticks( arange(12), calendar.month_name[1:13], rotation=17 )
"""
ax = gca()
if len(args)==0:
locs = ax.get_xticks()
labels = ax.get_xticklabels()
elif len(args)==1:
locs = ax.set_xticks(args[0])
labels = ax.get_xticklabels()
elif len(args)==2:
locs = ax.set_xticks(args[0])
labels = ax.set_xticklabels(args[1], **kwargs)
else: raise TypeError('Illegal number of arguments to xticks')
if len(kwargs):
for l in labels:
l.update(kwargs)
draw_if_interactive()
return locs, silent_list('Text xticklabel', labels)
def yticks(*args, **kwargs):
"""
Get or set the *y*-limits of the current tick locations and labels.
::
# return locs, labels where locs is an array of tick locations and
# labels is an array of tick labels.
locs, labels = yticks()
# set the locations of the yticks
yticks( arange(6) )
# set the locations and labels of the yticks
yticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )
The keyword args, if any, are :class:`~matplotlib.text.Text`
properties. For example, to rotate long labels::
yticks( arange(12), calendar.month_name[1:13], rotation=45 )
"""
ax = gca()
if len(args)==0:
locs = ax.get_yticks()
labels = ax.get_yticklabels()
elif len(args)==1:
locs = ax.set_yticks(args[0])
labels = ax.get_yticklabels()
elif len(args)==2:
locs = ax.set_yticks(args[0])
labels = ax.set_yticklabels(args[1], **kwargs)
else: raise TypeError('Illegal number of arguments to yticks')
if len(kwargs):
for l in labels:
l.update(kwargs)
draw_if_interactive()
return ( locs,
silent_list('Text yticklabel', labels)
)
def minorticks_on():
"""
Display minor ticks on the current plot.
Displaying minor ticks reduces performance; turn them off using
minorticks_off() if drawing speed is a problem.
"""
gca().minorticks_on()
draw_if_interactive()
def minorticks_off():
"""
Remove minor ticks from the current plot.
"""
gca().minorticks_off()
draw_if_interactive()
def rgrids(*args, **kwargs):
"""
Get or set the radial gridlines on a polar plot.
call signatures::
lines, labels = rgrids()
lines, labels = rgrids(radii, labels=None, angle=22.5, **kwargs)
When called with no arguments, :func:`rgrids` simply returns the
tuple (*lines*, *labels*), where *lines* is an array of radial
gridlines (:class:`~matplotlib.lines.Line2D` instances) and
*labels* is an array of tick labels
(:class:`~matplotlib.text.Text` instances). When called with
arguments, the labels will appear at the specified radial
distances and angles.
*labels*, if not *None*, is a len(*radii*) list of strings of the
labels to use at each radial gridline.
If *labels* is None, the rformatter will be used
Examples::
# set the locations of the radial gridlines and labels
lines, labels = rgrids( (0.25, 0.5, 1.0) )
# set the locations and labels of the radial gridlines and labels
lines, labels = rgrids( (0.25, 0.5, 1.0), ('Tom', 'Dick', 'Harry') )
"""
ax = gca()
if not isinstance(ax, PolarAxes):
raise RuntimeError('rgrids only defined for polar axes')
if len(args)==0:
lines = ax.yaxis.get_gridlines()
labels = ax.yaxis.get_ticklabels()
else:
lines, labels = ax.set_rgrids(*args, **kwargs)
draw_if_interactive()
return ( silent_list('Line2D rgridline', lines),
silent_list('Text rgridlabel', labels) )
def thetagrids(*args, **kwargs):
"""
Get or set the theta locations of the gridlines in a polar plot.
If no arguments are passed, return a tuple (*lines*, *labels*)
where *lines* is an array of radial gridlines
(:class:`~matplotlib.lines.Line2D` instances) and *labels* is an
array of tick labels (:class:`~matplotlib.text.Text` instances)::
lines, labels = thetagrids()
Otherwise the syntax is::
lines, labels = thetagrids(angles, labels=None, fmt='%d', frac = 1.1)
set the angles at which to place the theta grids (these gridlines
are equal along the theta dimension).
*angles* is in degrees.
*labels*, if not *None*, is a len(angles) list of strings of the
labels to use at each angle.
If *labels* is *None*, the labels will be ``fmt%angle``.
*frac* is the fraction of the polar axes radius at which to place
the label (1 is the edge). e.g., 1.05 is outside the axes and 0.95
is inside the axes.
Return value is a list of tuples (*lines*, *labels*):
- *lines* are :class:`~matplotlib.lines.Line2D` instances
- *labels* are :class:`~matplotlib.text.Text` instances.
Note that on input, the *labels* argument is a list of strings,
and on output it is a list of :class:`~matplotlib.text.Text`
instances.
Examples::
# set the locations of the radial gridlines and labels
lines, labels = thetagrids( range(45,360,90) )
# set the locations and labels of the radial gridlines and labels
lines, labels = thetagrids( range(45,360,90), ('NE', 'NW', 'SW','SE') )
"""
ax = gca()
if not isinstance(ax, PolarAxes):
raise RuntimeError('thetagrids only defined for polar axes')
if len(args)==0:
lines = ax.xaxis.get_ticklines()
labels = ax.xaxis.get_ticklabels()
else:
lines, labels = ax.set_thetagrids(*args, **kwargs)
draw_if_interactive()
return (silent_list('Line2D thetagridline', lines),
silent_list('Text thetagridlabel', labels)
)
## Plotting Info ##
def plotting():
pass
def get_plot_commands():
"""
Get a sorted list of all of the plotting commands.
"""
# This works by searching for all functions in this module and
# removing a few hard-coded exclusions, as well as all of the
# colormap-setting functions, and anything marked as private with
# a preceding underscore.
import inspect
exclude = set(['colormaps', 'colors', 'connect', 'disconnect',
'get_plot_commands', 'get_current_fig_manager',
'ginput', 'plotting', 'waitforbuttonpress'])
exclude |= set(colormaps())
this_module = inspect.getmodule(get_plot_commands)
commands = set()
for name, obj in list(six.iteritems(globals())):
if name.startswith('_') or name in exclude:
continue
if inspect.isfunction(obj) and inspect.getmodule(obj) is this_module:
commands.add(name)
commands = list(commands)
commands.sort()
return commands
def colors():
"""
This is a do-nothing function to provide you with help on how
matplotlib handles colors.
Commands which take color arguments can use several formats to
specify the colors. For the basic built-in colors, you can use a
single letter
===== =======
Alias Color
===== =======
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
===== =======
For a greater range of colors, you have two options. You can
specify the color using an html hex string, as in::
color = '#eeefff'
or you can pass an R,G,B tuple, where each of R,G,B are in the
range [0,1].
You can also use any legal html name for a color, for example::
color = 'red'
color = 'burlywood'
color = 'chartreuse'
The example below creates a subplot with a dark
slate gray background::
subplot(111, axisbg=(0.1843, 0.3098, 0.3098))
Here is an example that creates a pale turquoise title::
title('Is this the best color?', color='#afeeee')
"""
pass
def colormaps():
"""
Matplotlib provides a number of colormaps, and others can be added using
:func:`~matplotlib.cm.register_cmap`. This function documents the built-in
colormaps, and will also return a list of all registered colormaps if called.
You can set the colormap for an image, pcolor, scatter, etc,
using a keyword argument::
imshow(X, cmap=cm.hot)
or using the :func:`set_cmap` function::
imshow(X)
pyplot.set_cmap('hot')
pyplot.set_cmap('jet')
In interactive mode, :func:`set_cmap` will update the colormap post-hoc,
allowing you to see which one works best for your data.
All built-in colormaps can be reversed by appending ``_r``: For instance,
``gray_r`` is the reverse of ``gray``.
There are several common color schemes used in visualization:
Sequential schemes
for unipolar data that progresses from low to high
Diverging schemes
for bipolar data that emphasizes positive or negative deviations from a
central value
Cyclic schemes
meant for plotting values that wrap around at the
endpoints, such as phase angle, wind direction, or time of day
Qualitative schemes
for nominal data that has no inherent ordering, where color is used
only to distinguish categories
The base colormaps are derived from those of the same name provided
with Matlab:
========= =======================================================
Colormap Description
========= =======================================================
autumn sequential linearly-increasing shades of red-orange-yellow
bone sequential increasing black-white color map with
a tinge of blue, to emulate X-ray film
cool linearly-decreasing shades of cyan-magenta
copper sequential increasing shades of black-copper
flag repetitive red-white-blue-black pattern (not cyclic at
endpoints)
gray sequential linearly-increasing black-to-white
grayscale
hot sequential black-red-yellow-white, to emulate blackbody
radiation from an object at increasing temperatures
hsv cyclic red-yellow-green-cyan-blue-magenta-red, formed
by changing the hue component in the HSV color space
jet a spectral map with dark endpoints, blue-cyan-yellow-red;
based on a fluid-jet simulation by NCSA [#]_
pink sequential increasing pastel black-pink-white, meant
for sepia tone colorization of photographs
prism repetitive red-yellow-green-blue-purple-...-green pattern
(not cyclic at endpoints)
spring linearly-increasing shades of magenta-yellow
summer sequential linearly-increasing shades of green-yellow
winter linearly-increasing shades of blue-green
========= =======================================================
For the above list only, you can also set the colormap using the
corresponding pylab shortcut interface function, similar to Matlab::
imshow(X)
hot()
jet()
The next set of palettes are from the `Yorick scientific visualisation
package <http://yorick.sourceforge.net/index.php>`_, an evolution of
the GIST package, both by David H. Munro:
============ =======================================================
Colormap Description
============ =======================================================
gist_earth mapmaker's colors from dark blue deep ocean to green
lowlands to brown highlands to white mountains
gist_heat sequential increasing black-red-orange-white, to emulate
blackbody radiation from an iron bar as it grows hotter
gist_ncar pseudo-spectral black-blue-green-yellow-red-purple-white
colormap from National Center for Atmospheric
Research [#]_
gist_rainbow runs through the colors in spectral order from red to
violet at full saturation (like *hsv* but not cyclic)
gist_stern "Stern special" color table from Interactive Data
Language software
============ =======================================================
The following colormaps are based on the `ColorBrewer
<http://colorbrewer.org>`_ color specifications and designs developed by
Cynthia Brewer:
ColorBrewer Diverging (luminance is highest at the midpoint, and
decreases towards differently-colored endpoints):
======== ===================================
Colormap Description
======== ===================================
BrBG brown, white, blue-green
PiYG pink, white, yellow-green
PRGn purple, white, green
PuOr orange, white, purple
RdBu red, white, blue
RdGy red, white, gray
RdYlBu red, yellow, blue
RdYlGn red, yellow, green
Spectral red, orange, yellow, green, blue
======== ===================================
ColorBrewer Sequential (luminance decreases monotonically):
======== ====================================
Colormap Description
======== ====================================
Blues white to dark blue
BuGn white, light blue, dark green
BuPu white, light blue, dark purple
GnBu white, light green, dark blue
Greens white to dark green
Greys white to black (not linear)
Oranges white, orange, dark brown
OrRd white, orange, dark red
PuBu white, light purple, dark blue
PuBuGn white, light purple, dark green
PuRd white, light purple, dark red
Purples white to dark purple
RdPu white, pink, dark purple
Reds white to dark red
YlGn light yellow, dark green
YlGnBu light yellow, light green, dark blue
YlOrBr light yellow, orange, dark brown
YlOrRd light yellow, orange, dark red
======== ====================================
ColorBrewer Qualitative:
(For plotting nominal data, :class:`ListedColormap` should be used,
not :class:`LinearSegmentedColormap`. Different sets of colors are
recommended for different numbers of categories. These continuous
versions of the qualitative schemes may be removed or converted in the
future.)
* Accent
* Dark2
* Paired
* Pastel1
* Pastel2
* Set1
* Set2
* Set3
Other miscellaneous schemes:
============= =======================================================
Colormap Description
============= =======================================================
afmhot sequential black-orange-yellow-white blackbody
spectrum, commonly used in atomic force microscopy
brg blue-red-green
bwr diverging blue-white-red
coolwarm diverging blue-gray-red, meant to avoid issues with 3D
shading, color blindness, and ordering of colors [#]_
CMRmap "Default colormaps on color images often reproduce to
confusing grayscale images. The proposed colormap
maintains an aesthetically pleasing color image that
automatically reproduces to a monotonic grayscale with
discrete, quantifiable saturation levels." [#]_
cubehelix Unlike most other color schemes cubehelix was designed
by D.A. Green to be monotonically increasing in terms
of perceived brightness. Also, when printed on a black
and white postscript printer, the scheme results in a
greyscale with monotonically increasing brightness.
This color scheme is named cubehelix because the r,g,b
values produced can be visualised as a squashed helix
around the diagonal in the r,g,b color cube.
gnuplot gnuplot's traditional pm3d scheme
(black-blue-red-yellow)
gnuplot2 sequential color printable as gray
(black-blue-violet-yellow-white)
ocean green-blue-white
rainbow spectral purple-blue-green-yellow-orange-red colormap
with diverging luminance
seismic diverging blue-white-red
nipy_spectral black-purple-blue-green-yellow-red-white spectrum,
originally from the Neuroimaging in Python project
terrain mapmaker's colors, blue-green-yellow-brown-white,
originally from IGOR Pro
============= =======================================================
The following colormaps are redundant and may be removed in future
versions. It's recommended to use the names in the descriptions
instead, which produce identical output:
========= =======================================================
Colormap Description
========= =======================================================
gist_gray identical to *gray*
gist_yarg identical to *gray_r*
binary identical to *gray_r*
spectral identical to *nipy_spectral* [#]_
========= =======================================================
.. rubric:: Footnotes
.. [#] Rainbow colormaps, ``jet`` in particular, are considered a poor
choice for scientific visualization by many researchers: `Rainbow Color
Map (Still) Considered Harmful
<http://www.jwave.vt.edu/%7Erkriz/Projects/create_color_table/color_07.pdf>`_
.. [#] Resembles "BkBlAqGrYeOrReViWh200" from NCAR Command
Language. See `Color Table Gallery
<http://www.ncl.ucar.edu/Document/Graphics/color_table_gallery.shtml>`_
.. [#] See `Diverging Color Maps for Scientific Visualization
<http://www.cs.unm.edu/~kmorel/documents/ColorMaps/>`_ by Kenneth
Moreland.
.. [#] See `A Color Map for Effective Black-and-White Rendering of
Color-Scale Images
<http://www.mathworks.com/matlabcentral/fileexchange/2662-cmrmap-m>`_
by Carey Rappaport
.. [#] Changed to distinguish from ColorBrewer's *Spectral* map.
:func:`spectral` still works, but
``set_cmap('nipy_spectral')`` is recommended for clarity.
"""
return sorted(cm.cmap_d.keys())
def _setup_pyplot_info_docstrings():
"""
Generates the plotting docstring.
This must be done after the entire module is imported, so it is
called from the end of this module, which is generated by
boilerplate.py.
"""
# Generate the plotting docstring
import re
def pad(s, l):
"""Pad string *s* to length *l*."""
if l < len(s):
return s[:l]
return s + ' ' * (l - len(s))
commands = get_plot_commands()
first_sentence = re.compile(r"(?:\s*).+?\.(?:\s+|$)", flags=re.DOTALL)
# Collect the first sentence of the docstring for all of the
# plotting commands.
rows = []
max_name = 0
max_summary = 0
for name in commands:
doc = globals()[name].__doc__
summary = ''
if doc is not None:
match = first_sentence.match(doc)
if match is not None:
summary = match.group(0).strip().replace('\n', ' ')
name = '`%s`' % name
rows.append([name, summary])
max_name = max(max_name, len(name))
max_summary = max(max_summary, len(summary))
lines = []
sep = '=' * max_name + ' ' + '=' * max_summary
lines.append(sep)
lines.append(' '.join([pad("Function", max_name),
pad("Description", max_summary)]))
lines.append(sep)
for name, summary in rows:
lines.append(' '.join([pad(name, max_name),
pad(summary, max_summary)]))
lines.append(sep)
plotting.__doc__ = '\n'.join(lines)
## Plotting part 1: manually generated functions and wrappers ##
def colorbar(mappable=None, cax=None, ax=None, **kw):
if mappable is None:
mappable = gci()
if mappable is None:
raise RuntimeError('No mappable was found to use for colorbar '
'creation. First define a mappable such as '
'an image (with imshow) or a contour set ('
'with contourf).')
if ax is None:
ax = gca()
ret = gcf().colorbar(mappable, cax = cax, ax=ax, **kw)
draw_if_interactive()
return ret
colorbar.__doc__ = matplotlib.colorbar.colorbar_doc
def clim(vmin=None, vmax=None):
"""
Set the color limits of the current image.
To apply clim to all axes images do::
clim(0, 0.5)
If either *vmin* or *vmax* is None, the image min/max respectively
will be used for color scaling.
If you want to set the clim of multiple images,
use, for example::
for im in gca().get_images():
im.set_clim(0, 0.05)
"""
im = gci()
if im is None:
raise RuntimeError('You must first define an image, e.g., with imshow')
im.set_clim(vmin, vmax)
draw_if_interactive()
def set_cmap(cmap):
"""
Set the default colormap. Applies to the current image if any.
See help(colormaps) for more information.
*cmap* must be a :class:`~matplotlib.colors.Colormap` instance, or
the name of a registered colormap.
See :func:`matplotlib.cm.register_cmap` and
:func:`matplotlib.cm.get_cmap`.
"""
cmap = cm.get_cmap(cmap)
rc('image', cmap=cmap.name)
im = gci()
if im is not None:
im.set_cmap(cmap)
draw_if_interactive()
@docstring.copy_dedent(_imread)
def imread(*args, **kwargs):
return _imread(*args, **kwargs)
@docstring.copy_dedent(_imsave)
def imsave(*args, **kwargs):
return _imsave(*args, **kwargs)
def matshow(A, fignum=None, **kw):
"""
Display an array as a matrix in a new figure window.
The origin is set at the upper left hand corner and rows (first
dimension of the array) are displayed horizontally. The aspect
ratio of the figure window is that of the array, unless this would
make an excessively short or narrow figure.
Tick labels for the xaxis are placed on top.
With the exception of *fignum*, keyword arguments are passed to
:func:`~matplotlib.pyplot.imshow`. You may set the *origin*
kwarg to "lower" if you want the first row in the array to be
at the bottom instead of the top.
*fignum*: [ None | integer | False ]
By default, :func:`matshow` creates a new figure window with
automatic numbering. If *fignum* is given as an integer, the
created figure will use this figure number. Because of how
:func:`matshow` tries to set the figure aspect ratio to be the
one of the array, if you provide the number of an already
existing figure, strange things may happen.
If *fignum* is *False* or 0, a new figure window will **NOT** be created.
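A short usage sketch (data are illustrative)::

import numpy as np
import matplotlib.pyplot as plt
A = np.random.rand(8, 8)
plt.matshow(A)            # new figure, origin at the upper left
plt.matshow(A, fignum=0)  # draw into the current axes instead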
"""
A = np.asanyarray(A)
if fignum is False or fignum is 0:
ax = gca()
else:
# Extract actual aspect ratio of array and make appropriately sized figure
fig = figure(fignum, figsize=figaspect(A))
ax = fig.add_axes([0.15, 0.09, 0.775, 0.775])
im = ax.matshow(A, **kw)
sci(im)
draw_if_interactive()
return im
def polar(*args, **kwargs):
"""
Make a polar plot.
call signature::
polar(theta, r, **kwargs)
Multiple *theta*, *r* arguments are supported, with format
strings, as in :func:`~matplotlib.pyplot.plot`.
"""
ax = gca(polar=True)
ret = ax.plot(*args, **kwargs)
draw_if_interactive()
return ret
def plotfile(fname, cols=(0,), plotfuncs=None,
comments='#', skiprows=0, checkrows=5, delimiter=',',
names=None, subplots=True, newfig=True, **kwargs):
"""
Plot the data in a file.
*cols* is a sequence of column identifiers to plot. An identifier
is either an int or a string. If it is an int, it indicates the
column number. If it is a string, it indicates the column header.
matplotlib will make column headers lower case, replace spaces with
underscores, and remove all illegal characters; so ``'Adj Close*'``
will have name ``'adj_close'``.
- If len(*cols*) == 1, only that column will be plotted on the *y* axis.
- If len(*cols*) > 1, the first element will be an identifier for
data for the *x* axis and the remaining elements will be the
column indexes for multiple subplots if *subplots* is *True*
(the default), or for lines in a single subplot if *subplots*
is *False*.
*plotfuncs*, if not *None*, is a dictionary mapping identifier to
an :class:`~matplotlib.axes.Axes` plotting function as a string.
Default is 'plot', other choices are 'semilogy', 'fill', 'bar',
etc. You must use the same type of identifier in the *cols*
vector as you use in the *plotfuncs* dictionary, e.g., integer
column numbers in both or column names in both. If *subplots*
is *False*, then including any function such as 'semilogy'
that changes the axis scaling will set the scaling for all
columns.
*comments*, *skiprows*, *checkrows*, *delimiter*, and *names*
are all passed on to :func:`matplotlib.pylab.csv2rec` to
load the data into a record array.
If *newfig* is *True*, the plot always will be made in a new figure;
if *False*, it will be made in the current figure if one exists,
else in a new figure.
kwargs are passed on to plotting functions.
Example usage::
# plot the 2nd and 4th column against the 1st in two subplots
plotfile(fname, (0,1,3))
# plot using column names; specify an alternate plot type for volume
plotfile(fname, ('date', 'volume', 'adj_close'),
plotfuncs={'volume': 'semilogy'})
Note: plotfile is intended as a convenience for quickly plotting
data from flat files; it is not intended as an alternative
interface to general plotting with pyplot or matplotlib.
"""
if newfig:
fig = figure()
else:
fig = gcf()
if len(cols)<1:
raise ValueError('must have at least one column of data')
if plotfuncs is None:
plotfuncs = dict()
r = mlab.csv2rec(fname, comments=comments, skiprows=skiprows,
checkrows=checkrows, delimiter=delimiter, names=names)
def getname_val(identifier):
'return the name and column data for identifier'
if is_string_like(identifier):
return identifier, r[identifier]
elif is_numlike(identifier):
name = r.dtype.names[int(identifier)]
return name, r[name]
else:
raise TypeError('identifier must be a string or integer')
xname, x = getname_val(cols[0])
ynamelist = []
if len(cols)==1:
ax1 = fig.add_subplot(1,1,1)
funcname = plotfuncs.get(cols[0], 'plot')
func = getattr(ax1, funcname)
func(x, **kwargs)
ax1.set_ylabel(xname)
else:
N = len(cols)
for i in range(1,N):
if subplots:
if i==1:
ax = ax1 = fig.add_subplot(N-1,1,i)
else:
ax = fig.add_subplot(N-1,1,i, sharex=ax1)
elif i==1:
ax = fig.add_subplot(1,1,1)
yname, y = getname_val(cols[i])
ynamelist.append(yname)
funcname = plotfuncs.get(cols[i], 'plot')
func = getattr(ax, funcname)
func(x, y, **kwargs)
if subplots:
ax.set_ylabel(yname)
if ax.is_last_row():
ax.set_xlabel(xname)
else:
ax.set_xlabel('')
if not subplots:
ax.legend(ynamelist, loc='best')
if xname=='date':
fig.autofmt_xdate()
draw_if_interactive()
def _autogen_docstring(base):
"""Autogenerated wrappers will get their docstring from a base function
with an addendum."""
msg = "\n\nAdditional kwargs: hold = [True|False] overrides default hold state"
addendum = docstring.Appender(msg, '\n\n')
return lambda func: addendum(docstring.copy_dedent(base)(func))
# This function cannot be generated by boilerplate.py because it may
# return an image or a line.
@_autogen_docstring(Axes.spy)
def spy(Z, precision=0, marker=None, markersize=None, aspect='equal', hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.spy(Z, precision, marker, markersize, aspect, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
if isinstance(ret, cm.ScalarMappable):
sci(ret)
return ret
################# REMAINING CONTENT GENERATED BY boilerplate.py ##############
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.acorr)
def acorr(x, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.acorr(x, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.angle_spectrum)
def angle_spectrum(x, Fs=None, Fc=None, window=None, pad_to=None, sides=None,
hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.angle_spectrum(x, Fs=Fs, Fc=Fc, window=window, pad_to=pad_to,
sides=sides, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.arrow)
def arrow(x, y, dx, dy, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.arrow(x, y, dx, dy, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.axhline)
def axhline(y=0, xmin=0, xmax=1, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.axhline(y=y, xmin=xmin, xmax=xmax, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.axhspan)
def axhspan(ymin, ymax, xmin=0, xmax=1, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.axhspan(ymin, ymax, xmin=xmin, xmax=xmax, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.axvline)
def axvline(x=0, ymin=0, ymax=1, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.axvline(x=x, ymin=ymin, ymax=ymax, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.axvspan)
def axvspan(xmin, xmax, ymin=0, ymax=1, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.axvspan(xmin, xmax, ymin=ymin, ymax=ymax, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.bar)
def bar(left, height, width=0.8, bottom=None, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.bar(left, height, width=width, bottom=bottom, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.barh)
def barh(bottom, width, height=0.8, left=None, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.barh(bottom, width, height=height, left=left, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.broken_barh)
def broken_barh(xranges, yrange, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.broken_barh(xranges, yrange, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.boxplot)
def boxplot(x, notch=False, sym=None, vert=True, whis=1.5, positions=None,
widths=None, patch_artist=False, bootstrap=None, usermedians=None,
conf_intervals=None, meanline=False, showmeans=False, showcaps=True,
showbox=True, showfliers=True, boxprops=None, labels=None,
flierprops=None, medianprops=None, meanprops=None, capprops=None,
whiskerprops=None, manage_xticks=True, hold=None):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.boxplot(x, notch=notch, sym=sym, vert=vert, whis=whis,
positions=positions, widths=widths,
patch_artist=patch_artist, bootstrap=bootstrap,
usermedians=usermedians,
conf_intervals=conf_intervals, meanline=meanline,
showmeans=showmeans, showcaps=showcaps,
showbox=showbox, showfliers=showfliers,
boxprops=boxprops, labels=labels,
flierprops=flierprops, medianprops=medianprops,
meanprops=meanprops, capprops=capprops,
whiskerprops=whiskerprops, manage_xticks=manage_xticks)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.cohere)
def cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default',
scale_by_freq=None, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.cohere(x, y, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend,
window=window, noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.clabel)
def clabel(CS, *args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.clabel(CS, *args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.contour)
def contour(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.contour(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
if ret._A is not None: sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.contourf)
def contourf(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.contourf(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
if ret._A is not None: sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.csd)
def csd(x, y, NFFT=None, Fs=None, Fc=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None,
return_line=None, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.csd(x, y, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend,
window=window, noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq,
return_line=return_line, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.errorbar)
def errorbar(x, y, yerr=None, xerr=None, fmt='', ecolor=None, elinewidth=None,
capsize=3, barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, errorevery=1, capthick=None,
hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.errorbar(x, y, yerr=yerr, xerr=xerr, fmt=fmt, ecolor=ecolor,
elinewidth=elinewidth, capsize=capsize,
barsabove=barsabove, lolims=lolims, uplims=uplims,
xlolims=xlolims, xuplims=xuplims,
errorevery=errorevery, capthick=capthick, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.eventplot)
def eventplot(positions, orientation='horizontal', lineoffsets=1, linelengths=1,
linewidths=None, colors=None, linestyles='solid', hold=None,
**kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.eventplot(positions, orientation=orientation,
lineoffsets=lineoffsets, linelengths=linelengths,
linewidths=linewidths, colors=colors,
linestyles=linestyles, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.fill)
def fill(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.fill(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.fill_between)
def fill_between(x, y1, y2=0, where=None, interpolate=False, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.fill_between(x, y1, y2=y2, where=where,
interpolate=interpolate, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.fill_betweenx)
def fill_betweenx(y, x1, x2=0, where=None, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.fill_betweenx(y, x1, x2=x2, where=where, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.hexbin)
def hexbin(x, y, C=None, gridsize=100, bins=None, xscale='linear',
yscale='linear', extent=None, cmap=None, norm=None, vmin=None,
vmax=None, alpha=None, linewidths=None, edgecolors='none',
reduce_C_function=np.mean, mincnt=None, marginals=False, hold=None,
**kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.hexbin(x, y, C=C, gridsize=gridsize, bins=bins, xscale=xscale,
yscale=yscale, extent=extent, cmap=cmap, norm=norm,
vmin=vmin, vmax=vmax, alpha=alpha,
linewidths=linewidths, edgecolors=edgecolors,
reduce_C_function=reduce_C_function, mincnt=mincnt,
marginals=marginals, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.hist)
def hist(x, bins=10, range=None, normed=False, weights=None, cumulative=False,
bottom=None, histtype='bar', align='mid', orientation='vertical',
rwidth=None, log=False, color=None, label=None, stacked=False,
hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.hist(x, bins=bins, range=range, normed=normed,
weights=weights, cumulative=cumulative, bottom=bottom,
histtype=histtype, align=align, orientation=orientation,
rwidth=rwidth, log=log, color=color, label=label,
stacked=stacked, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.hist2d)
def hist2d(x, y, bins=10, range=None, normed=False, weights=None, cmin=None,
cmax=None, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.hist2d(x, y, bins=bins, range=range, normed=normed,
weights=weights, cmin=cmin, cmax=cmax, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret[-1])
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.hlines)
def hlines(y, xmin, xmax, colors='k', linestyles='solid', label='', hold=None,
**kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.hlines(y, xmin, xmax, colors=colors, linestyles=linestyles,
label=label, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.imshow)
def imshow(X, cmap=None, norm=None, aspect=None, interpolation=None, alpha=None,
vmin=None, vmax=None, origin=None, extent=None, shape=None,
filternorm=1, filterrad=4.0, imlim=None, resample=None, url=None,
hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.imshow(X, cmap=cmap, norm=norm, aspect=aspect,
interpolation=interpolation, alpha=alpha, vmin=vmin,
vmax=vmax, origin=origin, extent=extent, shape=shape,
filternorm=filternorm, filterrad=filterrad,
imlim=imlim, resample=resample, url=url, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.loglog)
def loglog(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.loglog(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.magnitude_spectrum)
def magnitude_spectrum(x, Fs=None, Fc=None, window=None, pad_to=None,
sides=None, scale=None, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.magnitude_spectrum(x, Fs=Fs, Fc=Fc, window=window,
pad_to=pad_to, sides=sides, scale=scale,
**kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.pcolor)
def pcolor(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.pcolor(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.pcolormesh)
def pcolormesh(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.pcolormesh(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.phase_spectrum)
def phase_spectrum(x, Fs=None, Fc=None, window=None, pad_to=None, sides=None,
hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.phase_spectrum(x, Fs=Fs, Fc=Fc, window=window, pad_to=pad_to,
sides=sides, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.pie)
def pie(x, explode=None, labels=None, colors=None, autopct=None,
pctdistance=0.6, shadow=False, labeldistance=1.1, startangle=None,
radius=None, counterclock=True, wedgeprops=None, textprops=None,
hold=None):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.pie(x, explode=explode, labels=labels, colors=colors,
autopct=autopct, pctdistance=pctdistance, shadow=shadow,
labeldistance=labeldistance, startangle=startangle,
radius=radius, counterclock=counterclock,
wedgeprops=wedgeprops, textprops=textprops)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.plot)
def plot(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.plot(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.plot_date)
def plot_date(x, y, fmt='o', tz=None, xdate=True, ydate=False, hold=None,
**kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.plot_date(x, y, fmt=fmt, tz=tz, xdate=xdate, ydate=ydate,
**kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.psd)
def psd(x, NFFT=None, Fs=None, Fc=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None,
return_line=None, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.psd(x, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend,
window=window, noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq,
return_line=return_line, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.quiver)
def quiver(*args, **kw):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kw.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.quiver(*args, **kw)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.quiverkey)
def quiverkey(*args, **kw):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kw.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.quiverkey(*args, **kw)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.scatter)
def scatter(x, y, s=20, c='b', marker='o', cmap=None, norm=None, vmin=None,
vmax=None, alpha=None, linewidths=None, verts=None, hold=None,
**kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.scatter(x, y, s=s, c=c, marker=marker, cmap=cmap, norm=norm,
vmin=vmin, vmax=vmax, alpha=alpha,
linewidths=linewidths, verts=verts, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.semilogx)
def semilogx(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.semilogx(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.semilogy)
def semilogy(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.semilogy(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.specgram)
def specgram(x, NFFT=None, Fs=None, Fc=None, detrend=None, window=None,
noverlap=None, cmap=None, xextent=None, pad_to=None, sides=None,
scale_by_freq=None, mode=None, scale=None, vmin=None, vmax=None,
hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.specgram(x, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend,
window=window, noverlap=noverlap, cmap=cmap,
xextent=xextent, pad_to=pad_to, sides=sides,
scale_by_freq=scale_by_freq, mode=mode, scale=scale,
vmin=vmin, vmax=vmax, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret[-1])
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.stackplot)
def stackplot(x, *args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.stackplot(x, *args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.stem)
def stem(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.stem(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.step)
def step(x, y, *args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.step(x, y, *args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.streamplot)
def streamplot(x, y, u, v, density=1, linewidth=None, color=None, cmap=None,
norm=None, arrowsize=1, arrowstyle='-|>', minlength=0.1,
transform=None, zorder=1, hold=None):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.streamplot(x, y, u, v, density=density, linewidth=linewidth,
color=color, cmap=cmap, norm=norm,
arrowsize=arrowsize, arrowstyle=arrowstyle,
minlength=minlength, transform=transform,
zorder=zorder)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret.lines)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.tricontour)
def tricontour(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.tricontour(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
if ret._A is not None: sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.tricontourf)
def tricontourf(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.tricontourf(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
if ret._A is not None: sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.tripcolor)
def tripcolor(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.tripcolor(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.triplot)
def triplot(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.triplot(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.violinplot)
def violinplot(dataset, positions=None, vert=True, widths=0.5, showmeans=False,
showextrema=True, showmedians=False, points=100, bw_method=None,
hold=None):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.violinplot(dataset, positions=positions, vert=vert,
widths=widths, showmeans=showmeans,
showextrema=showextrema, showmedians=showmedians,
points=points, bw_method=bw_method)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.vlines)
def vlines(x, ymin, ymax, colors='k', linestyles='solid', label='', hold=None,
**kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.vlines(x, ymin, ymax, colors=colors, linestyles=linestyles,
label=label, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.xcorr)
def xcorr(x, y, normed=True, detrend=mlab.detrend_none, usevlines=True,
maxlags=10, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.xcorr(x, y, normed=normed, detrend=detrend,
usevlines=usevlines, maxlags=maxlags, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.barbs)
def barbs(*args, **kw):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kw.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.barbs(*args, **kw)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.cla)
def cla():
ret = gca().cla()
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.grid)
def grid(b=None, which='major', axis='both', **kwargs):
ret = gca().grid(b=b, which=which, axis=axis, **kwargs)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.legend)
def legend(*args, **kwargs):
ret = gca().legend(*args, **kwargs)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.table)
def table(**kwargs):
ret = gca().table(**kwargs)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.text)
def text(x, y, s, fontdict=None, withdash=False, **kwargs):
ret = gca().text(x, y, s, fontdict=fontdict, withdash=withdash, **kwargs)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.annotate)
def annotate(*args, **kwargs):
ret = gca().annotate(*args, **kwargs)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.ticklabel_format)
def ticklabel_format(**kwargs):
ret = gca().ticklabel_format(**kwargs)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.locator_params)
def locator_params(axis='both', tight=None, **kwargs):
ret = gca().locator_params(axis=axis, tight=tight, **kwargs)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.tick_params)
def tick_params(axis='both', **kwargs):
ret = gca().tick_params(axis=axis, **kwargs)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.margins)
def margins(*args, **kw):
ret = gca().margins(*args, **kw)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.autoscale)
def autoscale(enable=True, axis='both', tight=None):
ret = gca().autoscale(enable=enable, axis=axis, tight=tight)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def autumn():
'''
set the default colormap to autumn and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='autumn')
im = gci()
if im is not None:
im.set_cmap(cm.autumn)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def bone():
'''
set the default colormap to bone and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='bone')
im = gci()
if im is not None:
im.set_cmap(cm.bone)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cool():
'''
set the default colormap to cool and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='cool')
im = gci()
if im is not None:
im.set_cmap(cm.cool)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def copper():
'''
set the default colormap to copper and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='copper')
im = gci()
if im is not None:
im.set_cmap(cm.copper)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def flag():
'''
set the default colormap to flag and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='flag')
im = gci()
if im is not None:
im.set_cmap(cm.flag)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def gray():
'''
set the default colormap to gray and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='gray')
im = gci()
if im is not None:
im.set_cmap(cm.gray)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hot():
'''
set the default colormap to hot and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='hot')
im = gci()
if im is not None:
im.set_cmap(cm.hot)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hsv():
'''
set the default colormap to hsv and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='hsv')
im = gci()
if im is not None:
im.set_cmap(cm.hsv)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def jet():
'''
set the default colormap to jet and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='jet')
im = gci()
if im is not None:
im.set_cmap(cm.jet)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pink():
'''
set the default colormap to pink and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='pink')
im = gci()
if im is not None:
im.set_cmap(cm.pink)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def prism():
'''
set the default colormap to prism and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='prism')
im = gci()
if im is not None:
im.set_cmap(cm.prism)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spring():
'''
set the default colormap to spring and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='spring')
im = gci()
if im is not None:
im.set_cmap(cm.spring)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def summer():
'''
set the default colormap to summer and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='summer')
im = gci()
if im is not None:
im.set_cmap(cm.summer)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def winter():
'''
set the default colormap to winter and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='winter')
im = gci()
if im is not None:
im.set_cmap(cm.winter)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spectral():
'''
set the default colormap to spectral and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='spectral')
im = gci()
if im is not None:
im.set_cmap(cm.spectral)
draw_if_interactive()
_setup_pyplot_info_docstrings()
| mit |
igryski/TRMM_blend | src/satellite/plot_TRMM_precip_correct_map.py | 1 | 12327 | # Make python script executable
#!/usr/bin/python
# pylint: disable=C0103
# pylint: disable-msg=C0103
# Script produces TRMM precip filtered using update (Zhong) Land Sea mask
# ==========================================
# Author: I.Stepanov (igor.stepanov@knmi.nl)
# 22.04.2016 @KNMI
# ============================================================================================
# Updates list
# 22.04.2016. Script created as a derivative of plotting TRMM Land Sea Mask
# ============================================================================================
# Load python modules
import netCDF4
import pylab as pl
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from netCDF4 import Dataset
from pylab import *
import math as m
from mpl_toolkits.basemap import Basemap, cm
np.set_printoptions(threshold='nan') # print full array
# Define some paths
# ==========================================================================================
in_path = "/nobackup/users/stepanov/TRMM_data/nc/annual_files/cropped/"
in_path_rr_SACA = "/nobackup/users/stepanov/SACA/final/"
in_path_lsmsk_TRMM = "/nobackup/users/stepanov/TRMM_data/Land_Sea_Mask/"
# Files
#===========================================================================================
# Precip TRMM
#
#file_name='3B42_daily.2013_georef_SACA.nc'
file_name = '3B42_daily.2000_georef_SACA.nc'
# ncks -d latitude,-24.875,25.125 -d longitude,80.125,179.875 3B42_daily.2015.12.29.7.nc
# 3B42_daily.2015.12.29.7_georef_SACA.nc
# Precip SACA rr
file_r_SACA = 'rr_0.25deg_regular.nc'
# Land Sea Mask TRMM update by Zhong Liu, Ph.D. Zhong.Liu-1@nasa.gov, remapped as NN to TRMM r
file_lsm_TRMM_cdo_to_SACA_coords = 'TMPA_land_sea_mask_georef_SACA.nc'
#ncks -d lat,-24.875,25.125 -d lon,80.125,179.875 TMPA_land_sea_mask.nc
# TMPA_land_sea_mask_georef_SACA.nc
#===========================================================================================
# Full file paths
#===========================================================================================
file_pr = [in_path+file_name]
file_rr_SACA = [in_path_rr_SACA+file_r_SACA]
file_lsmask_TRMM_cdo_to_SACA = [in_path_lsmsk_TRMM+file_lsm_TRMM_cdo_to_SACA_coords]
# Review imported file paths in log
print "Location of TRMM precipitation file is: ", file_pr
print
print
print "Location of SACA precip file is: ", file_rr_SACA
print
print
print "Location of TRMM land-sea mask file is: ", file_lsmask_TRMM_cdo_to_SACA
print
print
#===========================================================================================
# Define paths to NC files
#===========================================================================================
# Precip and elevation (Land Sea Mask)
nc_trmm = Dataset(in_path+file_name, 'r')
# [latitude, longitude][201x400]
nc_SACA_rr = Dataset(in_path_rr_SACA+file_r_SACA, 'r')
# [longitude, latitude][400x201]
nc_lsmask_trmm = Dataset(in_path_lsmsk_TRMM+file_lsm_TRMM_cdo_to_SACA_coords)
# new LS maks by Zhong Liu
# Coordinates for TRMM
lons = nc_trmm.variables['longitude']
lats = nc_trmm.variables['latitude']
# Coordinates for SACA
lons_saca = nc_SACA_rr.variables['longitude']
lats_saca = nc_SACA_rr.variables['latitude']
# Coordinates for LS mask
lons_ls_mask = nc_lsmask_trmm.variables['lon'][:]
lats_ls_mask = nc_lsmask_trmm.variables['lat'][:]
print 'lats_ls_mask', lats_ls_mask
# =======================================================================================
# Extract the actual variable
# For TRMM data go from 1-365 in ncview, but python counts 0-364
#
# INDIVIDUAL DAY
#
# =======================================================================================
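# A small hypothetical helper (not part of the original script) to turn a
# calendar date into the 0-based time index used below, assuming daily time
# steps starting on 1 January of the loaded year (ncview shows index + 1):
import datetime as dt
def day_index(year, month, day):
    # days elapsed since 1 January of the same year
    return (dt.date(year, month, day) - dt.date(year, 1, 1)).days
# e.g. day_index(2000, 6, 10) -> 161, the index used below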
# trmm_precip = nc_trmm.variables['r'][89,:,:]
# [time, lat, lon], 0= 01.01.2013 (python). 90 is 31st March ideally.
trmm_precip = nc_trmm.variables['r'][161, :, :]
# [time, lat, lon], 0 = 1 January of the loaded year (here 2000); index 161 is 10th June 2000.
saca_precip = nc_SACA_rr.variables['rr'][11688, :, :]
# 11688 = 01.Jan.2013. (python)
trmm_lsmask = nc_lsmask_trmm.variables['landseamask'][:, :]
# [landseamask, latitude, longitude]
# =======================================================================================
# 1-12418 in ncview, but python counts 0-12417
# Accumulated period
# =======================================================================================
# Import entire year of precip data now
trmm_precip_array = nc_trmm.variables['r'][:, :, :]
# [time, lat, lon], all daily time steps of the loaded year (2000)
trmm_precip_array_2 = nc_trmm.variables['r'][:, :, :]
# =======================================================================================
print
#print 'precip array 2013', trmm_precip_array
print
#print 'precip array 2013_2', trmm_precip_array_2
print
#print 'precip array 2013_2 - precip array 2013', trmm_precip_array_2-trmm_precip_array
#quit()
# Data pre-processing
#===========================================================================================
# Pre-process TRMM land sea mask
#==================================
# # Define fill_value
# fill_value=-999.9
# # All land points convert to 1
# trmm_lsmask[trmm_lsmask!=100]=1.
# # All sea points convert to fill_value (-999.9)
# trmm_lsmask[trmm_lsmask==100]=fill_value
# # New mask should now be: 1=land, fill_value=sea
# # Multiply with TRMM data when plotting
# # Print new TRMM mask (1, fill_value only!)
# print 'TRMM land sea mask',trmm_lsmask
# print
# Do the same with new TRMM land sea mask (cdo remapnn to SACA coordinates)
# Pre-process SACA land sea mask
#==================================
# # All land points convert to 1
# trmm_lsmask_cdo[trmm_lsmask_cdo!=100]=1.
# # All sea points convert to fill_value (-999.9)
# trmm_lsmask_cdo[trmm_lsmask_cdo==100]=fill_value
# # New mask should now be: 1=land, fill_value=sea
# # Multiply with TRMM data when plotting
# # Print new TRMM mask (1, fill_value only!)
# print 'TRMM land sea mask CDO to SACA',trmm_lsmask_cdo
# print
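# A minimal runnable sketch of the masking idea spelled out in the comments
# above (illustrative only; the active code further below instead sets sea
# points, landseamask == 100, to NaN): land -> 1, sea -> fill value, so that
# precip * mask keeps land-only values.
fill_value_sketch = -999.9
lsmask_sketch = np.where(trmm_lsmask != 100.0, 1.0, fill_value_sketch)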
# Design FIGURE
# ================================================================
xsize = 20
ysize = 10
fig = plt.figure(figsize=(xsize, ysize))
# Map projection
# ================================================================
# Experimental to match coast line better with TRMM orography
m = Basemap(projection='gall',
# lat_0=0.125, lon_0=130,
llcrnrlon=80.125, llcrnrlat=-24.875,
urcrnrlon=179.875, urcrnrlat=25.125,
# fix_aspect=True,
area_thresh=100.0,
resolution='i')
m.drawcoastlines(linewidth=0.75)
m.drawcountries(linewidth=0.75)
# draw parallels.
parallels = np.arange(-40., 40, 10.)
m.drawparallels(parallels, labels=[1, 0, 0, 0], fontsize=10)
# draw meridians
meridians = np.arange(80., 180., 10.)
m.drawmeridians(meridians, labels=[0, 0, 0, 1], fontsize=10)
# Colorbar with NSW Precip colors
nws_precip_colors = [
"#04e9e7", # 0.01 - 0.10 inches
"#019ff4", # 0.10 - 0.25 inches
"#0300f4", # 0.25 - 0.50 inches
"#02fd02", # 0.50 - 0.75 inches
"#01c501", # 0.75 - 1.00 inches
"#008e00", # 1.00 - 1.50 inches
"#fdf802", # 1.50 - 2.00 inches
"#e5bc00", # 2.00 - 2.50 inches
"#fd9500", # 2.50 - 3.00 inches
"#fd0000", # 3.00 - 4.00 inches
"#d40000", # 4.00 - 5.00 inches
"#bc0000", # 5.00 - 6.00 inches
"#f800fd", # 6.00 - 8.00 inches
"#9854c6", # 8.00 - 10.00 inches
"#fdfdfd" # 10.00+
]
precip_colormap = matplotlib.colors.ListedColormap(nws_precip_colors)
# Make grid for TRMM data
# ny = trmm_precip.shape[0]
# nx = trmm_precip.shape[1]
# lons, lats = m.makegrid(nx, ny) # get lat/lons of ny by nx evenly spaced grid.
# x, y = m(lons, lats) # compute map proj coordinates.
# Alternative grid
lonst, latst = np.meshgrid(lons, lats)
x, y = m(lonst, latst)
# Make grid for SACA data
ny_saca = saca_precip.shape[0]
nx_saca = saca_precip.shape[1]
lons_saca, lats_saca = m.makegrid(nx_saca, ny_saca)
x_saca, y_saca = m(lons_saca, lats_saca)
# Make grid for TRMM Land Sea mask (updated)
lons_mask, lats_mask = np.meshgrid(lons_ls_mask, lats_ls_mask)
x_mask, y_mask = m(lons_mask, lats_mask)
print 'lons_mask', lons_mask
print 'lats_mask', lats_mask
print
# ================================================================
# Actual plotting and rendering
# ================================================================
# # Alternative SACA NaN removal
# #
# # where_are_NaNs = isnan(saca_precip)
# # saca_precip[where_are_NaNs] = 0
# print 'SACA LS mask is: ', saca_precip
# print
# clevs_saca_oro=(0.0,1.0)
# #cs = m.contourf(x_saca,y_saca,saca_precip,clevs_saca_oro,cmap=cm.s3pcpn)
# #cs = m.contourf(x_saca,y_saca,trmm_lsmask-(saca_precip*0.0+5.0),cmap=cm.s3pcpn)
# cs = m.contourf(x_saca,y_saca,trmm_lsmask-saca_precip,cmap=cm.s3pcpn)
# cs = m.contourf(x_saca,y_saca,trmm_lsmask-(saca_precip*0.0+5.0),clevs,cmap=cm.s3pcpn)
# cbar = m.colorbar(cs)
# plt.title('TRMM min SACA [precip] Land Sea Mask (01. January 2014)', size=26)
# savefig('plots/Land_sea_mask_TRMM_min_SACA_precip.png',optimize=True,quality=85,dpi=900)
#SACA LS mask
# ===============
# cs = m.contourf(x_saca,y_saca,saca_precip*0.0+5.0,clevs,cmap=cm.s3pcpn)
# cbar = m.colorbar(cs)
# plt.title('SACA precip Land Sea Mask (01. January 2013)', size=26)
# savefig('plots/Land_sea_mask_SACA_precip.png',optimize=True,quality=85,dpi=900)
# TRMM LS mask
# Process TRMM_LS_mask so that only land points are used
#
trmm_lsmask[trmm_lsmask==100.0]=np.NaN
# clevs_oro=[0.0,5.0,10.0]
# cs = m.contourf(x,y,trmm_lsmask,clevs_oro)
# cs = m.contourf(x,y,trmm_lsmask,clevs,cmap=cm.s3pcpn)
# Add colorbar
# cbar = m.colorbar(cs)
# Add title
# plt.title('TRMM (NASA) land-sea mask for precip (01. January 2013))', size=26)
# Set label
# savefig('plots/Land_sea_mask_TRMM_precip.png',optimize=True,quality=85,dpi=900)
# TRMM LS mask, when CDO remapped to SACA
# Process TRMM_LS_mask so that only land points have values
#
# trmm_lsmask_cdo[trmm_lsmask_cdo==100.0]=np.NaN
# Updated LS Mask by NASA
#cs = m.pcolor(x,y,trmm_lsmask_update)
#cs= m.pcolormesh(x,y,trmm_lsmask_update)
#cs =m.contourf(x_mask,y_mask,trmm_lsmask,clevs_wat_perc)
# Update TRMM precip using new LS mask that is correct (Zhong, NASA)
# ---------------------------------------------------------------------------------------
# Without LS mask
# cs =m.contourf(x,y,trmm_precip,clevs_precip,cmap=cm.s3pcpn)
# With LS mask
# cs =m.contourf(x,y,trmm_precip*trmm_lsmask,clevs_precip,cmap=cm.s3pcpn)
# Used last time
# clevs_precip_white_zero_SACA = [-0.5,0,0.1,0.5,2.5,5,7.5,10,15,20,30,40,50,100]
# New clevel upgrade
# clevs_precip_white_zero_SACA = [0,0.1,0.5,2.5,5,7.5,10,15,20,30,40,50,100]#,200,250]
# From original color map
clevs_precip_white_zero_SACA = [0.01, 0.1, 0.25, 0.50, 0.75, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0, 5.0,
6.0]
cs = m.contourf(x, y, trmm_precip*trmm_lsmask,
clevs_precip_white_zero_SACA,
cmap=precip_colormap)
# cs =m.contourf(x,y,trmm_precip*trmm_lsmask,
# clevs_precip_white_zero,
# cmap=plt.cm.jet,
# ticks=[0,1,2.5,7.5,10,15,30,50,100,150])
# Heavy rain, sum over a year
#
# cs = m.contourf(x,y,trmm_precip_array*trmm_lsmask,clevs_precip_med_heavy)
# cs = m.contourf(x,y,trmm_precip_array*trmm_lsmask,clevs_precip) # entire year
# cs = m.pcolormesh(x,y,trmm_precip_array+100.0*trmm_lsmask)
# ---------------------------------------------------------------------------------------
# Add colorbar
# cbar =m.colorbar(cs,ticks=[0,0.1,0.5,2.5,5.0,7.5,10,15,20,30,40,50,100]) #
cbar = m.colorbar(cs)
# Colorbar units
cbar.set_label('Rainfall [mm]', fontsize=16)
# Title
#
# plt.title('TRMM precipitation | w/ Land Sea Mask | 31.03.2000', size=26)
plt.title('TRMM precipitation | w/ Land Sea Mask | 10.06.2010', size=20)
# Save plot as PNG
# ------------------------------------------------------------------------------------
# With LS mask one day
#
# savefig('plots/Precip_TRMM_from_LS_mask_update_contourf_new_lat_0_correct_grid_w_LS_mask_'
# 'IN_10062010_white_zero_mm_min_one day.png',
# bbox_inches='tight',
# optimize=True,
# quality=85,
# dpi=300)
plt.show()
quit()
| gpl-3.0 |
mne-tools/mne-tools.github.io | stable/_downloads/20d7549c5bd5bd8e5e6da29e3c3241e4/10_background_stats.py | 10 | 29158 | # -*- coding: utf-8 -*-
"""
.. _disc-stats:
=====================
Statistical inference
=====================
Here we will briefly cover multiple concepts of inferential statistics in an
introductory manner, and demonstrate how to use some MNE statistical functions.
"""
# Authors: Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
from functools import partial
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa, analysis:ignore
import mne
from mne.stats import (ttest_1samp_no_p, bonferroni_correction, fdr_correction,
permutation_t_test, permutation_cluster_1samp_test)
print(__doc__)
###############################################################################
# Hypothesis testing
# ------------------
# Null hypothesis
# ^^^^^^^^^^^^^^^
# From `Wikipedia <https://en.wikipedia.org/wiki/Null_hypothesis>`__:
#
# In inferential statistics, a general statement or default position that
# there is no relationship between two measured phenomena, or no
# association among groups.
#
# We typically want to reject a **null hypothesis** with
# some probability (e.g., p < 0.05). This probability is also called the
# significance level :math:`\alpha`.
# To think about what this means, let's follow the illustrative example from
# :footcite:`RidgwayEtAl2012` and construct a toy dataset consisting of a
# 40 x 40 square with a "signal" present in the center with white noise added
# and a Gaussian smoothing kernel applied.
width = 40
n_subjects = 10
signal_mean = 100
signal_sd = 100
noise_sd = 0.01
gaussian_sd = 5
sigma = 1e-3 # sigma for the "hat" method
n_permutations = 'all' # run an exact test
n_src = width * width
# For each "subject", make a smoothed noisy signal with a centered peak
rng = np.random.RandomState(2)
X = noise_sd * rng.randn(n_subjects, width, width)
# Add a signal at the center
X[:, width // 2, width // 2] = signal_mean + rng.randn(n_subjects) * signal_sd
# Spatially smooth with a 2D Gaussian kernel
size = width // 2 - 1
gaussian = np.exp(-(np.arange(-size, size + 1) ** 2 / float(gaussian_sd ** 2)))
for si in range(X.shape[0]):
for ri in range(X.shape[1]):
X[si, ri, :] = np.convolve(X[si, ri, :], gaussian, 'same')
for ci in range(X.shape[2]):
X[si, :, ci] = np.convolve(X[si, :, ci], gaussian, 'same')
###############################################################################
# The data averaged over all subjects looks like this:
fig, ax = plt.subplots()
ax.imshow(X.mean(0), cmap='inferno')
ax.set(xticks=[], yticks=[], title="Data averaged over subjects")
###############################################################################
# In this case, a null hypothesis we could test for each voxel is:
#
# There is no difference between the mean value and zero
# (:math:`H_0 \colon \mu = 0`).
#
# The alternative hypothesis, then, is that the voxel has a non-zero mean
# (:math:`H_1 \colon \mu \neq 0`).
# This is a *two-tailed* test because the mean could be less than
# or greater than zero, whereas a *one-tailed* test would test only one of
# these possibilities, i.e. :math:`H_1 \colon \mu \geq 0` or
# :math:`H_1 \colon \mu \leq 0`.
#
# .. note:: Here we will refer to each spatial location as a "voxel".
# In general, though, it could be any sort of data value,
# including cortical vertex at a specific time, pixel in a
# time-frequency decomposition, etc.
#
# Parametric tests
# ^^^^^^^^^^^^^^^^
# Let's start with a **paired t-test**, which is a standard test
# for differences in paired samples. Mathematically, it is equivalent
# to a 1-sample t-test on the difference between the samples in each condition.
# The paired t-test is **parametric**
# because it assumes that the underlying sample distribution is Gaussian, and
# is only valid in this case. This happens to be satisfied by our toy dataset,
# but is not always satisfied for neuroimaging data.
#
# In the context of our toy dataset, which has many voxels
# (:math:`40 \cdot 40 = 1600`), applying the paired t-test is called a
# *mass-univariate* approach as it treats each voxel independently.
titles = ['t']
out = stats.ttest_1samp(X, 0, axis=0)
ts = [out[0]]
ps = [out[1]]
mccs = [False] # these are not multiple-comparisons corrected
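# For intuition: the paired t-test equals the one-sample t-test on the paired
# differences. A quick check on hypothetical data (illustration only, with a
# separate RNG so the analysis above is untouched):
rng_demo = np.random.RandomState(1)
a, b = rng_demo.randn(10), rng_demo.randn(10)
assert np.isclose(stats.ttest_rel(a, b)[0], stats.ttest_1samp(a - b, 0)[0])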
def plot_t_p(t, p, title, mcc, axes=None):
if axes is None:
fig = plt.figure(figsize=(6, 3))
axes = [fig.add_subplot(121, projection='3d'), fig.add_subplot(122)]
show = True
else:
show = False
p_lims = [0.1, 0.001]
t_lims = -stats.distributions.t.ppf(p_lims, n_subjects - 1)
p_lims = [-np.log10(p) for p in p_lims]
# t plot
x, y = np.mgrid[0:width, 0:width]
surf = axes[0].plot_surface(x, y, np.reshape(t, (width, width)),
rstride=1, cstride=1, linewidth=0,
vmin=t_lims[0], vmax=t_lims[1], cmap='viridis')
axes[0].set(xticks=[], yticks=[], zticks=[],
xlim=[0, width - 1], ylim=[0, width - 1])
axes[0].view_init(30, 15)
cbar = plt.colorbar(ax=axes[0], shrink=0.75, orientation='horizontal',
fraction=0.1, pad=0.025, mappable=surf)
cbar.set_ticks(t_lims)
cbar.set_ticklabels(['%0.1f' % t_lim for t_lim in t_lims])
cbar.set_label('t-value')
cbar.ax.get_xaxis().set_label_coords(0.5, -0.3)
if not show:
axes[0].set(title=title)
if mcc:
axes[0].title.set_weight('bold')
# p plot
use_p = -np.log10(np.reshape(np.maximum(p, 1e-5), (width, width)))
img = axes[1].imshow(use_p, cmap='inferno', vmin=p_lims[0], vmax=p_lims[1],
interpolation='nearest')
axes[1].set(xticks=[], yticks=[])
cbar = plt.colorbar(ax=axes[1], shrink=0.75, orientation='horizontal',
fraction=0.1, pad=0.025, mappable=img)
cbar.set_ticks(p_lims)
cbar.set_ticklabels(['%0.1f' % p_lim for p_lim in p_lims])
cbar.set_label(r'$-\log_{10}(p)$')
cbar.ax.get_xaxis().set_label_coords(0.5, -0.3)
if show:
text = fig.suptitle(title)
if mcc:
text.set_weight('bold')
plt.subplots_adjust(0, 0.05, 1, 0.9, wspace=0, hspace=0)
mne.viz.utils.plt_show()
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
###############################################################################
# "Hat" variance adjustment
# ~~~~~~~~~~~~~~~~~~~~~~~~~
# The "hat" technique regularizes the variance values used in the t-test
# calculation :footcite:`RidgwayEtAl2012` to compensate for implausibly small
# variances.
ts.append(ttest_1samp_no_p(X, sigma=sigma))
ps.append(stats.distributions.t.sf(np.abs(ts[-1]), len(X) - 1) * 2)
titles.append(r'$\mathrm{t_{hat}}$')
mccs.append(False)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
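# For intuition, a minimal sketch of the variance-floor idea behind the "hat"
# statistic (an illustration, not necessarily the exact regularization used
# inside mne.stats.ttest_1samp_no_p):
var_sketch = np.var(X, axis=0, ddof=1)
var_sketch += sigma * var_sketch.max()  # floor implausibly small variances
t_hat_sketch = X.mean(axis=0) / np.sqrt(var_sketch / n_subjects)
# np.allclose(t_hat_sketch, ts[-1]) should hold if this matches MNE's formula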
###############################################################################
# Non-parametric tests
# ^^^^^^^^^^^^^^^^^^^^
# Instead of assuming an underlying Gaussian distribution, we could instead
# use a **non-parametric resampling** method. In the case of a paired t-test
# between two conditions A and B, which is mathematically equivalent to a
# one-sample t-test between the difference in the conditions A-B, under the
# null hypothesis we have the principle of **exchangeability**. This means
# that, if the null is true, we can exchange conditions and not change
# the distribution of the test statistic.
#
# When using a paired t-test, exchangeability thus means that we can flip the
# signs of the difference between A and B. Therefore, we can construct the
# **null distribution** values for each voxel by taking random subsets of
# samples (subjects), flipping the sign of their difference, and recording the
# absolute value of the resulting statistic (we record the absolute value
# because we conduct a two-tailed test). The absolute value of the statistic
# evaluated on the veridical data can then be compared to this distribution,
# and the p-value is simply the proportion of null distribution values that
# are smaller.
#
# .. warning:: In the case of a true one-sample t-test, i.e. analyzing a single
# condition rather than the difference between two conditions,
# it is not clear where/how exchangeability applies; see
# `this FieldTrip discussion <ft_exch_>`_.
#
# In the case where ``n_permutations`` is large enough (or "all") so
# that the complete set of unique resampling exchanges can be done
# (which is :math:`2^{N_{samp}}-1` for a one-tailed and
# :math:`2^{N_{samp}-1}-1` for a two-tailed test, not counting the
# veridical distribution), instead of randomly exchanging conditions
# the null is formed from using all possible exchanges. This is known
# as a permutation test (or exact test).
# Here we have to do a bit of gymnastics to get our function to do
# a permutation test without correcting for multiple comparisons:
X.shape = (n_subjects, n_src) # flatten the array for simplicity
titles.append('Permutation')
ts.append(np.zeros(width * width))
ps.append(np.zeros(width * width))
mccs.append(False)
for ii in range(n_src):
ts[-1][ii], ps[-1][ii] = permutation_t_test(X[:, [ii]], verbose=False)[:2]
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
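# To make the sign-flipping idea concrete, here is a minimal hand-rolled
# version for a single voxel (variable names are illustrative only; this is
# not the algorithm used inside ``permutation_t_test``):
def one_sample_t(x):
    """Plain one-sample t-value of a 1D sample."""
    return np.mean(x) / np.sqrt(np.var(x, ddof=1) / len(x))
rng = np.random.RandomState(0)
x0 = X[:, 0]                              # data for the first voxel
t_obs = np.abs(one_sample_t(x0))
null = np.array([np.abs(one_sample_t(rng.choice([1., -1.], len(x0)) * x0))
                 for _ in range(1000)])
p_manual = (1. + np.sum(null >= t_obs)) / (1. + len(null))  # common convention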
###############################################################################
# Multiple comparisons
# --------------------
# So far, we have done no correction for multiple comparisons. This is
# potentially problematic for these data because there are
# :math:`40 \cdot 40 = 1600` tests being performed. If we use a threshold
# p < 0.05 for each individual test, we would expect many voxels to be declared
# significant even if there were no true effect. In other words, we would make
# many **type I errors** (adapted from `here <errors_>`_):
#
# .. rst-class:: skinnytable
#
#    +----------+--------+------------------+------------------+
#    |          |        |       Null hypothesis               |
#    |          |        +------------------+------------------+
#    |          |        |      True        |      False       |
#    +==========+========+==================+==================+
#    |          |        | Type I error     | Correct          |
#    |          |  Yes   | False positive   | True positive    |
#    + Reject   +--------+------------------+------------------+
#    |          |        | Correct          | Type II error    |
#    |          |  No    | True negative    | False negative   |
#    +----------+--------+------------------+------------------+
#
# To see why, consider a standard :math:`\alpha = 0.05`.
# For a single test, our probability of making a type I error is 0.05.
# The probability of making at least one type I error in
# :math:`N_{\mathrm{test}}` independent tests is then given by
# :math:`1 - (1 - \alpha)^{N_{\mathrm{test}}}`:
N = np.arange(1, 80)
alpha = 0.05
p_type_I = 1 - (1 - alpha) ** N
fig, ax = plt.subplots(figsize=(4, 3))
ax.scatter(N, p_type_I, 3)
ax.set(xlim=N[[0, -1]], ylim=[0, 1], xlabel=r'$N_{\mathrm{test}}$',
ylabel=u'Probability of at least\none type I error')
ax.grid(True)
fig.tight_layout()
fig.show()
###############################################################################
# To combat this problem, several methods exist. Typically these
# provide control over either one of the following two measures:
#
# 1. `Familywise error rate (FWER) <fwer_>`_
# The probability of making one or more type I errors:
#
# .. math::
# \mathrm{P}(N_{\mathrm{type\ I}} >= 1 \mid H_0)
#
# 2. `False discovery rate (FDR) <fdr_>`_
# The expected proportion of rejected null hypotheses that are
# actually true:
#
# .. math::
# \mathrm{E}(\frac{N_{\mathrm{type\ I}}}{N_{\mathrm{reject}}}
# \mid N_{\mathrm{reject}} > 0) \cdot
# \mathrm{P}(N_{\mathrm{reject}} > 0 \mid H_0)
#
# We cover some techniques that control FWER and FDR below.
#
# Bonferroni correction
# ^^^^^^^^^^^^^^^^^^^^^
# Perhaps the simplest way to deal with multiple comparisons, `Bonferroni
# correction <https://en.wikipedia.org/wiki/Bonferroni_correction>`__
# conservatively multiplies the p-values by the number of comparisons to
# control the FWER.
titles.append('Bonferroni')
ts.append(ts[-1])
ps.append(bonferroni_correction(ps[0])[1])
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
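# For intuition, the Bonferroni adjustment itself is essentially just a
# multiplication of the uncorrected p-values by the number of tests, clipped
# at 1; a sketch equivalent in spirit to ``bonferroni_correction``:
p_bonferroni_manual = np.minimum(ps[0] * float(ps[0].size), 1.)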
###############################################################################
# False discovery rate (FDR) correction
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Typically FDR is performed with the Benjamini-Hochberg procedure, which
# is less restrictive than Bonferroni correction for large numbers of
# comparisons (fewer type II errors), but provides less strict control of type
# I errors.
titles.append('FDR')
ts.append(ts[-1])
ps.append(fdr_correction(ps[0])[1])
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
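# For intuition, a sketch of the Benjamini-Hochberg step-up rule that
# underlies this kind of FDR control (illustrative only; ``fdr_correction``
# is the function to use in practice):
def bh_reject(pvals, q=0.05):
    """Boolean mask of hypotheses rejected by Benjamini-Hochberg at level q."""
    pvals = np.asarray(pvals)
    order = np.argsort(pvals)
    m = len(pvals)
    below = pvals[order] <= q * np.arange(1, m + 1) / float(m)
    reject = np.zeros(m, dtype=bool)
    if below.any():
        k = np.nonzero(below)[0].max()     # largest rank meeting the bound
        reject[order[:k + 1]] = True
    return reject
reject_manual = bh_reject(ps[0], alpha)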
###############################################################################
# Non-parametric resampling test with a maximum statistic
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# **Non-parametric resampling tests** can also be used to correct for multiple
# comparisons. In its simplest form, we again do permutations using
# exchangeability under the null hypothesis, but this time we take the
# *maximum statistic across all voxels* in each permutation to form the
# null distribution. The p-value for each voxel from the veridical data
# is then given by the proportion of null distribution values
# that were smaller.
#
# This method has two important features:
#
# 1. It controls FWER.
# 2. It is non-parametric. Even though our initial test statistic
# (here a 1-sample t-test) is parametric, the null
# distribution for the null hypothesis rejection (the mean value across
# subjects is indistinguishable from zero) is obtained by permutations.
# This means that it makes no assumptions of Gaussianity
# (which do hold for this example, but do not in general for some types
# of processed neuroimaging data).
titles.append(r'$\mathbf{Perm_{max}}$')
out = permutation_t_test(X, verbose=False)[:2]
ts.append(out[0])
ps.append(out[1])
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
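# The essence of the maximum-statistic correction can be sketched by hand:
# for every sign flip keep only the largest |t| across voxels, then compare
# each observed |t| to that null distribution (illustrative only; the exact
# algorithm inside ``permutation_t_test`` differs in details):
rng = np.random.RandomState(0)
t_unpermuted = ttest_1samp_no_p(X)
null_max = np.empty(500)
for k in range(null_max.size):
    signs = rng.choice([1., -1.], size=(len(X), 1))
    null_max[k] = np.abs(ttest_1samp_no_p(signs * X)).max()
p_max_manual = ((1. + (null_max[:, np.newaxis] >=
                       np.abs(t_unpermuted)).sum(0)) / (1. + null_max.size))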
###############################################################################
# Clustering
# ^^^^^^^^^^
# Each of the aforementioned multiple comparisons corrections has the
# disadvantage of not fully incorporating the correlation structure of the
# data, namely that points close to one another (e.g., in space or time) tend
# to be correlated. However, by defining the adjacency/neighbor
# structure in our data, we can use **clustering** to compensate.
#
# To use this, we need to rethink our null hypothesis. Instead
# of thinking about a null hypothesis about means per voxel (with one
# independent test per voxel), we consider a null hypothesis about sizes
# of clusters in our data, which could be stated like:
#
#    The distributions of spatial cluster sizes observed in the two
#    experimental conditions are drawn from the same probability distribution.
#
# Here we only have a single condition and we contrast to zero, which can
# be thought of as:
#
# The distribution of spatial cluster sizes is independent of the sign
# of the data.
#
# In this case, we again do permutations with a maximum statistic, but, under
# each permutation, we:
#
# 1. Compute the test statistic for each voxel individually.
# 2. Threshold the test statistic values.
# 3. Cluster voxels that exceed this threshold (with the same sign) based on
# adjacency.
# 4. Retain the size of the largest cluster (measured, e.g., by a simple voxel
# count, or by the sum of voxel t-values within the cluster) to build the
# null distribution.
#
# After doing these permutations, the cluster sizes in our veridical data
# are compared to this null distribution. The p-value associated with each
# cluster is again given by the proportion of smaller null distribution
# values. This can then be subjected to a standard p-value threshold
# (e.g., p < 0.05) to reject the null hypothesis (i.e., find an effect of
# interest).
#
# This reframing to consider *cluster sizes* rather than *individual means*
# maintains the advantages of the standard non-parametric permutation
# test -- namely controlling FWER and making no assumptions of parametric
# data distribution.
# Critically, though, it also accounts for the correlation structure in the
# data -- which in this toy case is spatial but in general can be
# multidimensional (e.g., spatio-temporal) -- because the null distribution
# will be derived from data in a way that preserves these correlations.
#
# .. sidebar:: Effect size
#
# For a nice description of how to compute the effect size obtained
# in a cluster test, see this
# `FieldTrip mailing list discussion <ft_cluster_effect_size_>`_.
#
# However, there is a drawback. If a cluster significantly deviates from
# the null, no further inference on the cluster (e.g., peak location) can be
# made, as the entire cluster as a whole is used to reject the null.
# Moreover, because the test statistic concerns the full data, the null
# hypothesis (and our rejection of it) refers to the structure of the full
# data. For more information, see also the comprehensive
# `FieldTrip tutorial <ft_cluster_>`_.
#
# Defining the adjacency matrix
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# First we need to define our adjacency (sometimes called "neighbors") matrix.
# This is a square array (or sparse matrix) of shape ``(n_src, n_src)`` that
# contains zeros and ones to define which spatial points are neighbors, i.e.,
# which voxels are adjacent to each other. In our case this
# is quite simple, as our data are aligned on a rectangular grid.
#
# Let's pretend that our data were smaller -- a 3 x 3 grid. Thinking about
# each voxel as being connected to the other voxels it touches, we would
# need a 9 x 9 adjacency matrix. The first row of this matrix contains the
# voxels in the flattened data that the first voxel touches. Since it touches
# the second element in the first row and the first element in the second row
# (and is also a neighbor to itself), this would be::
#
# [1, 1, 0, 1, 0, 0, 0, 0, 0]
#
# :mod:`sklearn.feature_extraction` provides a convenient function for this:
from sklearn.feature_extraction.image import grid_to_graph # noqa: E402
mini_adjacency = grid_to_graph(3, 3).toarray()
assert mini_adjacency.shape == (9, 9)
print(mini_adjacency[0])
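# The same kind of matrix can also be assembled by hand for data that are
# not on an image grid; a sketch for three points along a line, where each
# point neighbors itself and its immediate neighbors:
from scipy import sparse  # noqa: E402
rows, cols = zip(*[(i, j) for i in range(3) for j in range(3)
                   if abs(i - j) <= 1])
line_adjacency = sparse.coo_matrix(
    (np.ones(len(rows)), (rows, cols)), shape=(3, 3))
print(line_adjacency.toarray())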
###############################################################################
# In general the adjacency between voxels can be more complex, such as
# those between sensors in 3D space, or time-varying activation at brain
# vertices on a cortical surface. MNE provides several convenience functions
# for computing adjacency matrices (see the
# :ref:`Statistics API <api_reference_statistics>`).
#
# Standard clustering
# ~~~~~~~~~~~~~~~~~~~
# Here, since our data are on a grid, we can use ``adjacency=None`` to
# trigger optimized grid-based code, and run the clustering algorithm.
titles.append('Clustering')
# Reshape data to what is equivalent to (n_samples, n_space, n_time)
X.shape = (n_subjects, width, width)
# Compute threshold from t distribution (this is also the default)
threshold = stats.distributions.t.ppf(1 - alpha, n_subjects - 1)
t_clust, clusters, p_values, H0 = permutation_cluster_1samp_test(
X, n_jobs=1, threshold=threshold, adjacency=None,
n_permutations=n_permutations, out_type='mask')
# Put the cluster data in a viewable format
p_clust = np.ones((width, width))
for cl, p in zip(clusters, p_values):
p_clust[cl] = p
ts.append(t_clust)
ps.append(p_clust)
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
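# The core recipe of the cluster test can be sketched by hand: threshold
# each sign-flipped dataset, label adjacent supra-threshold voxels, and
# record the largest cluster size (here a voxel count, positive tail only;
# an illustration, not the algorithm used above):
from scipy import ndimage  # noqa: E402
rng = np.random.RandomState(0)
max_cluster_null = np.empty(200)
for k in range(max_cluster_null.size):
    Xs = rng.choice([1., -1.], size=(n_subjects, 1, 1)) * X
    t_perm = Xs.mean(0) / np.sqrt(Xs.var(0, ddof=1) / n_subjects)
    labels, n_labels = ndimage.label(t_perm > threshold)
    sizes = np.bincount(labels.ravel())[1:] if n_labels else np.array([0])
    max_cluster_null[k] = sizes.max()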
###############################################################################
# "Hat" variance adjustment
# ~~~~~~~~~~~~~~~~~~~~~~~~~
# This method can also be used in this context to correct for small
# variances :footcite:`RidgwayEtAl2012`:
titles.append(r'$\mathbf{C_{hat}}$')
stat_fun_hat = partial(ttest_1samp_no_p, sigma=sigma)
t_hat, clusters, p_values, H0 = permutation_cluster_1samp_test(
X, n_jobs=1, threshold=threshold, adjacency=None, out_type='mask',
n_permutations=n_permutations, stat_fun=stat_fun_hat, buffer_size=None)
p_hat = np.ones((width, width))
for cl, p in zip(clusters, p_values):
p_hat[cl] = p
ts.append(t_hat)
ps.append(p_hat)
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
###############################################################################
# .. _tfce_example:
#
# Threshold-free cluster enhancement (TFCE)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# TFCE eliminates the free parameter initial ``threshold`` value that
# determines which points are included in clustering by approximating
# a continuous integration across possible threshold values with a standard
# `Riemann sum <https://en.wikipedia.org/wiki/Riemann_sum>`__
# :footcite:`SmithNichols2009`.
# This requires giving a starting threshold ``start`` and a step
# size ``step``, which in MNE is supplied as a dict.
# The smaller the ``step`` and closer to 0 the ``start`` value,
# the better the approximation, but the longer it takes.
#
# A significant advantage of TFCE is that, rather than modifying the
# statistical null hypothesis under test (from one about individual voxels
# to one about the distribution of clusters in the data), it modifies the *data
# under test* while still controlling for multiple comparisons.
# The statistical test is then done at the level of individual voxels rather
# than clusters. This allows for evaluation of each point
# independently for significance rather than only as cluster groups.
titles.append(r'$\mathbf{C_{TFCE}}$')
threshold_tfce = dict(start=0, step=0.2)
t_tfce, _, p_tfce, H0 = permutation_cluster_1samp_test(
X, n_jobs=1, threshold=threshold_tfce, adjacency=None,
n_permutations=n_permutations, out_type='mask')
ts.append(t_tfce)
ps.append(p_tfce)
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
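# The TFCE score itself can be sketched as a discrete sum over thresholds
# ``h`` of ``extent(h) ** E * h ** H`` with the conventional defaults
# E=0.5 and H=2 (a rough illustration on a toy positive map, not the
# optimized implementation used above):
from scipy import ndimage  # noqa: E402
def tfce_sketch(stat_map, dh=0.1, e_power=0.5, h_power=2.0):
    """Rough TFCE enhancement of a 2D map of non-negative statistics."""
    score = np.zeros_like(stat_map)
    for h in np.arange(dh, stat_map.max() + dh, dh):
        supra = stat_map >= h
        labels, n_labels = ndimage.label(supra)
        if not n_labels:
            continue
        extent = np.bincount(labels.ravel())[1:]   # cluster sizes at level h
        score += supra * extent[labels - 1] ** e_power * h ** h_power * dh
    return score
toy = np.zeros((5, 5))
toy[1:4, 1:4] = 3.
print(tfce_sketch(toy)[2, 2])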
###############################################################################
# We can also combine TFCE and the "hat" correction:
titles.append(r'$\mathbf{C_{hat,TFCE}}$')
t_tfce_hat, _, p_tfce_hat, H0 = permutation_cluster_1samp_test(
X, n_jobs=1, threshold=threshold_tfce, adjacency=None, out_type='mask',
n_permutations=n_permutations, stat_fun=stat_fun_hat, buffer_size=None)
ts.append(t_tfce_hat)
ps.append(p_tfce_hat)
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
###############################################################################
# Visualize and compare methods
# -----------------------------
# Let's take a look at these statistics. The top row shows each test
# statistic, and the bottom row shows the corresponding p-values; tests with
# proper control over FWER or FDR are marked with bold titles.
fig = plt.figure(facecolor='w', figsize=(14, 3))
assert len(ts) == len(titles) == len(ps)
for ii in range(len(ts)):
ax = [fig.add_subplot(2, 10, ii + 1, projection='3d'),
fig.add_subplot(2, 10, 11 + ii)]
plot_t_p(ts[ii], ps[ii], titles[ii], mccs[ii], ax)
fig.tight_layout(pad=0, w_pad=0.05, h_pad=0.1)
plt.show()
###############################################################################
# The first three columns show the parametric and non-parametric statistics
# that are not corrected for multiple comparisons:
#
# - Mass univariate **t-tests** result in jagged edges.
# - **"Hat" variance correction** of the t-tests produces less peaky edges,
# correcting for sharpness in the statistic driven by low-variance voxels.
# - **Non-parametric resampling tests** are very similar to t-tests. This is to
# be expected: the data are drawn from a Gaussian distribution, and thus
# satisfy parametric assumptions.
#
# The next three columns show multiple comparison corrections of the
# mass univariate tests (parametric and non-parametric). These corrections
# are overly conservative here because neighboring voxels in our data are
# correlated:
#
# - **Bonferroni correction** eliminates any significant activity.
# - **FDR correction** is less conservative than Bonferroni.
# - A **permutation test with a maximum statistic** also eliminates any
# significant activity.
#
# The final four columns show the non-parametric cluster-based permutation
# tests with a maximum statistic:
#
# - **Standard clustering** identifies the correct region. However, the whole
# area must be declared significant, so no peak analysis can be done.
# Also, the peak is broad.
# - **Clustering with "hat" variance adjustment** tightens the estimate of
# significant activity.
# - **Clustering with TFCE** allows analyzing each significant point
# independently, but still has a broadened estimate.
# - **Clustering with TFCE and "hat" variance adjustment** tightens the area
# declared significant (again FWER corrected).
#
# Statistical functions in MNE
# ----------------------------
# The complete listing of statistical functions provided by MNE is in
# the :ref:`Statistics API list <api_reference_statistics>`, but we will give
# a brief overview here.
#
# MNE provides several convenience parametric testing functions that can be
# used in conjunction with the non-parametric clustering methods. However,
# the set of functions we provide is not meant to be exhaustive.
#
# If the univariate statistical contrast of interest is not listed here
# (e.g., interaction term in an unbalanced ANOVA), consider checking out the
# :mod:`statsmodels` package. It offers many functions for computing
# statistical contrasts, e.g., :func:`statsmodels.stats.anova.anova_lm`.
# To use these functions in clustering:
#
# 1. Determine which test statistic (e.g., t-value, F-value) you would use
# in a univariate context to compute your contrast of interest. In other
# words, if there were only a single output such as reaction times, what
# test statistic might you compute on the data?
# 2. Wrap the call to that function within a function that takes an input of
# the same shape that is expected by your clustering function,
# and returns an array of the same shape without the "samples" dimension
# (e.g., :func:`mne.stats.permutation_cluster_1samp_test` takes an array
# of shape ``(n_samples, p, q)`` and returns an array of shape ``(p, q)``).
# 3. Pass this wrapped function to the ``stat_fun`` argument to the clustering
# function.
# 4. Set an appropriate ``threshold`` value (float or dict) based on the
# values your statistical contrast function returns.
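# As a sketch of steps 1-4 above (hypothetical; not one of the MNE helpers),
# a wrapper around a plain SciPy one-sample t-test that accepts data of
# shape ``(n_samples, p, q)`` and returns a ``(p, q)`` map of statistics,
# suitable for the ``stat_fun`` argument of the clustering functions:
def my_stat_fun(data):
    """Return one-sample t-values with the samples dimension removed."""
    return stats.ttest_1samp(data, popmean=0, axis=0)[0]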
#
# Parametric methods provided by MNE
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# - :func:`mne.stats.ttest_1samp_no_p`
# Paired t-test, optionally with hat adjustment.
# This is used by default for contrast enhancement in paired cluster tests.
#
# - :func:`mne.stats.f_oneway`
# One-way ANOVA for independent samples.
# This can be used to compute various F-contrasts. It is used by default
# for contrast enhancement in non-paired cluster tests.
#
# - :func:`mne.stats.f_mway_rm`
# M-way ANOVA for repeated measures and balanced designs.
# This returns F-statistics and p-values. The associated helper function
# :func:`mne.stats.f_threshold_mway_rm` can be used to determine the
# F-threshold at a given significance level.
#
# - :func:`mne.stats.linear_regression`
# Compute ordinary least square regressions on multiple targets, e.g.,
# sensors, time points across trials (samples).
# For each regressor it returns the beta value, t-statistic, and
# uncorrected p-value. While it can be used as a test, it is
# particularly useful to compute weighted averages or deal with
# continuous predictors.
#
# Non-parametric methods
# ^^^^^^^^^^^^^^^^^^^^^^
#
# - :func:`mne.stats.permutation_cluster_test`
# Unpaired contrasts with clustering.
#
# - :func:`mne.stats.spatio_temporal_cluster_test`
# Unpaired contrasts with spatio-temporal clustering.
#
# - :func:`mne.stats.permutation_t_test`
# Paired contrast with no clustering.
#
# - :func:`mne.stats.permutation_cluster_1samp_test`
# Paired contrasts with clustering.
#
# - :func:`mne.stats.spatio_temporal_cluster_1samp_test`
# Paired contrasts with spatio-temporal clustering.
#
# .. warning:: In most MNE functions, data has shape
# ``(..., n_space, n_time)``, where the spatial dimension can
# be e.g. sensors or source vertices. But for our spatio-temporal
# clustering functions, the spatial dimensions need to be **last**
# for computational efficiency reasons. For example, for
# :func:`mne.stats.spatio_temporal_cluster_1samp_test`, ``X``
# needs to be of shape ``(n_samples, n_time, n_space)``. You can
# use :func:`numpy.transpose` to transpose axes if necessary.
#
# References
# ----------
# .. footbibliography::
#
# .. include:: ../../links.inc
| bsd-3-clause |
rollend/trading-with-python | lib/bats.py | 78 | 3458 | #-------------------------------------------------------------------------------
# Name: BATS
# Purpose: get data from BATS exchange
#
# Author: jev
#
# Created: 17/08/2013
# Copyright: (c) Jev Kuznetsov 2013
# Licence: BSD
#-------------------------------------------------------------------------------
import urllib
import re
import pandas as pd
import datetime as dt
import zipfile
import StringIO
from extra import ProgressBar
import os
import yahooFinance as yf
from string import Template
import numpy as np
def fileName2date( fName):
'''convert filename to date'''
name = os.path.splitext(fName)[0]
    m = re.findall(r'\d+', name)[0]
return dt.datetime.strptime(m,'%Y%m%d').date()
def date2fileName(date):
return 'BATSshvol%s.txt.zip' % date.strftime('%Y%m%d')
def downloadUrl(date):
s = Template('http://www.batstrading.com/market_data/shortsales/$year/$month/$fName-dl?mkt=bzx')
url = s.substitute(fName=date2fileName(date), year=date.year, month='%02d' % date.month)
return url
class BATS_Data(object):
def __init__(self, dataDir):
''' create class. dataDir: directory to which files are downloaded '''
self.dataDir = dataDir
self.shortRatio = None
self._checkDates()
def _checkDates(self):
''' update list of available dataset dates'''
self.dates = []
for fName in os.listdir(self.dataDir):
self.dates.append(fileName2date(fName))
def _missingDates(self):
''' check for missing dates based on spy data'''
print 'Getting yahoo data to determine business dates... ',
spy = yf.getHistoricData('SPY',sDate = (2010,1,1))
busDates = [d.date() for d in spy.index ]
print 'Date range: ', busDates[0] ,'-', busDates[-1]
missing = []
for d in busDates:
if d not in self.dates:
missing.append(d)
return missing
def updateDb(self):
print 'Updating database'
missing = self._missingDates()
for i, date in enumerate(missing):
source = downloadUrl(date)
dest = os.path.join(self.dataDir,date2fileName(date))
if not os.path.exists(dest):
                print 'Downloading [%i/%i]' % (i + 1, len(missing)), source
urllib.urlretrieve(source, dest)
else:
print 'x',
print 'Update done.'
self._checkDates()
def loadDate(self,date):
fName = os.path.join(self.dataDir, date2fileName(date))
zipped = zipfile.ZipFile(fName) # open zip file
        lines = zipped.read(zipped.namelist()[0]) # read the first file in the archive into a string
buf = StringIO.StringIO(lines) # create buffer
df = pd.read_csv(buf,sep='|',index_col=1,parse_dates=False,dtype={'Date':object,'Short Volume':np.float32,'Total Volume':np.float32})
s = df['Short Volume']/df['Total Volume']
s.name = dt.datetime.strptime(df['Date'][-1],'%Y%m%d')
return s
def loadData(self):
''' load data from zip files '''
data = []
pb = ProgressBar(len(self.dates)-1)
for idx, date in enumerate(self.dates):
data.append(self.loadDate(date))
pb.animate(idx)
self.shortRatio = pd.DataFrame(data)
return self.shortRatio
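# Example usage sketch (illustrative only: the data directory name below is
# hypothetical, and updating the database requires network access):
if __name__ == '__main__':
    dataDir = 'batsData' # hypothetical location for the downloaded zip files
    if not os.path.exists(dataDir):
        os.makedirs(dataDir)
    bats = BATS_Data(dataDir)
    bats.updateDb() # download any missing daily files
    shortRatio = bats.loadData() # DataFrame of short volume / total volume
    print shortRatio.tail()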
| bsd-3-clause |