repo_name (stringlengths 7-92) | path (stringlengths 5-149) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 911-693k) | license (stringclasses 15 values)
---|---|---|---|---|---|
qifeigit/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
annahs/atmos_research | LEO_2D_histos_from_db.py | 1 | 3992 | import sys
import os
import datetime
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
from pprint import pprint
import sqlite3
import calendar
from datetime import datetime
#id INTEGER PRIMARY KEY AUTOINCREMENT,
#sp2b_file TEXT,
#file_index INT,
#instr TEXT,
#instr_locn TEXT,
#particle_type TEXT,
#particle_dia FLOAT,
#unix_ts_utc FLOAT,
#actual_scat_amp FLOAT,
#actual_peak_pos INT,
#FF_scat_amp FLOAT,
#FF_peak_pos INT,
#FF_gauss_width FLOAT,
#zeroX_to_peak FLOAT,
#LF_scat_amp FLOAT,
#incand_amp FLOAT,
#lag_time_fit_to_incand FLOAT,
#LF_baseline_pct_diff FLOAT,
#rBC_mass_fg FLOAT,
#coat_thickness_nm FLOAT,
#zero_crossing_posn FLOAT,
#UNIQUE (sp2b_file, file_index, instr)
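# Editor's sketch (not part of the original script): the column list above,
# written out as the CREATE TABLE statement it implies. The table name
# SP2_coating_analysis is taken from the SELECT query further down; the
# statement is only stored as a string here and is never executed against the
# database.
SP2_COATING_ANALYSIS_SCHEMA_SKETCH = """
CREATE TABLE IF NOT EXISTS SP2_coating_analysis (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    sp2b_file TEXT,
    file_index INT,
    instr TEXT,
    instr_locn TEXT,
    particle_type TEXT,
    particle_dia FLOAT,
    unix_ts_utc FLOAT,
    actual_scat_amp FLOAT,
    actual_peak_pos INT,
    FF_scat_amp FLOAT,
    FF_peak_pos INT,
    FF_gauss_width FLOAT,
    zeroX_to_peak FLOAT,
    LF_scat_amp FLOAT,
    incand_amp FLOAT,
    lag_time_fit_to_incand FLOAT,
    LF_baseline_pct_diff FLOAT,
    rBC_mass_fg FLOAT,
    coat_thickness_nm FLOAT,
    zero_crossing_posn FLOAT,
    UNIQUE (sp2b_file, file_index, instr)
)
"""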
#connect to database
conn = sqlite3.connect('C:/projects/dbs/SP2_data.db')
c = conn.cursor()
instrument = 'UBCSP2'
instrument_locn = 'WHI'
type_particle = 'incand'
start_date = datetime.strptime('20120401','%Y%m%d')
end_date = datetime.strptime('20120531','%Y%m%d')
lookup_file = 'C:/Users/Sarah Hanna/Documents/Data/WHI long term record/coatings/lookup_tables/coating_lookup_table_WHI_2012_UBCSP2.lupckl'
rBC_density = 1.8
incand_sat = 3750
LF_max = 45000 #above this is unreasonable
lookup = open(lookup_file, 'r')
lookup_table = pickle.load(lookup)
lookup.close()
min_rBC_mass = 1.63#120 2.6-#140 3.86-#160nm 0.25
max_rBC_mass = 2.6#140 3.86-160 5.5-#180nm 10.05
VED_min = 65
VED_max = 220
scat_lim = 100
begin_data = calendar.timegm(start_date.timetuple())
end_data = calendar.timegm(end_date.timetuple())
data = []
particles=0
no_scat=0
no_scat_110 =0
fit_failure=0
early_evap=0
early_evap_110=0
flat_fit=0
LF_high=0
for row in c.execute('''SELECT rBC_mass_fg, coat_thickness_nm, unix_ts_utc, LF_scat_amp, LF_baseline_pct_diff, sp2b_file, file_index, instr,actual_scat_amp
FROM SP2_coating_analysis
WHERE instr=? and instr_locn=? and particle_type=? and rBC_mass_fg>=? and rBC_mass_fg<? and unix_ts_utc>=? and unix_ts_utc<?''',
(instrument,instrument_locn,type_particle, min_rBC_mass, max_rBC_mass, begin_data,end_data)):
particles+=1
rBC_mass = row[0]
coat_thickness = row[1]
event_time = datetime.utcfromtimestamp(row[2])
LEO_amp = row[3]
LF_baseline_pctdiff = row[4]
file = row[5]
index = row[6]
instrt = row[7]
meas_scat_amp = row[8]
rBC_VED = (((rBC_mass/(10**15*rBC_density))*6/3.14159)**(1/3.0))*10**7 #VED in nm with 10^15fg/g and 10^7nm/cm
if meas_scat_amp < 6:
no_scat +=1
if rBC_VED > scat_lim:
no_scat_110+=1
data.append([rBC_VED,coat_thickness])
if LEO_amp == 0.0 and LF_baseline_pctdiff == None and meas_scat_amp >= 6:
early_evap +=1
if rBC_VED > scat_lim:
early_evap_110 +=1
if LEO_amp == -2:
early_evap +=1
if rBC_VED > scat_lim:
early_evap_110 +=1
if LEO_amp == -1:
fit_failure +=1
if LEO_amp == 0.0 and LF_baseline_pctdiff != None:
flat_fit +=1
if LEO_amp > LF_max:
LF_high +=1
if LEO_amp > 0:
data.append([rBC_VED,coat_thickness])
print '# of particles', particles
print 'no_scat', no_scat
print 'no_scat_110', no_scat_110
print 'fit_failure', fit_failure
print 'early_evap', early_evap
print 'early_evap_110', early_evap_110
print 'flat_fit', flat_fit
print 'LF_high', LF_high
evap_pct = (early_evap)*100.0/particles
evap_pct_110 = (early_evap_110)*100.0/particles
no_scat_pct = (no_scat)*100.0/particles
no_scat_pct_110 = no_scat_110*100./particles
print evap_pct, evap_pct_110, no_scat_pct,no_scat_pct_110
rBC_VEDs = [row[0] for row in data]
coatings = [row[1] for row in data]
median_coat = np.median (coatings)
print 'median coating',median_coat
#####hexbin coat vs core###
fig = plt.figure()
ax = fig.add_subplot(111)
#x_limits = [0,250]
#y_limits = [0,250]
#h = plt.hexbin(rBC_VEDs, coatings, cmap=cm.jet,gridsize = 50, mincnt=1)
hist = plt.hist(coatings, bins=50)
plt.ylabel('frequency')
plt.xlabel('Coating Thickness (nm)')
#cb = plt.colorbar()
#cb.set_label('frequency')
plt.show()
| mit |
bert9bert/statsmodels | statsmodels/graphics/tests/test_correlation.py | 31 | 1112 | import numpy as np
from numpy.testing import dec
from statsmodels.graphics.correlation import plot_corr, plot_corr_grid
from statsmodels.datasets import randhie
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except:
have_matplotlib = False
@dec.skipif(not have_matplotlib)
def test_plot_corr():
hie_data = randhie.load_pandas()
corr_matrix = np.corrcoef(hie_data.data.values.T)
fig = plot_corr(corr_matrix, xnames=hie_data.names)
plt.close(fig)
fig = plot_corr(corr_matrix, xnames=[], ynames=hie_data.names)
plt.close(fig)
fig = plot_corr(corr_matrix, normcolor=True, title='', cmap='jet')
plt.close(fig)
@dec.skipif(not have_matplotlib)
def test_plot_corr_grid():
hie_data = randhie.load_pandas()
corr_matrix = np.corrcoef(hie_data.data.values.T)
fig = plot_corr_grid([corr_matrix] * 2, xnames=hie_data.names)
plt.close(fig)
fig = plot_corr_grid([corr_matrix] * 5, xnames=[], ynames=hie_data.names)
plt.close(fig)
fig = plot_corr_grid([corr_matrix] * 3, normcolor=True, titles='', cmap='jet')
plt.close(fig)
| bsd-3-clause |
jakevdp/bokeh | bokeh/mplexporter/renderers/base.py | 44 | 14355 | import warnings
import itertools
from contextlib import contextmanager
import numpy as np
from matplotlib import transforms
from .. import utils
from .. import _py3k_compat as py3k
class Renderer(object):
@staticmethod
def ax_zoomable(ax):
return bool(ax and ax.get_navigate())
@staticmethod
def ax_has_xgrid(ax):
        return bool(ax and ax.xaxis._gridOnMajor and ax.xaxis.get_gridlines())
@staticmethod
def ax_has_ygrid(ax):
return bool(ax and ax.yaxis._gridOnMajor and ax.yaxis.get_gridlines())
@property
def current_ax_zoomable(self):
return self.ax_zoomable(self._current_ax)
@property
def current_ax_has_xgrid(self):
return self.ax_has_xgrid(self._current_ax)
@property
def current_ax_has_ygrid(self):
return self.ax_has_ygrid(self._current_ax)
@contextmanager
def draw_figure(self, fig, props):
if hasattr(self, "_current_fig") and self._current_fig is not None:
warnings.warn("figure embedded in figure: something is wrong")
self._current_fig = fig
self._fig_props = props
self.open_figure(fig=fig, props=props)
yield
self.close_figure(fig=fig)
self._current_fig = None
self._fig_props = {}
@contextmanager
def draw_axes(self, ax, props):
if hasattr(self, "_current_ax") and self._current_ax is not None:
warnings.warn("axes embedded in axes: something is wrong")
self._current_ax = ax
self._ax_props = props
self.open_axes(ax=ax, props=props)
yield
self.close_axes(ax=ax)
self._current_ax = None
self._ax_props = {}
@contextmanager
def draw_legend(self, legend, props):
self._current_legend = legend
self._legend_props = props
self.open_legend(legend=legend, props=props)
yield
self.close_legend(legend=legend)
self._current_legend = None
self._legend_props = {}
# Following are the functions which should be overloaded in subclasses
def open_figure(self, fig, props):
"""
Begin commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The Figure which will contain the ensuing axes and elements
props : dictionary
The dictionary of figure properties
"""
pass
def close_figure(self, fig):
"""
Finish commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The figure which is finished being drawn.
"""
pass
def open_axes(self, ax, props):
"""
Begin commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which will contain the ensuing axes and elements
props : dictionary
The dictionary of axes properties
"""
pass
def close_axes(self, ax):
"""
Finish commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which is finished being drawn.
"""
pass
def open_legend(self, legend, props):
"""
        Begin commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend that will contain the ensuing elements
props : dictionary
The dictionary of legend properties
"""
pass
def close_legend(self, legend):
"""
Finish commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend which is finished being drawn
"""
pass
def draw_marked_line(self, data, coordinates, linestyle, markerstyle,
label, mplobj=None):
"""Draw a line that also has markers.
If this isn't reimplemented by a renderer object, by default, it will
make a call to BOTH draw_line and draw_markers when both markerstyle
and linestyle are not None in the same Line2D object.
"""
if linestyle is not None:
self.draw_line(data, coordinates, linestyle, label, mplobj)
if markerstyle is not None:
self.draw_markers(data, coordinates, markerstyle, label, mplobj)
def draw_line(self, data, coordinates, style, label, mplobj=None):
"""
Draw a line. By default, draw the line via the draw_path() command.
Some renderers might wish to override this and provide more
fine-grained behavior.
In matplotlib, lines are generally created via the plt.plot() command,
though this command also can create marker collections.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the line.
mplobj : matplotlib object
the matplotlib plot element which generated this line
"""
pathcodes = ['M'] + (data.shape[0] - 1) * ['L']
pathstyle = dict(facecolor='none', **style)
pathstyle['edgecolor'] = pathstyle.pop('color')
pathstyle['edgewidth'] = pathstyle.pop('linewidth')
self.draw_path(data=data, coordinates=coordinates,
pathcodes=pathcodes, style=pathstyle, mplobj=mplobj)
@staticmethod
def _iter_path_collection(paths, path_transforms, offsets, styles):
"""Build an iterator over the elements of the path collection"""
N = max(len(paths), len(offsets))
if not path_transforms:
path_transforms = [np.eye(3)]
edgecolor = styles['edgecolor']
if np.size(edgecolor) == 0:
edgecolor = ['none']
facecolor = styles['facecolor']
if np.size(facecolor) == 0:
facecolor = ['none']
elements = [paths, path_transforms, offsets,
edgecolor, styles['linewidth'], facecolor]
it = itertools
return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N)
def draw_path_collection(self, paths, path_coordinates, path_transforms,
offsets, offset_coordinates, offset_order,
styles, mplobj=None):
"""
Draw a collection of paths. The paths, offsets, and styles are all
iterables, and the number of paths is max(len(paths), len(offsets)).
By default, this is implemented via multiple calls to the draw_path()
function. For efficiency, Renderers may choose to customize this
implementation.
Examples of path collections created by matplotlib are scatter plots,
histograms, contour plots, and many others.
Parameters
----------
paths : list
list of tuples, where each tuple has two elements:
(data, pathcodes). See draw_path() for a description of these.
path_coordinates: string
the coordinates code for the paths, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
path_transforms: array_like
an array of shape (*, 3, 3), giving a series of 2D Affine
transforms for the paths. These encode translations, rotations,
and scalings in the standard way.
offsets: array_like
An array of offsets of shape (N, 2)
offset_coordinates : string
the coordinates code for the offsets, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
offset_order : string
either "before" or "after". This specifies whether the offset
is applied before the path transform, or after. The matplotlib
backend equivalent is "before"->"data", "after"->"screen".
styles: dictionary
A dictionary in which each value is a list of length N, containing
the style(s) for the paths.
mplobj : matplotlib object
the matplotlib plot element which generated this collection
"""
if offset_order == "before":
raise NotImplementedError("offset before transform")
for tup in self._iter_path_collection(paths, path_transforms,
offsets, styles):
(path, path_transform, offset, ec, lw, fc) = tup
vertices, pathcodes = path
path_transform = transforms.Affine2D(path_transform)
vertices = path_transform.transform(vertices)
# This is a hack:
if path_coordinates == "figure":
path_coordinates = "points"
style = {"edgecolor": utils.color_to_hex(ec),
"facecolor": utils.color_to_hex(fc),
"edgewidth": lw,
"dasharray": "10,0",
"alpha": styles['alpha'],
"zorder": styles['zorder']}
self.draw_path(data=vertices, coordinates=path_coordinates,
pathcodes=pathcodes, style=style, offset=offset,
offset_coordinates=offset_coordinates,
mplobj=mplobj)
def draw_markers(self, data, coordinates, style, label, mplobj=None):
"""
Draw a set of markers. By default, this is done by repeatedly
calling draw_path(), but renderers should generally overload
this method to provide a more efficient implementation.
In matplotlib, markers are created using the plt.plot() command.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the markers.
mplobj : matplotlib object
the matplotlib plot element which generated this marker collection
"""
vertices, pathcodes = style['markerpath']
pathstyle = dict((key, style[key]) for key in ['alpha', 'edgecolor',
'facecolor', 'zorder',
'edgewidth'])
pathstyle['dasharray'] = "10,0"
for vertex in data:
self.draw_path(data=vertices, coordinates="points",
pathcodes=pathcodes, style=pathstyle,
offset=vertex, offset_coordinates=coordinates,
mplobj=mplobj)
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
"""
Draw text on the image.
Parameters
----------
text : string
The text to draw
position : tuple
The (x, y) position of the text
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the text.
text_type : string or None
if specified, a type of text such as "xlabel", "ylabel", "title"
mplobj : matplotlib object
the matplotlib plot element which generated this text
"""
raise NotImplementedError()
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
"""
Draw a path.
In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
'figure' for figure (pixel) coordinates, or "points" for raw
point coordinates (useful in conjunction with offsets, below).
pathcodes : list
A list of single-character SVG pathcodes associated with the data.
Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
'S', 's', 'C', 'c', 'Z', 'z']
See the SVG specification for details. Note that some path codes
consume more than one datapoint (while 'Z' consumes none), so
in general, the length of the pathcodes list will not be the same
as that of the data array.
style : dictionary
a dictionary specifying the appearance of the line.
offset : list (optional)
the (x, y) offset of the path. If not given, no offset will
be used.
offset_coordinates : string (optional)
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
the matplotlib plot element which generated this path
"""
raise NotImplementedError()
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
"""
Draw an image.
Parameters
----------
imdata : string
base64 encoded png representation of the image
extent : list
the axes extent of the image: [xmin, xmax, ymin, ymax]
coordinates: string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the image
mplobj : matplotlib object
the matplotlib plot object which generated this image
"""
raise NotImplementedError()
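# Editor's sketch (not part of the original module): a minimal concrete
# Renderer that simply records the primitive draw calls it receives. Only the
# three methods that raise NotImplementedError above are filled in; the class
# name RecordingRenderer and the `calls` list are illustrative and not part of
# the mplexporter API.
class RecordingRenderer(Renderer):
    def __init__(self):
        self.calls = []

    def draw_path(self, data, coordinates, pathcodes, style,
                  offset=None, offset_coordinates="data", mplobj=None):
        # draw_line() and draw_markers() both funnel into this method.
        self.calls.append(("path", len(pathcodes), coordinates))

    def draw_text(self, text, position, coordinates, style,
                  text_type=None, mplobj=None):
        self.calls.append(("text", text, text_type))

    def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
        self.calls.append(("image", extent))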
| bsd-3-clause |
stefangri/s_s_productions | PHY341/V401_Interferometer/Messdaten/latex.py | 2 | 2289 | from pandas import Series, DataFrame
import pandas as pd
import collections
import numpy
import uncertainties
import pint
from uncertainties import ufloat
from uncertainties import ufloat_fromstr
from pint import UnitRegistry
import string
ureg = UnitRegistry()
Q_ = ureg.Quantity
class Latexdocument(object):
def __init__(self, filename):
self.name = filename
self.data = DataFrame(columns=(['tex', 'var']))
def tabular(self, spalten, header, places, caption, label):
with open(self.name, 'w') as f:
f.write('\\begin{table} \n\\centering \n\\caption{' + caption + '} \n\\label{tab: ' + label + '} \n\\begin{tabular}{')
f.write(len(spalten) * 'S ')
f.write('} \n\\toprule \n')
f.write(header + ' \\\ \n')
f.write('\\midrule \n ')
for i in range(0, len(spalten[0])):
for j in range(0, len(spalten)):
if j == len(spalten) - 1:
f.write(('{:.' + str(places[j]) + 'f}' + '\\\ \n').format(spalten[j][i]))
else:
f.write(('{:.' + str(places[j]) + 'f} ' + ' & ').format(spalten[j][i]))
f.write('\\bottomrule \n\\end{tabular} \n\\end{table}')
def app(self, name, value):
if (type(value.magnitude) == uncertainties.core.Variable or type(value.magnitude) == uncertainties.core.AffineScalarFunc):
val = '{:+.1uS}'.format(value.magnitude)
s = '{:Lx}'.format(Q_(2, value.units)) + '~'
df = DataFrame(collections.OrderedDict({'var': pd.Series(value, index = [name] ),
#'tex': name + ' = \SI{' + val[:val.index('+')]+ ' \pm ' + val[val.index('-')+1:] + s[s.index('}{'):s.index('~')]}))
'tex': name + ' = \SI{' + val + '}{' + s[s.index('}{'):s.index('~')]}))
self.data = self.data.append(df)
else:
df = DataFrame({'var': pd.Series(value, index = [name] ),
'tex': name + ' = ' + '{:Lx}'.format(value)})
self.data = self.data.append(df)
def makeresults(self):
print(self.data['var'])
with open(self.name, 'w') as f:
for i in self.data['tex']:
f.write(i + '\n')
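# Editor's sketch (not part of the original module): how the class above is
# typically driven. The file names, column data and quantity values are made
# up for illustration, and the helper is never called on import.
def _example_latexdocument():
    # Write a two-column siunitx table
    table = Latexdocument('example_table.tex')
    x = [1.0, 2.0, 3.0]
    y = [0.11, 0.22, 0.33]
    table.tabular([x, y], header='{$x$} & {$y$}', places=[1, 2],
                  caption='Example table', label='example')
    # Collect named results (with uncertainties) and dump them as \SI{...}{...}
    results = Latexdocument('example_results.tex')
    results.app('v', Q_(ufloat(3.14, 0.05), 'meter / second'))
    results.makeresults()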
| mit |
cajal/pipeline | python/pipeline/utils/galvo_corrections.py | 5 | 13668 | """ Utilities for motion and raster correction of resonant scans. """
import numpy as np
from scipy import interpolate as interp
from scipy import signal
from scipy import ndimage
from ..exceptions import PipelineException
from ..utils.signal import mirrconv
def compute_raster_phase(image, temporal_fill_fraction):
""" Compute raster correction for bidirectional resonant scanners.
It shifts the even and odd rows of the image in the x axis to find the scan angle
that aligns them better. Positive raster phase will shift even rows to the right and
odd rows to the left (assuming first row is row 0).
:param np.array image: The image to be corrected.
:param float temporal_fill_fraction: Fraction of time during which the scan is
recording a line against the total time per line.
:return: An angle (in radians). Estimate of the mismatch angle between the expected
initial angle and the one recorded.
:rtype: float
"""
# Make sure image has even number of rows (so number of even and odd rows is the same)
image = image[:-1] if image.shape[0] % 2 == 1 else image
# Get some params
image_height, image_width = image.shape
skip_rows = round(image_height * 0.05) # rows near the top or bottom have artifacts
skip_cols = round(image_width * 0.10) # so do columns
# Create images with even and odd rows
even_rows = image[::2][skip_rows: -skip_rows]
odd_rows = image[1::2][skip_rows: -skip_rows]
# Scan angle at which each pixel was recorded.
max_angle = (np.pi / 2) * temporal_fill_fraction
scan_angles = np.linspace(-max_angle, max_angle, image_width + 2)[1:-1]
#sin_index = np.sin(scan_angles)
# Greedy search for the best raster phase: starts at coarse estimates and refines them
even_interp = interp.interp1d(scan_angles, even_rows, fill_value='extrapolate')
odd_interp = interp.interp1d(scan_angles, odd_rows, fill_value='extrapolate')
angle_shift = 0
for scale in [1e-2, 1e-3, 1e-4, 1e-5, 1e-6]:
angle_shifts = angle_shift + scale * np.linspace(-9, 9, 19)
match_values = []
for new_angle_shift in angle_shifts:
shifted_evens = even_interp(scan_angles + new_angle_shift)
shifted_odds = odd_interp(scan_angles - new_angle_shift)
match_values.append(np.sum(shifted_evens[:, skip_cols: -skip_cols] *
shifted_odds[:, skip_cols: -skip_cols]))
angle_shift = angle_shifts[np.argmax(match_values)]
return angle_shift
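# Editor's sketch (not part of the original module): a minimal round trip for
# compute_raster_phase() on a synthetic bidirectional image. The image content
# and fill fraction are made up; only numpy from the imports above is needed,
# and the helper is never called on import.
def _example_compute_raster_phase():
    rng = np.random.RandomState(0)
    image = rng.rand(256, 256)
    # Shift the odd rows by one pixel to mimic a bidirectional raster artifact
    image[1::2] = np.roll(image[1::2], 1, axis=1)
    estimated_phase = compute_raster_phase(image, temporal_fill_fraction=0.9)
    return estimated_phase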
def compute_motion_shifts(scan, template, in_place=True, num_threads=8):
""" Compute shifts in y and x for rigid subpixel motion correction.
Returns the number of pixels that each image in the scan was to the right (x_shift)
or below (y_shift) the template. Negative shifts mean the image was to the left or
above the template.
:param np.array scan: 2 or 3-dimensional scan (image_height, image_width[, num_frames]).
:param np.array template: 2-d template image. Each frame in scan is aligned to this.
:param bool in_place: Whether the scan can be overwritten.
:param int num_threads: Number of threads used for the ffts.
:returns: (y_shifts, x_shifts) Two arrays (num_frames) with the y, x motion shifts.
..note:: Based in imreg_dft.translation().
"""
import pyfftw
from imreg_dft import utils
# Add third dimension if scan is a single image
if scan.ndim == 2:
scan = np.expand_dims(scan, -1)
# Get some params
image_height, image_width, num_frames = scan.shape
taper = np.outer(signal.tukey(image_height, 0.2), signal.tukey(image_width, 0.2))
# Prepare fftw
frame = pyfftw.empty_aligned((image_height, image_width), dtype='complex64')
fft = pyfftw.builders.fft2(frame, threads=num_threads, overwrite_input=in_place,
avoid_copy=True)
ifft = pyfftw.builders.ifft2(frame, threads=num_threads, overwrite_input=in_place,
avoid_copy=True)
# Get fourier transform of template
template_freq = fft(template * taper).conj() # we only need the conjugate
abs_template_freq = abs(template_freq)
eps = abs_template_freq.max() * 1e-15
# Compute subpixel shifts per image
y_shifts = np.empty(num_frames)
x_shifts = np.empty(num_frames)
for i in range(num_frames):
# Compute correlation via cross power spectrum
image_freq = fft(scan[:, :, i] * taper)
cross_power = (image_freq * template_freq) / (abs(image_freq) * abs_template_freq + eps)
shifted_cross_power = np.fft.fftshift(abs(ifft(cross_power)))
# Get best shift
shifts = np.unravel_index(np.argmax(shifted_cross_power), shifted_cross_power.shape)
shifts = utils._interpolate(shifted_cross_power, shifts, rad=3)
# Map back to deviations from center
y_shifts[i] = shifts[0] - image_height // 2
x_shifts[i] = shifts[1] - image_width // 2
return y_shifts, x_shifts
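# Editor's sketch (not part of the original module): compute_motion_shifts()
# applied to a template and a copy of it rolled by a couple of pixels. Requires
# the optional pyfftw and imreg_dft dependencies imported inside the function;
# the helper is never called on import.
def _example_compute_motion_shifts():
    rng = np.random.RandomState(0)
    template = rng.rand(128, 128)
    # One-frame scan: content moved 2 pixels down and 3 pixels to the left
    scan = np.roll(np.roll(template, 2, axis=0), -3, axis=1)[..., np.newaxis]
    y_shifts, x_shifts = compute_motion_shifts(scan, template, in_place=False)
    # y_shifts should come out close to 2 and x_shifts close to -3
    return y_shifts, x_shifts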
def fix_outliers(y_shifts, x_shifts, max_y_shift=20, max_x_shift=20, method='median'):
""" Look for spikes in motion shifts and set them to a sensible value.
Reject any shift whose y or x shift is higher than max_y_shift/max_x_shift pixels
    from the median/linear estimate/moving average. Outlier frames are replaced
    with the median/linear estimate/moving average value at that frame.
:param np.array y_shifts/x_shifts: Shifts in y, x.
:param float max_y_shift/max_x_shifts: Number of pixels used as threshold to classify
a point as an outlier in y, x.
    :param string method: One of 'median', 'linear' or 'trend'.
'median': Detect outliers as deviations from the median of the shifts.
'linear': Detect outliers as deviations from a line estimated from the shifts.
'trend': Detect outliers as deviations from the shift trend computed as a moving
average over the entire scan.
:returns: (y_shifts, x_shifts) Two arrays (num_frames) with the fixed motion shifts.
:returns: (outliers) A boolean array (num_frames) with True for outlier frames.
"""
# Basic checks
num_frames = len(y_shifts)
if num_frames < 5:
return y_shifts, x_shifts, np.full(num_frames, False)
# Copy shifts to avoid changing originals
y_shifts, x_shifts = y_shifts.copy(), x_shifts.copy()
# Detrend shifts
if method == 'median':
y_trend = np.median(y_shifts)
x_trend = np.median(x_shifts)
elif method == 'linear':
x_trend = _fit_robust_line(x_shifts)
y_trend = _fit_robust_line(y_shifts)
else: # trend
window_size = min(101, num_frames)
window_size -= 1 if window_size % 2 == 0 else 0
y_trend = mirrconv(y_shifts, np.ones(window_size) / window_size)
x_trend = mirrconv(x_shifts, np.ones(window_size) / window_size)
# Subtract trend from shifts
y_shifts -= y_trend
x_shifts -= x_trend
# Get outliers
outliers = np.logical_or(abs(y_shifts) > max_y_shift, abs(x_shifts) > max_x_shift)
# Interpolate outliers
num_outliers = np.sum(outliers)
if num_outliers < num_frames - 1: # at least two good points needed for interpolation
#indices = np.arange(len(x_shifts))
#y_shifts = np.interp(indices, indices[~outliers], y_shifts[~outliers], left=0, right=0)
#x_shifts = np.interp(indices, indices[~outliers], x_shifts[~outliers], left=0, right=0)
y_shifts[outliers] = 0
x_shifts[outliers] = 0
else:
print('Warning: {} out of {} frames were outliers.'.format(num_outliers, num_frames))
y_shifts = 0
x_shifts = 0
# Add trend back to shifts
y_shifts += y_trend
x_shifts += x_trend
return y_shifts, x_shifts, outliers
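# Editor's sketch (not part of the original module): fix_outliers() applied to
# a shift trace with two artificial spikes. The spike positions and amplitudes
# are made up and the default 20-pixel thresholds are used; the helper is never
# called on import.
def _example_fix_outliers():
    y_shifts = np.zeros(200)
    x_shifts = np.zeros(200)
    y_shifts[50], x_shifts[120] = 80, -75  # two obvious spikes
    fixed_y, fixed_x, outliers = fix_outliers(y_shifts, x_shifts, method='median')
    # outliers[50] and outliers[120] should be True; the spikes are replaced by
    # the median of the trace (zero here)
    return fixed_y, fixed_x, outliers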
def _fit_robust_line(shifts):
""" Use a robust linear regression algorithm to fit a line to the data."""
from sklearn.linear_model import TheilSenRegressor
X = np.arange(len(shifts)).reshape(-1, 1)
y = shifts
model = TheilSenRegressor() # robust regression
model.fit(X, y)
line = model.predict(X)
return line
def correct_raster(scan, raster_phase, temporal_fill_fraction, in_place=True):
""" Raster correction for resonant scans.
Corrects multi-photon images in n-dimensional scans. Positive raster phase shifts
even lines to the left and odd lines to the right. Negative raster phase shifts even
lines to the right and odd lines to the left.
:param np.array scan: Volume with images to be corrected in the first two dimensions.
Works for 2-dimensions and up, usually (image_height, image_width, num_frames).
:param float raster_phase: Angle difference between expected and recorded scan angle.
:param float temporal_fill_fraction: Ratio between active acquisition and total
length of the scan line.
:param bool in_place: If True (default), the original array is modified in place.
:return: Raster-corrected scan.
:rtype: Same as scan if scan.dtype is subtype of np.float, else np.float32.
:raises: PipelineException
"""
# Basic checks
if not isinstance(scan, np.ndarray):
raise PipelineException('Scan needs to be a numpy array.')
if scan.ndim < 2:
raise PipelineException('Scan with less than 2 dimensions.')
# Assert scan is float
if not np.issubdtype(scan.dtype, np.floating):
print('Warning: Changing scan type from', str(scan.dtype), 'to np.float32')
scan = scan.astype(np.float32, copy=(not in_place))
elif not in_place:
scan = scan.copy() # copy it anyway preserving the original float dtype
# Get some dimensions
original_shape = scan.shape
image_height = original_shape[0]
image_width = original_shape[1]
# Scan angle at which each pixel was recorded.
max_angle = (np.pi / 2) * temporal_fill_fraction
scan_angles = np.linspace(-max_angle, max_angle, image_width + 2)[1:-1]
# We iterate over every image in the scan (first 2 dimensions). Same correction
# regardless of what channel, slice or frame they belong to.
reshaped_scan = np.reshape(scan, (image_height, image_width, -1))
num_images = reshaped_scan.shape[-1]
for i in range(num_images):
# Get current image
image = reshaped_scan[:, :, i]
# Correct even rows of the image (0, 2, ...)
interp_function = interp.interp1d(scan_angles, image[::2, :], bounds_error=False,
fill_value=0, copy=(not in_place))
reshaped_scan[::2, :, i] = interp_function(scan_angles + raster_phase)
# Correct odd rows of the image (1, 3, ...)
interp_function = interp.interp1d(scan_angles, image[1::2, :], bounds_error=False,
fill_value=0, copy=(not in_place))
reshaped_scan[1::2, :, i] = interp_function(scan_angles - raster_phase)
scan = np.reshape(reshaped_scan, original_shape)
return scan
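# Editor's sketch (not part of the original module): estimating and undoing a
# raster phase in one go, using the temporal average as the template image.
# The default fill fraction of 0.9 is an assumed example value; the helper is
# never called on import.
def _example_raster_correction(scan, temporal_fill_fraction=0.9):
    mean_image = scan.mean(axis=-1) if scan.ndim > 2 else scan
    raster_phase = compute_raster_phase(mean_image, temporal_fill_fraction)
    return correct_raster(scan, raster_phase, temporal_fill_fraction,
                          in_place=False)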
def correct_motion(scan, x_shifts, y_shifts, in_place=True):
""" Motion correction for multi-photon scans.
Shifts each image in the scan x_shift pixels to the left and y_shift pixels up.
:param np.array scan: Volume with images to be corrected in the first two dimensions.
Works for 2-dimensions and up, usually (image_height, image_width, num_frames).
:param list/np.array x_shifts: 1-d array with x motion shifts for each image.
    :param list/np.array y_shifts: 1-d array with y motion shifts for each image.
:param bool in_place: If True (default), the original array is modified in place.
:return: Motion corrected scan
:rtype: Same as scan if scan.dtype is subtype of np.float, else np.float32.
:raises: PipelineException
"""
# Basic checks
if not isinstance(scan, np.ndarray):
raise PipelineException('Scan needs to be a numpy array.')
if scan.ndim < 2:
raise PipelineException('Scan with less than 2 dimensions.')
if np.ndim(y_shifts) != 1 or np.ndim(x_shifts) != 1:
raise PipelineException('Dimension of one or both motion arrays differs from 1.')
if len(x_shifts) != len(y_shifts):
raise PipelineException('Length of motion arrays differ.')
# Assert scan is float (integer precision is not good enough)
if not np.issubdtype(scan.dtype, np.floating):
print('Warning: Changing scan type from', str(scan.dtype), 'to np.float32')
scan = scan.astype(np.float32, copy=(not in_place))
elif not in_place:
scan = scan.copy() # copy it anyway preserving the original dtype
# Get some dimensions
original_shape = scan.shape
image_height = original_shape[0]
image_width = original_shape[1]
# Reshape input (to deal with more than 2-D volumes)
reshaped_scan = np.reshape(scan, (image_height, image_width, -1))
if reshaped_scan.shape[-1] != len(x_shifts):
raise PipelineException('Scan and motion arrays have different dimensions')
# Ignore NaN values (present in some older data)
y_clean, x_clean = y_shifts.copy(), x_shifts.copy()
y_clean[np.logical_or(np.isnan(y_shifts), np.isnan(x_shifts))] = 0
x_clean[np.logical_or(np.isnan(y_shifts), np.isnan(x_shifts))] = 0
# Shift each frame
for i, (y_shift, x_shift) in enumerate(zip(y_clean, x_clean)):
image = reshaped_scan[:, :, i].copy()
ndimage.interpolation.shift(image, (-y_shift, -x_shift), order=1,
output=reshaped_scan[:, :, i])
scan = np.reshape(reshaped_scan, original_shape)
    return scan
| lgpl-3.0 |
michigraber/scikit-learn | sklearn/ensemble/tests/test_forest.py | 48 | 35412 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import product
import numpy as np
from scipy.sparse import csr_matrix, csc_matrix, coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, X, y):
# Check variable importances.
ForestClassifier = FOREST_CLASSIFIERS[name]
for n_jobs in [1, 2]:
clf = ForestClassifier(n_estimators=10, n_jobs=n_jobs)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
X_new = clf.transform(X, threshold="mean")
        assert_greater(X_new.shape[1], 0)
        assert_less(X_new.shape[1], X.shape[1])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
importances = clf.feature_importances_
assert_true(np.all(importances >= 0.0))
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=3 * sample_weight)
importances_bis = clf.feature_importances_
assert_almost_equal(importances, importances_bis)
def test_importances():
X, y = datasets.make_classification(n_samples=1000, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name in FOREST_CLASSIFIERS:
yield check_importances, name, X, y
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(return_indicator=True,
random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name, X, y
def check_min_samples_leaf(name, X, y):
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
est = ForestEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def test_min_samples_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name, X, y
def check_min_weight_fraction_leaf(name, X, y):
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
if isinstance(est, (RandomForestClassifier,
RandomForestRegressor)):
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name, X, y
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(return_indicator=True,
random_state=0,
n_samples=40)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
def test_1d_input():
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test class_weight works for multi-output
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert():
classifier = RandomForestClassifier()
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y) | bsd-3-clause |
georgid/sms-tools | lectures/7-Sinusoidal-plus-residual-model/plots-code/stochasticSynthesisFrame.py | 2 | 2997 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, time
from scipy.fftpack import fft, ifft
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
def stochasticModelFrame(x, w, N, stocf) :
# x: input array sound, w: analysis window, N: FFT size,
# stocf: decimation factor of mag spectrum for stochastic analysis
hN = N/2 # size of positive spectrum
hM = (w.size)/2 # half analysis window size
pin = hM # initialize sound pointer in middle of analysis window
fftbuffer = np.zeros(N) # initialize buffer for FFT
yw = np.zeros(w.size) # initialize output sound frame
w = w / sum(w) # normalize analysis window
#-----analysis-----
xw = x[pin-hM:pin+hM] * w # window the input sound
X = fft(xw) # compute FFT
mX = 20 * np.log10( abs(X[:hN]) ) # magnitude spectrum of positive frequencies
mXenv = resample(np.maximum(-200, mX), mX.size*stocf) # decimate the mag spectrum
pX = np.angle(X[:hN])
#-----synthesis-----
mY = resample(mXenv, hN) # interpolate to original size
pY = 2*np.pi*np.random.rand(hN) # generate phase random values
Y = np.zeros(N, dtype = complex)
Y[:hN] = 10**(mY/20) * np.exp(1j*pY) # generate positive freq.
Y[hN+1:] = 10**(mY[:0:-1]/20) * np.exp(-1j*pY[:0:-1]) # generate negative freq.
fftbuffer = np.real( ifft(Y) ) # inverse FFT
y = fftbuffer*N/2
return mX, pX, mY, pY, y
# example call of stochasticModel function
if __name__ == '__main__':
(fs, x) = UF.wavread('../../../sounds/ocean.wav')
w = np.hanning(1024)
N = 1024
stocf = 0.1
maxFreq = 10000.0
lastbin = N*maxFreq/fs
first = 1000
last = first+w.size
mX, pX, mY, pY, y = stochasticModelFrame(x[first:last], w, N, stocf)
plt.figure(1, figsize=(9, 5))
plt.subplot(3,1,1)
plt.plot(np.arange(0, fs/2.0, fs/float(N)), mY, 'r', lw=1.5, label="mY")
plt.axis([0, maxFreq, -78, max(mX)+0.5])
plt.title('mY (stochastic approximation of mX)')
plt.subplot(3,1,2)
plt.plot(np.arange(0, fs/2.0, fs/float(N)), pY-np.pi, 'c', lw=1.5, label="pY")
plt.axis([0, maxFreq, -np.pi, np.pi])
	plt.title('pY (random phases)')
plt.subplot(3,1,3)
plt.plot(np.arange(first, last)/float(fs), y, 'b', lw=1.5)
plt.axis([first/float(fs), last/float(fs), min(y), max(y)])
plt.title('yst')
plt.tight_layout()
plt.savefig('stochasticSynthesisFrame.png')
plt.show()
| agpl-3.0 |
tedunderwood/horizon | chapter3/code/reproduce_fictional_prestige.py | 1 | 7078 | #!/usr/bin/env python3
# reproduce_fictional_prestige.py
# Scripts to reproduce models
# used in Chapter Three,
# The Directions of Literary Change.
import csv, os, sys, pickle, math
# we add a path to be searched so that we can import
# versatiletrainer, which will do most of the work
# Versatiletrainer, and the modules it will in turn call,
# are publicly available in this github repo:
# https://github.com/tedunderwood/overlappingcategories
# mental note: when you file the book repo with Zenodo,
# a copy of the overlappingcategories repo also needs to
# be frozen
sys.path.append('/Users/tunder/Dropbox/python/logistic')
import versatiletrainer as train
import pandas as pd
# sourcefolder =
# extension =
# metadatapath =
# outputpath = '/Users/tunder/Dropbox/GenreProject/python/reception/fiction/predictions.csv'
def genre_gridsearch(metadatapath, modelname, c_range, ftstart, ftend, ftstep, positive_tags = ['elite'], negative_tags = ['vulgar'], excl_below = 1700, excl_above = 2000):
# Function does a gridsearch to identify an optimal number of features and setting of
# the regularization constant; then produces that model.
# sourcefolder = '/Users/tunder/Dropbox/GenreProject/python/reception/fiction/fromEF/'
sourcefolder = '../sourcefiles/'
extension = '.tsv'
#metadatapath = '/Users/tunder/Dropbox/GenreProject/python/reception/fiction/prestigeficmeta.csv'
vocabpath = '/Users/tunder/Dropbox/fiction/lexicon/' + modelname + '.txt'
if os.path.exists(vocabpath):
print('Vocabulary for ' + modelname + ' already exists. Using it.')
outputpath = '../results/' + modelname + '.csv'
# We can simply exclude volumes from consideration on the basis on any
# metadata category we want, using the dictionaries defined below.
## EXCLUSIONS.
excludeif = dict()
excludeifnot = dict()
excludeabove = dict()
excludebelow = dict()
excludebelow['firstpub'] = excl_below
excludeabove['firstpub'] = excl_above
sizecap = 700
# CLASSIFY CONDITIONS
# print()
# print("You can also specify positive tags to be excluded from training, and/or a pair")
# print("of integer dates outside of which vols should be excluded from training.")
# print("If you add 'donotmatch' to the list of tags, these volumes will not be")
# print("matched with corresponding negative volumes.")
# print()
# ## testphrase = input("Comma-separated list of such tags: ")
testphrase = ''
testconditions = set([x.strip() for x in testphrase.split(',') if len(x) > 0])
datetype = "firstpub"
numfeatures = ftend
regularization = .000075
# linting the code would get rid of regularization, which is at this
# point an unused dummy parameter
paths = (sourcefolder, extension, metadatapath, outputpath, vocabpath)
exclusions = (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
modelparams = 'logistic', 12, ftstart, ftend, ftstep, c_range
matrix, rawaccuracy, allvolumes, coefficientuples = train.tune_a_model(paths, exclusions, classifyconditions, modelparams)
print('If we divide the dataset with a horizontal line at 0.5, accuracy is: ', str(rawaccuracy))
tiltaccuracy = train.diachronic_tilt(allvolumes, 'linear', [])
print("Divided with a line fit to the data trend, it's ", str(tiltaccuracy))
def applymodel(modelpath, metadatapath, outpath):
sourcefolder = '/Users/tunder/Dropbox/GenreProject/python/reception/fiction/fromEF'
extension = '.tsv'
newmetadict = train.apply_pickled_model(modelpath, sourcefolder, extension, metadatapath)
print('Got predictions for that model.')
newmetadict.to_csv(outpath)
def comparison(selfmodel, othermodel, modelname):
totalvolumes = 0
right = 0
for v in selfmodel.index:
realgenre = selfmodel.loc[v, 'realclass']
v = str(v)
otherprediction = othermodel.loc[v, modelname]
if realgenre > .5 and otherprediction > 0.5:
right += 1
elif realgenre < .5 and otherprediction < 0.5:
right += 1
totalvolumes +=1
return totalvolumes, right
def getacc(filelist):
allofem = 0
allright = 0
for afile in filelist:
df = pd.read_csv(afile)
totalcount = len(df.realclass)
tp = sum((df.realclass > 0.5) & (df.logistic > 0.5))
tn = sum((df.realclass <= 0.5) & (df.logistic <= 0.5))
fp = sum((df.realclass <= 0.5) & (df.logistic > 0.5))
fn = sum((df.realclass > 0.5) & (df.logistic <= 0.5))
assert totalcount == (tp + fp + tn + fn)
allofem += totalcount
allright += (tp + tn)
return allright / allofem
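# Added illustration (not part of the original script): the confusion-count
# arithmetic used in getacc(), checked on a tiny in-memory frame with the
# same column names ('realclass', 'logistic') assumed above.
def _accuracy_arithmetic_sketch():
    df = pd.DataFrame({'realclass': [1, 1, 0, 0],
                       'logistic': [0.9, 0.2, 0.1, 0.8]})
    tp = sum((df.realclass > 0.5) & (df.logistic > 0.5))
    tn = sum((df.realclass <= 0.5) & (df.logistic <= 0.5))
    # two of the four volumes fall on the correct side of 0.5 -> 0.5 accuracy
    return (tp + tn) / len(df.realclass)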
if __name__ == '__main__':
args = sys.argv
command = args[1]
if command == 'littlemagazines':
c_range = [.00009, .0002, .0004, .0008, .0012, .002, .004, .008, .012, 0.3, 0.8, 2]
featurestart = 1500
featureend = 4000
featurestep = 100
genre_gridsearch('/Users/tunder/Dropbox/GenreProject/python/reception/fiction/littlemagazines.csv', 'littlemagazinespost1919', c_range, featurestart, featureend, featurestep, positive_tags = ['elite'], negative_tags = ['vulgar'], excl_below = 1800, excl_above = 2000)
elif command == 'apply_quarter_century_models':
# We've previously trained models for each quarter-century
# of the fiction corpus: 1850-74, 75-99, and so on.
# Now we need to apply those models to the whole corpus
# in order to see how good their predictions are.
models = []
outpaths = []
for i in range (1850, 1950, 25):
modelpath = '../models/segment' + str(i) + '.pkl'
models.append(modelpath)
outpath = '../results/segment' + str(i) + '.applied.csv'
outpaths.append(outpath)
metadatapath = '../metadata/prestigeficmeta.csv'
for m, o in zip(models, outpaths):
applymodel(m, metadatapath, o)
elif command == 'gender_balance_fiction':
c_range = [.00009, .0002, .0004, .0008, .0012, .002, .004, .008, .012, 0.3, 0.8, 2]
featurestart = 1200
featureend = 4500
featurestep = 100
genre_gridsearch('../metadata/genderbalancedfiction.csv', 'gender_balanced_fiction', c_range, featurestart, featureend, featurestep, positive_tags = ['elite'], negative_tags = ['vulgar'], excl_below = 1800, excl_above = 2000)
elif command == 'nation_balance_fiction':
c_range = [.00009, .0002, .0004, .0008, .0012, .002, .004, .008, .012, 0.3, 0.8, 2]
featurestart = 1200
featureend = 4000
featurestep = 100
genre_gridsearch('../metadata/nationbalancedfiction.csv', 'nation_balanced_fiction', c_range, featurestart, featureend, featurestep, positive_tags = ['elite'], negative_tags = ['vulgar'], excl_below = 1800, excl_above = 2000)
| mit |
nguyentu1602/statsmodels | statsmodels/graphics/factorplots.py | 28 | 7596 | # -*- coding: utf-8 -*-
"""
Authors: Josef Perktold, Skipper Seabold, Denis A. Engemann
"""
from statsmodels.compat.python import get_function_name, iterkeys, lrange, zip, iteritems
import numpy as np
from statsmodels.graphics.plottools import rainbow
import statsmodels.graphics.utils as utils
def interaction_plot(x, trace, response, func=np.mean, ax=None, plottype='b',
xlabel=None, ylabel=None, colors=[], markers=[],
linestyles=[], legendloc='best', legendtitle=None,
**kwargs):
"""
Interaction plot for factor level statistics.
    Note. If categorical factors are supplied, levels will be internally
    recoded to integers. This ensures matplotlib compatibility.
uses pandas.DataFrame to calculate an `aggregate` statistic for each
level of the factor or group given by `trace`.
Parameters
----------
x : array-like
The `x` factor levels constitute the x-axis. If a `pandas.Series` is
given its name will be used in `xlabel` if `xlabel` is None.
trace : array-like
The `trace` factor levels will be drawn as lines in the plot.
If `trace` is a `pandas.Series` its name will be used as the
`legendtitle` if `legendtitle` is None.
response : array-like
        The response or dependent variable. If a `pandas.Series` is given
its name will be used in `ylabel` if `ylabel` is None.
func : function
Anything accepted by `pandas.DataFrame.aggregate`. This is applied to
the response variable grouped by the trace levels.
plottype : str {'line', 'scatter', 'both'}, optional
        The type of plot to return. Can be 'line' ('l'), 'scatter' ('s'), or 'both' ('b').
ax : axes, optional
Matplotlib axes instance
xlabel : str, optional
Label to use for `x`. Default is 'X'. If `x` is a `pandas.Series` it
will use the series names.
ylabel : str, optional
Label to use for `response`. Default is 'func of response'. If
`response` is a `pandas.Series` it will use the series names.
colors : list, optional
If given, must have length == number of levels in trace.
linestyles : list, optional
If given, must have length == number of levels in trace.
markers : list, optional
        If given, must have length == number of levels in trace
kwargs
These will be passed to the plot command used either plot or scatter.
If you want to control the overall plotting options, use kwargs.
Returns
-------
fig : Figure
The figure given by `ax.figure` or a new instance.
Examples
--------
>>> import numpy as np
>>> np.random.seed(12345)
>>> weight = np.random.randint(1,4,size=60)
>>> duration = np.random.randint(1,3,size=60)
>>> days = np.log(np.random.randint(1,30, size=60))
>>> fig = interaction_plot(weight, duration, days,
... colors=['red','blue'], markers=['D','^'], ms=10)
>>> import matplotlib.pyplot as plt
>>> plt.show()
.. plot::
import numpy as np
from statsmodels.graphics.factorplots import interaction_plot
np.random.seed(12345)
weight = np.random.randint(1,4,size=60)
duration = np.random.randint(1,3,size=60)
days = np.log(np.random.randint(1,30, size=60))
fig = interaction_plot(weight, duration, days,
colors=['red','blue'], markers=['D','^'], ms=10)
import matplotlib.pyplot as plt
#plt.show()
"""
from pandas import DataFrame
fig, ax = utils.create_mpl_ax(ax)
response_name = ylabel or getattr(response, 'name', 'response')
ylabel = '%s of %s' % (get_function_name(func), response_name)
xlabel = xlabel or getattr(x, 'name', 'X')
legendtitle = legendtitle or getattr(trace, 'name', 'Trace')
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
x_values = x_levels = None
if isinstance(x[0], str):
x_levels = [l for l in np.unique(x)]
x_values = lrange(len(x_levels))
x = _recode(x, dict(zip(x_levels, x_values)))
data = DataFrame(dict(x=x, trace=trace, response=response))
plot_data = data.groupby(['trace', 'x']).aggregate(func).reset_index()
# return data
# check plot args
n_trace = len(plot_data['trace'].unique())
if linestyles:
try:
assert len(linestyles) == n_trace
except AssertionError as err:
raise ValueError("Must be a linestyle for each trace level")
else: # set a default
linestyles = ['-'] * n_trace
if markers:
try:
assert len(markers) == n_trace
except AssertionError as err:
raise ValueError("Must be a linestyle for each trace level")
else: # set a default
markers = ['.'] * n_trace
if colors:
try:
assert len(colors) == n_trace
except AssertionError as err:
raise ValueError("Must be a linestyle for each trace level")
else: # set a default
#TODO: how to get n_trace different colors?
colors = rainbow(n_trace)
if plottype == 'both' or plottype == 'b':
for i, (values, group) in enumerate(plot_data.groupby(['trace'])):
# trace label
label = str(group['trace'].values[0])
ax.plot(group['x'], group['response'], color=colors[i],
marker=markers[i], label=label,
linestyle=linestyles[i], **kwargs)
elif plottype == 'line' or plottype == 'l':
for i, (values, group) in enumerate(plot_data.groupby(['trace'])):
# trace label
label = str(group['trace'].values[0])
ax.plot(group['x'], group['response'], color=colors[i],
label=label, linestyle=linestyles[i], **kwargs)
elif plottype == 'scatter' or plottype == 's':
for i, (values, group) in enumerate(plot_data.groupby(['trace'])):
# trace label
label = str(group['trace'].values[0])
ax.scatter(group['x'], group['response'], color=colors[i],
label=label, marker=markers[i], **kwargs)
else:
raise ValueError("Plot type %s not understood" % plottype)
ax.legend(loc=legendloc, title=legendtitle)
ax.margins(.1)
if all([x_levels, x_values]):
ax.set_xticks(x_values)
ax.set_xticklabels(x_levels)
return fig
def _recode(x, levels):
""" Recode categorial data to int factor.
Parameters
----------
x : array-like
        array-like object supporting numpy array methods, containing
        categorically coded data.
levels : dict
mapping of labels to integer-codings
Returns
-------
out : instance numpy.ndarray
"""
from pandas import Series
name = None
if isinstance(x, Series):
name = x.name
x = x.values
if x.dtype.type not in [np.str_, np.object_]:
        raise ValueError('This is not a categorical factor.'
                         ' Array of str type required.')
elif not isinstance(levels, dict):
raise ValueError('This is not a valid value for levels.'
' Dict required.')
elif not (np.unique(x) == np.unique(list(iterkeys(levels)))).all():
raise ValueError('The levels do not match the array values.')
else:
out = np.empty(x.shape[0], dtype=np.int)
for level, coding in iteritems(levels):
out[x == level] = coding
if name:
out = Series(out)
out.name = name
return out
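# Added usage sketch (not part of statsmodels): _recode maps string levels to
# the integer codes supplied in `levels`; the level names here are made up.
def _recode_example():
    x = np.asarray(['low', 'high', 'low'])
    return _recode(x, {'low': 0, 'high': 1})  # -> array([0, 1, 0])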
| bsd-3-clause |
jm-begon/scikit-learn | sklearn/__init__.py | 154 | 3014 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.17.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
| bsd-3-clause |
reuk/wayverb | scripts/python/dispersion.py | 2 | 6340 | from math import e, pi
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors, ticker, cm
from mpl_toolkits.mplot3d import Axes3D
import operator
def get_base_vectors(flip):
ret = [
np.array([0.0, 2.0 * np.sqrt(2.0) / 3.0, 1.0 / 3.0]),
np.array([ np.sqrt(2.0 / 3.0), -np.sqrt(2.0) / 3.0, 1.0 / 3.0]),
np.array([0.0, 0.0, -1.0]),
np.array([-np.sqrt(2.0 / 3.0), -np.sqrt(2.0) / 3.0, 1.0 / 3.0]),
]
if flip:
ret = [np.array([1, -1, -1]) * i for i in ret]
return ret
def get_vectors():
ret = [i + j for i in get_base_vectors(False) for j in get_base_vectors(True)]
ret = filter(lambda x: np.any(x != np.array([0, 0, 0])), ret)
return ret
# DUYNE METHOD
def get_speed(arr):
"""
The diagrams in the paper appear to be continuous outside of the range
-1.5, 1.5.
However, this function has a strange discontinuity at a radius of 1.4
"""
def get_b(arr):
summed = sum([pow(e, 1j * np.dot(arr, i)) for i in get_vectors()])
return 1.0 - 0.25 * summed.real
def get_ang_g(arr):
b = get_b(arr)
return 0.5 * np.arctan(np.sqrt(4 - b * b) / abs(b))
c = np.sqrt(1.0 / 3.0)
norm = np.linalg.norm(arr)
# this analysis is only valid for frequencies below pi / 2
# (spectrum is mirrored above this limit)
# simulated frequency is equal to magnitude of wave vector (arr)
if norm < pi / 2:
return get_ang_g(arr) / (norm * c)
else:
return None
# CAMPOS METHOD
def get_speed_campos(arr):
def get_b(arr):
x, y, z = arr
a = np.cos(2.0 * x / np.sqrt(3.0)) * np.cos(2.0 * y / np.sqrt(3.0))
b = np.cos(2.0 * x / np.sqrt(3.0)) * np.cos(2.0 * z / np.sqrt(3.0))
c = np.cos(2.0 * y / np.sqrt(3.0)) * np.cos(2.0 * z / np.sqrt(3.0))
return a + b + c - 1
def get_kd(arr):
return np.sqrt(3.0) * np.arccos(get_b(arr) / 2.0) / (2.0 * np.linalg.norm(arr))
return get_kd(arr)
# direction error analysis from @hacihabiboglu
# p(x) = pressure field in spatial(?) domain
# P(w) = pressure field in frequency domain
def get_U():
v = get_base_vectors(True)
U = np.vstack(v)
return U
def eq_21(u, w):
return pow(e, -1j * np.dot(u, w)) - 1
def eq_22(w):
return np.array([eq_21(i, w) for i in get_base_vectors(True)])
def eq_23(w):
return np.dot(np.linalg.pinv(get_U()), eq_22(w))
def hermitian_angle(a, b):
prod = np.dot(a, np.conj(b)).real
mag_a = np.sqrt(np.dot(a, np.conj(a)))
mag_b = np.sqrt(np.dot(b, np.conj(b)))
return (prod / (mag_a * mag_b)).real
def direction_difference(arr):
def get_term_1():
return eq_23(arr)
def get_term_2():
return 1j * arr
return hermitian_angle(get_term_1(), get_term_2())
# monte carlo bandwidth estimation
def random_three_vector():
phi = np.random.uniform(0, pi * 2)
costheta = np.random.uniform(-1, 1)
theta = np.arccos(costheta)
x = np.sin(theta) * np.cos(phi)
y = np.sin(theta) * np.sin(phi)
z = np.cos(theta)
return np.array([x, y, z])
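# Added check (illustrative only): random_three_vector draws directions
# uniformly on the unit sphere, so every sample has unit length.
def _unit_length_check(samples=5):
    return [np.linalg.norm(random_three_vector()) for _ in range(samples)]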
def get_max_valid_frequency(func, accuracy, starting_freq, increments, samples):
last = starting_freq + increments
ret = starting_freq
while True:
sample_points = [random_three_vector() * last for i in range(samples)]
sampled = [func(i) for i in sample_points]
if not all(map(lambda x: x > accuracy, sampled)):
return ret
else:
ret = last
last += increments
def main():
"""
This program duplicates the tetrahedral dispersion diagrams from the paper
'The Tetrahedral Digital Waveguide Mesh' buy Duyne and Smith.
I wrote it to try to understand how to do dispersion analysis - the
analysis here is of the difference of the actual wavefront speed to the
ideal speed.
"""
w = np.array([0, 1, 0])
w /= np.linalg.norm(w)
print "w", w
for i in get_base_vectors(True):
print "u", i
print "21", eq_21(i, w)
print "22", eq_22(w)
print "23", eq_23(w)
print
print direction_difference(w)
func = direction_difference
vfunc = np.vectorize(lambda x, y, z: func(np.array([x, y, z])))
max_val = np.pi / 4
phi, theta = np.mgrid[0:pi:50j, 0:2*pi:50j]
XX = max_val * np.sin(phi) * np.cos(theta)
YY = max_val * np.sin(phi) * np.sin(theta)
ZZ = max_val * np.cos(phi)
zz = vfunc(XX, YY, ZZ)
zzmin, zzmax = zz.min(), zz.max()
print "dispersion error range:", zzmin, "to", zzmax
zz = (zz - zzmin) / (zzmax - zzmin)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(
XX, YY, ZZ, rstride=1, cstride=1, facecolors=cm.jet(zz))
plt.show()
# func = get_speed_campos
# vfunc = np.vectorize(lambda x, y, z: func(np.array([x, y, z])))
#
# min_accuracy = 0.99
# max_val = get_max_valid_frequency(func, min_accuracy, 0.1, 0.001, 20)
# print "maximum radius (frequency): ", max_val / (pi / 2)
# phi, theta = np.mgrid[0:pi:50j, 0:2*pi:50j]
# XX = max_val * np.sin(phi) * np.cos(theta)
# YY = max_val * np.sin(phi) * np.sin(theta)
# ZZ = max_val * np.cos(phi)
# zz = vfunc(XX, YY, ZZ)
# zzmin, zzmax = zz.min(), zz.max()
# print "dispersion error range:", zzmin, "to", zzmax
# zz = (zz - zzmin) / (zzmax - zzmin)
#
# fig = plt.figure()
#
# bounds = pi / 2
# N = 100
# x = np.linspace(-bounds, bounds, N)
# y = np.linspace(-bounds, bounds, N)
# X, Y = np.meshgrid(x, y)
# Z = np.zeros(X.shape)
# depth = np.linspace(0.9, 1, 11)
#
# ### plot 1
# ax = fig.add_subplot(221 + 0)
# z = vfunc(Z, X, Y)
# plt.contourf(X, Y, z, depth)
# cbar = plt.colorbar()
#
# ### plot 2
# ax = fig.add_subplot(221 + 1)
# z = vfunc(X, Z, Y)
# plt.contourf(X, Y, z, depth)
# cbar = plt.colorbar()
#
# ### plot 3
# ax = fig.add_subplot(221 + 2)
# z = vfunc(X, Y, Z)
# plt.contourf(X, Y, z, depth)
# cbar = plt.colorbar()
#
# ax = fig.add_subplot(224, projection='3d')
# ax.plot_surface(
# XX, YY, ZZ, rstride=1, cstride=1, facecolors=cm.jet(zz))
#
# plt.show()
if __name__ == "__main__":
main()
| gpl-2.0 |
mompiou/misorientation | misorientation.py | 1 | 48149 | #!/usr/bin/python
from __future__ import division
import numpy as np
from Tkinter import *
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib import pyplot as plt
from PIL import Image
from PIL import PngImagePlugin
import ttk
import sys
from fractions import Fraction
from tkFileDialog import *
import os
import matplotlib as mpl
mpl.rcParams['font.size'] = 12
####################################################################
##### Function: projection onto the stereographic net
####################################################################
def proj(x,y,z):
if z==1:
X=0
Y=0
elif z<-0.000001:
X=250
Y=250
else:
X=x/(1+z)
Y=y/(1+z)
return np.array([X,Y],float)
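# Added illustration (not part of the original code): the north pole [0,0,1]
# projects to the centre of the stereogram and an equatorial direction such
# as [1,0,0] projects onto the unit circle.
def _proj_example():
    return proj(0, 0, 1), proj(1, 0, 0)  # -> (array([0., 0.]), array([1., 0.]))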
####################################################################
##### Function: rotation matrix from Euler angles
####################################################################
def rotation(phi1,phi,phi2):
phi1=phi1*np.pi/180;
phi=phi*np.pi/180;
phi2=phi2*np.pi/180;
R=np.array([[np.cos(phi1)*np.cos(phi2)-np.cos(phi)*np.sin(phi1)*np.sin(phi2),
-np.cos(phi)*np.cos(phi2)*np.sin(phi1)-np.cos(phi1)*
np.sin(phi2),np.sin(phi)*np.sin(phi1)],[np.cos(phi2)*np.sin(phi1)
+np.cos(phi)*np.cos(phi1)*np.sin(phi2),np.cos(phi)*np.cos(phi1)
*np.cos(phi2)-np.sin(phi1)*np.sin(phi2), -np.cos(phi1)*np.sin(phi)],
[np.sin(phi)*np.sin(phi2), np.cos(phi2)*np.sin(phi), np.cos(phi)]],float)
return R
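# Added sanity-check sketch: the Euler-angle matrix built above is a proper
# rotation (orthogonal, determinant +1) for any choice of angles; the angles
# used here are arbitrary.
def _rotation_is_proper_example(phi1=10., phi=20., phi2=30.):
    R = rotation(phi1, phi, phi2)
    return np.allclose(np.dot(R, R.T), np.eye(3)) and np.allclose(np.linalg.det(R), 1.0)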
####################################################################
##### Function: rotation about an axis
####################################################################
def Rot(th,a,b,c):
th=th*np.pi/180;
aa=a/np.linalg.norm([a,b,c]);
bb=b/np.linalg.norm([a,b,c]);
cc=c/np.linalg.norm([a,b,c]);
c1=np.array([[1,0,0],[0,1,0],[0,0,1]],float)
c2=np.array([[aa**2,aa*bb,aa*cc],[bb*aa,bb**2,bb*cc],[cc*aa,
cc*bb,cc**2]],float)
c3=np.array([[0,-cc,bb],[cc,0,-aa],[-bb,aa,0]],float)
R=np.cos(th)*c1+(1-np.cos(th))*c2+np.sin(th)*c3
return R
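# Added illustration: Rot is the axis-angle (Rodrigues) rotation; a 90 degree
# turn about the z axis sends the x axis onto the y axis.
def _rot_example():
    return np.dot(Rot(90, 0, 0, 1), np.array([1., 0., 0.]))  # ~ [0, 1, 0]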
####################################################################
##### Function: crystal
####################################################################
def crist():
global axesA,axeshA,axesB,axeshB,D,Dstar,V
a=eval(a_entry.get())
b=eval(b_entry.get())
c=eval(c_entry.get())
alp=eval(alp_entry.get())
bet=eval(bet_entry.get())
gam=eval(gam_entry.get())
e=eval(e_entry.get())
d2=eval(d_label_var.get())
alp=alp*np.pi/180;
bet=bet*np.pi/180;
gam=gam*np.pi/180;
    V=a*b*c*np.sqrt(1-(np.cos(alp)**2)-(np.cos(bet))**2-(np.cos(gam))**2+2*np.cos(alp)*np.cos(bet)*np.cos(gam))
D=np.array([[a,b*np.cos(gam),c*np.cos(bet)],[0,b*np.sin(gam), c*(np.cos(alp)-np.cos(bet)*np.cos(gam))/np.sin(gam)],[0,0,V/(a*b*np.sin(gam))]])
Dstar=np.transpose(np.linalg.inv(D))
G=np.array([[a**2,a*b*np.cos(gam),a*c*np.cos(bet)],[a*b*np.cos(gam),b**2,b*c*np.cos(alp)],[a*c*np.cos(bet),b*c*np.cos(alp),c**2]])
axes=np.zeros(((2*e+1)**3-1,3))
axesh=np.zeros(((2*e+1)**3-1,3))
id=0
for i in range(-e,e+1):
for j in range(-e,e+1):
for k in range(-e,e+1):
if (i,j,k)!=(0,0,0):
d=1/(np.sqrt(np.dot(np.array([i,j,k]),np.dot(np.linalg.inv(G),np.array([i,j,k])))))
if d>d2*0.1*np.amax([a,b,c]):
if var_uvw.get()==0:
axesh[id,:]=np.dot(Dstar,np.array([i,j,k],float))
axes[id,:]=np.array([i,j,k],float)
else:
axesh[id,:]=np.dot(D,np.array([i,j,k],float))
axes[id,:]=np.array([i,j,k],float)
id=id+1
axesA=axes
axesB=axes
axeshA=axesh
axeshB=axesh
return axesA,axeshA,axesB,axeshB,D,Dstar,V
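# Added sketch (independent of the GUI entries): for a cubic cell the direct
# matrix D reduces to a*identity and Dstar = inv(D).T to identity/a, so
# direct and reciprocal directions coincide up to scale.
def _cubic_metric_example(a=1.0):
    D_cub = a * np.eye(3)
    Dstar_cub = np.transpose(np.linalg.inv(D_cub))
    return np.allclose(Dstar_cub, np.eye(3) / a)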
def dm():
global dmip
a=f.add_subplot(111)
a.figure.clear()
a=f.add_subplot(111)
dmip=dmip-eval(d_entry.get())
d_label_var.set(dmip)
crist()
trace()
return dmip
def dp():
global dmip
a=f.add_subplot(111)
a.figure.clear()
a=f.add_subplot(111)
dmip=dmip+eval(d_entry.get())
d_label_var.set(dmip)
crist()
trace()
return dmip
####################################################################
##### Function: add a pole
####################################################################
def poleA(pole1,pole2,pole3):
global MA,axesA,axeshA,Ta,V,D,Dstar
fp=f.add_subplot(111)
Gs=np.array([pole1,pole2,pole3],float)
Pp=np.zeros((1,2),float)
if var_uvw.get()==0:
Gsh=np.dot(Dstar,Gs)/np.linalg.norm(np.dot(Dstar,Gs))
else:
Gsh=np.dot(D,Gs)/np.linalg.norm(np.dot(D,Gs))
S=np.dot(MA,Gsh)
if S[2]<0:
S=-S
Gsh=-Gsh
pole1=-pole1
pole2=-pole2
pole3=-pole3
Pp=proj(S[0],S[1],S[2])*600/2
l=str(int(pole1))+str(int(pole2))+str(int(pole3))
fp.plot(Pp[0]+600/2,Pp[1]+600/2,'ro')
fp.annotate(l,(Pp[0]+600/2,Pp[1]+600/2))
fp.axis([0,600,0,600])
fp.axis('off')
fp.figure.canvas.draw()
axesA=np.vstack((axesA,np.array([pole1,pole2,pole3])))
axesA=np.vstack((axesA,np.array([-pole1,-pole2,-pole3])))
Ta=np.vstack((Ta,np.array([S[0],S[1],S[2]])))
Ta=np.vstack((Ta,np.array([-S[0],-S[1],-S[2]])))
axeshA=np.vstack((axeshA,np.array([Gsh[0],Gsh[1],Gsh[2]])))
axeshA=np.vstack((axeshA,np.array([-Gsh[0],-Gsh[1],-Gsh[2]])))
return axesA,axeshA,Ta
def poleB(pole1,pole2,pole3):
global MB,axesB,axeshB,Tb,V,D,Dstar
fp=f.add_subplot(111)
Gs=np.array([pole1,pole2,pole3],float)
Pp=np.zeros((1,2),float)
if var_uvw.get()==0:
Gsh=np.dot(Dstar,Gs)/np.linalg.norm(np.dot(Dstar,Gs))
else:
Gsh=np.dot(D,Gs)/np.linalg.norm(np.dot(D,Gs))
S=np.dot(MB,Gsh)
if S[2]<0:
S=-S
Gsh=-Gsh
pole1=-pole1
pole2=-pole2
pole3=-pole3
Pp=proj(S[0],S[1],S[2])*600/2
l=str(int(pole1))+str(int(pole2))+str(int(pole3))
fp.plot(Pp[0]+600/2,Pp[1]+600/2,'ro')
fp.annotate(l,(Pp[0]+600/2,Pp[1]+600/2))
fp.axis([0,600,0,600])
fp.axis('off')
fp.figure.canvas.draw()
axesB=np.vstack((axesB,np.array([pole1,pole2,pole3])))
axesB=np.vstack((axesB,np.array([-pole1,-pole2,-pole3])))
Tb=np.vstack((Tb,np.array([S[0],S[1],S[2]])))
Tb=np.vstack((Tb,np.array([-S[0],-S[1],-S[2]])))
axeshB=np.vstack((axeshB,np.array([Gsh[0],Gsh[1],Gsh[2]])))
axeshB=np.vstack((axeshB,np.array([-Gsh[0],-Gsh[1],-Gsh[2]])))
return axesB,axeshB,Tb
def addpoleA_sym():
pole1A=eval(pole1A_entry.get())
pole2A=eval(pole2A_entry.get())
pole3A=eval(pole3A_entry.get())
poleA(pole1A,pole2A,pole3A)
poleA(pole1A,pole2A,-pole3A)
poleA(pole1A,-pole2A,pole3A)
poleA(-pole1A,pole2A,pole3A)
poleA(pole2A,pole1A,pole3A)
poleA(pole2A,pole1A,-pole3A)
poleA(pole2A,-pole1A,pole3A)
poleA(-pole2A,pole1A,pole3A)
poleA(pole2A,pole3A,pole1A)
poleA(pole2A,pole3A,-pole1A)
poleA(pole2A,-pole3A,pole1A)
poleA(-pole2A,pole3A,pole1A)
poleA(pole1A,pole3A,pole2A)
poleA(pole1A,pole3A,-pole2A)
poleA(pole1A,-pole3A,pole2A)
poleA(-pole1A,pole3A,pole2A)
poleA(pole3A,pole1A,pole2A)
poleA(pole3A,pole1A,-pole2A)
poleA(pole3A,-pole1A,pole2A)
poleA(-pole3A,pole1A,pole2A)
poleA(pole3A,pole2A,pole1A)
poleA(pole3A,pole2A,-pole1A)
poleA(pole3A,-pole2A,pole1A)
poleA(-pole3A,pole2A,pole1A)
trace()
def addpoleB_sym():
pole1B=eval(pole1B_entry.get())
pole2B=eval(pole2B_entry.get())
pole3B=eval(pole3B_entry.get())
poleB(pole1B,pole2B,pole3B)
poleB(pole1B,pole2B,-pole3B)
poleB(pole1B,-pole2B,pole3B)
poleB(-pole1B,pole2B,pole3B)
poleB(pole2B,pole1B,pole3B)
poleB(pole2B,pole1B,-pole3B)
poleB(pole2B,-pole1B,pole3B)
poleB(-pole2B,pole1B,pole3B)
poleB(pole2B,pole3B,pole1B)
poleB(pole2B,pole3B,-pole1B)
poleB(pole2B,-pole3B,pole1B)
poleB(-pole2B,pole3B,pole1B)
poleB(pole1B,pole3B,pole2B)
poleB(pole1B,pole3B,-pole2B)
poleB(pole1B,-pole3B,pole2B)
poleB(-pole1B,pole3B,pole2B)
poleB(pole3B,pole1B,pole2B)
poleB(pole3B,pole1B,-pole2B)
poleB(pole3B,-pole1B,pole2B)
poleB(-pole3B,pole1B,pole2B)
poleB(pole3B,pole2B,pole1B)
poleB(pole3B,pole2B,-pole1B)
poleB(pole3B,-pole2B,pole1B)
poleB(-pole3B,pole2B,pole1B)
trace()
def addpoleA():
pole1A=eval(pole1A_entry.get())
pole2A=eval(pole2A_entry.get())
pole3A=eval(pole3A_entry.get())
poleA(pole1A,pole2A,pole3A)
trace()
def addpoleB():
pole1B=eval(pole1B_entry.get())
pole2B=eval(pole2B_entry.get())
pole3B=eval(pole3B_entry.get())
poleB(pole1B,pole2B,pole3B)
trace()
####################################################################
##### Function: draw a plane trace
####################################################################
def trace_planA():
global MA,axes,axesh,Ta,V,D,Dstar
f2=f.add_subplot(111)
pole1A=eval(pole1A_entry.get())
pole2A=eval(pole2A_entry.get())
pole3A=eval(pole3A_entry.get())
Gs=np.array([pole1A,pole2A,pole3A],float)
if var_uvw.get()==0:
Gsh=np.dot(Dstar,Gs)/np.linalg.norm(np.dot(Dstar,Gs))
else:
Gsh=np.dot(D,Gs)/np.linalg.norm(np.dot(D,Gs))
S=np.dot(MA,Gsh)
if S[2]<0:
S=-S
Gsh=-Gsh
pole1A=-pole1A
pole2A=-pole2A
pole3A=-pole3A
r=np.sqrt(S[0]**2+S[1]**2+S[2]**2)
A=np.zeros((2,100))
Q=np.zeros((1,2))
if S[2]==0:
t=90
w=0
else:
t=np.arctan2(S[1],S[0])*180/np.pi
w=0
ph=np.arccos(S[2]/r)*180/np.pi
for g in np.linspace(-np.pi,np.pi-0.00001,100):
Aa=np.dot(Rot(t,0,0,1),np.dot(Rot(ph,0,1,0),np.array([np.sin(g),np.cos(g),0])))
A[:,w]=proj(Aa[0],Aa[1],Aa[2])*600/2
if A[0,w]<>75000:
Q=np.vstack((Q,A[:,w]))
w=w+1
Q=np.delete(Q,0,0)
f2.plot(Q[:,0]+600/2,Q[:,1]+600/2,'r')
f2.axis([0,600,0,600])
f2.axis('off')
f2.figure.canvas.draw()
trace()
def trace_planB():
global MB,axes,axesh,Tb,V,D,Dstar
f2=f.add_subplot(111)
pole1B=eval(pole1B_entry.get())
pole2B=eval(pole2B_entry.get())
pole3B=eval(pole3B_entry.get())
Gs=np.array([pole1B,pole2B,pole3B],float)
if var_uvw.get()==0:
Gsh=np.dot(Dstar,Gs)/np.linalg.norm(np.dot(Dstar,Gs))
else:
Gsh=np.dot(D,Gs)/np.linalg.norm(np.dot(D,Gs))
S=np.dot(MB,Gsh)
if S[2]<0:
S=-S
Gsh=-Gsh
pole1B=-pole1B
pole2B=-pole2B
pole3B=-pole3B
r=np.sqrt(S[0]**2+S[1]**2+S[2]**2)
A=np.zeros((2,100))
Q=np.zeros((1,2))
if S[2]==0:
t=90
w=0
else:
t=np.arctan2(S[1],S[0])*180/np.pi
w=0
ph=np.arccos(S[2]/r)*180/np.pi
for g in np.linspace(-np.pi,np.pi-0.00001,100):
Aa=np.dot(Rot(t,0,0,1),np.dot(Rot(ph,0,1,0),np.array([np.sin(g),np.cos(g),0])))
A[:,w]=proj(Aa[0],Aa[1],Aa[2])*600/2
if A[0,w]<>75000:
Q=np.vstack((Q,A[:,w]))
w=w+1
Q=np.delete(Q,0,0)
f2.plot(Q[:,0]+600/2,Q[:,1]+600/2,'r')
f2.axis([0,600,0,600])
f2.axis('off')
f2.figure.canvas.draw()
trace()
####################################################################
##### Click a pole
####################################################################
def click_a_pole(event):
global MB,Dstar,D
x=event.x
y=event.y
x=(x-411)*2/620
y=-(y-400)*2/620
X=2*x/(1+x**2+y**2)
Y=2*y/(1+x**2+y**2)
Z=(-1+x**2+y**2)/(1+x**2+y**2)
if Z<0:
X=-X
Y=-Y
A=np.dot(np.linalg.inv(MB),np.array([X,Y,Z]))
n=0
L=np.zeros((3,16**3))
for i in range(-8,9,1):
for j in range(-8,9,1):
for k in range(-8,9,1):
if np.linalg.norm([i,j,k])<>0:
if var_uvw.get()==0:
Q=np.dot(Dstar,np.array([i,j,k],float))/np.linalg.norm(np.dot(Dstar,np.array([i,j,k],float)))
if np.abs(Q[0]-A[0])<0.05 and np.abs(Q[1]-A[1])<0.05 and np.abs(Q[2]-A[2])<0.05:
L[:,n]=np.array([i,j,k],float)
n=n+1
else:
Q=np.dot(D,np.array([i,j,k],float))/np.linalg.norm(np.dot(D,np.array([i,j,k],float)))
if np.abs(Q[0]-A[0])<0.05 and np.abs(Q[1]-A[1])<0.05 and np.abs(Q[2]-A[2])<0.05:
L[:,n]=np.array([i,j,k],float)
n=n+1
if np.linalg.norm(L[:,0])<>0:
poleB(L[0,0],L[1,0],L[2,0])
trace()
####################################################################
##### Beta tilt
####################################################################
####################################################################
##### Function: misorientation
####################################################################
def Rota(t,u,v,w,g):
Ae=np.dot(g,np.array([u,v,w]))
Re=Rot(t,Ae[0],Ae[1],Ae[2])
return Re
def cryststruct():
global cs
a=eval(a_entry.get())
b=eval(b_entry.get())
c=eval(c_entry.get())
alp=eval(alp_entry.get())
bet=eval(bet_entry.get())
gam=eval(gam_entry.get())
if gam==90 and alp==90 and bet==90 and a==b and b==c:
cs=1
if gam==120 and alp==90 and bet==90:
cs=2
if gam==90 and alp==90 and bet==90 and a==b and b<>c:
cs=3
if alp<>90 and a==b and b==c:
cs=4
if gam==90 and alp==90 and bet==90 and a<>b and b<>c:
cs=5
if gam<>90 and alp==90 and bet==90 and a<>b and b<>c:
cs=6
if gam<>90 and alp<>90 and bet<>90 and a<>b and b<>c:
cs=7
return cs
def Sy(g):
global cs
if cs==1:
S1=Rota(90,1,0,0,g);
S2=Rota(180,1,0,0,g);
S3=Rota(270,1,0,0,g);
S4=Rota(90,0,1,0,g);
S5=Rota(180,0,1,0,g);
S6=Rota(270,0,1,0,g);
S7=Rota(90,0,0,1,g);
S8=Rota(180,0,0,1,g);
S9=Rota(270,0,0,1,g);
S10=Rota(180,1,1,0,g);
S11=Rota(180,1,0,1,g);
S12=Rota(180,0,1,1,g);
S13=Rota(180,-1,1,0,g);
S14=Rota(180,-1,0,1,g);
S15=Rota(180,0,-1,1,g);
S16=Rota(120,1,1,1,g);
S17=Rota(240,1,1,1,g);
S18=Rota(120,-1,1,1,g);
S19=Rota(240,-1,1,1,g);
S20=Rota(120,1,-1,1,g);
S21=Rota(240,1,-1,1,g);
S22=Rota(120,1,1,-1,g);
S23=Rota(240,1,1,-1,g);
S24=np.eye(3,3);
S=np.vstack((S1,S2,S3,S4,S5,S6,S7,S8,S9,S10,S11,S12,S13,S14,S15,S16,S17,S18,S19,S20,S21,S22,S23,S24))
if cs==2:
S1=Rota(60,0,0,1,g);
S2=Rota(120,0,0,1,g);
S3=Rota(180,0,0,1,g);
S4=Rota(240,0,0,1,g);
S5=Rota(300,0,0,1,g);
S6=np.eye(3,3);
S7=Rota(180,0,0,1,g);
S8=Rota(180,0,1,0,g);
S9=Rota(180,1/2,np.sqrt(3)/2,0,g);
S10=Rota(180,-1/2,np.sqrt(3)/2,0,g);
S11=Rota(180,np.sqrt(3)/2,1/2,0,g);
S12=Rota(180,-np.sqrt(3)/2,1/2,0,g);
S=np.vstack((S1,S2,S3,S4,S5,S6,S7,S8,S9,S10,S11,S12))
if cs==3:
S1=Rota(90,0,0,1,g);
S2=Rota(180,0,0,1,g);
S3=Rota(270,0,0,1,g);
S4=Rota(180,0,1,0,g);
S5=Rota(180,1,0,0,g);
S6=Rota(180,1,1,0,g);
S7=Rota(180,1,-1,0,g);
S8=np.eye(3,3)
S=np.vstack((S1,S2,S3,S4,S5,S6,S7,S8))
if cs==4:
S1=Rota(60,0,0,1,g);
S2=Rota(120,0,0,1,g);
S3=Rota(180,0,0,1,g);
S4=Rota(240,0,0,1,g);
S5=Rota(300,0,0,1,g);
S6=np.eye(3,3);
S7=Rota(180,0,0,1,g);
S8=Rota(180,0,1,0,g);
S9=Rota(180,1/2,np.sqrt(3)/2,0,g);
S10=Rota(180,-1/2,np.sqrt(3)/2,0,g);
S11=Rota(180,np.sqrt(3)/2,1/2,0,g);
S12=Rota(180,-np.sqrt(3)/2,1/2,0,g);
S=np.vstack((S1,S2,S3,S4,S5,S6,S7,S8,S9,S10,S11,S12))
if cs==5:
S1=Rota(180,0,0,1,g);
S2=Rota(180,1,0,0,g);
S3=Rota(180,0,1,0,g);
S4=np.eye(3,3);
S=np.vstack((S1,S2,S3,S4))
if cs==6:
S1=Rota(180,0,1,0,g);
S2=np.eye(3,3);
S=np.vstack((S1,S2))
if cs==7:
S=np.eye(3,3);
return S
def null(A, rcond=None):
u, s, vh = np.linalg.svd(A, full_matrices=True)
M, N = u.shape[0], vh.shape[1]
if rcond is None:
rcond = np.finfo(s.dtype).eps * max(M, N)
tol = np.amax(s) * rcond
num = np.sum(s > tol, dtype=int)
Q = vh[num:,:].T.conj()
return Q
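# Added illustration: null() returns an orthonormal basis of the null space
# from the SVD; for this rank-2 matrix the basis is a single column
# proportional to [0, 0, 1].
def _null_example():
    A = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 0.]])
    return null(A)  # 3x1 column proportional to [0, 0, 1]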
def desorientation():
global D0,S,D1,cs,V,Qp
a = f.add_subplot(111)
a.figure.clear()
a = f.add_subplot(111)
fn = os.path.join(os.path.dirname(__file__), 'stereo.png')
img=np.array(Image.open(fn))
cryststruct()
phi1a=eval(phi1A_entry.get())
phia=eval(phiA_entry.get())
phi2a=eval(phi2A_entry.get())
phi1b=eval(phi1B_entry.get())
phib=eval(phiB_entry.get())
phi2b=eval(phi2B_entry.get())
gA=rotation(phi1a,phia,phi2a)
gB=rotation(phi1b,phib,phi2b)
k=0
S=Sy(gA)
D0=np.zeros((int(np.shape(S)[0]/3),5))
D1=np.zeros((int(np.shape(S)[0]/3),3))
Qp=np.zeros((int(np.shape(S)[0]/3),2))
for i in range(0,np.shape(S)[0],3):
In=np.dot(np.array([[S[i,0],S[i+1,0],S[i+2,0]],[S[i,1],S[i+1,1],S[i+2,1]],[S[i,2],S[i+1,2],S[i+2,2]]]),gA)
Ing=np.dot(In,np.array([0,0,1]))
In2=np.dot(Rot(-phi2b,Ing[0],Ing[1],Ing[2]),In)
Ing2=np.dot(In2,np.array([1,0,0]))
In3=np.dot(Rot(-phib,Ing2[0],Ing2[1],Ing2[2]),In2)
Ing3=np.dot(In3,np.array([0,0,1]))
A=np.dot(Rot(-phi1b,Ing3[0],Ing3[1],Ing3[2]),In3)-np.eye(3)
V=null(A,0.001).T
if 0.5*(np.trace(A+np.eye(3))-1)>1:
D0[k,3]=0
elif 0.5*(np.trace(A+np.eye(3))-1)<-1:
D0[k,3]=180
else:
D0[k,3]=np.arccos(0.5*(np.trace(A+np.eye(3))-1))*180/np.pi
if np.abs(D0[k,3])<1e-5:
D0[k,0]=0
D0[k,1]=0
D0[k,2]=0
else:
D0[k,0]=V[0,0]/np.linalg.norm(V)
D0[k,1]=V[0,1]/np.linalg.norm(V)
D0[k,2]=V[0,2]/np.linalg.norm(V)
Ds1=np.dot(np.linalg.inv(gB),np.array([D0[k,0],D0[k,1],D0[k,2]]))
F0=Fraction(Ds1[0]).limit_denominator(10)
F1=Fraction(Ds1[1]).limit_denominator(10)
F2=Fraction(Ds1[2]).limit_denominator(10)
D1[k,0]=F0.numerator*F1.denominator*F2.denominator
D1[k,1]=F1.numerator*F0.denominator*F2.denominator
D1[k,2]=F2.numerator*F0.denominator*F1.denominator
if D0[k,2]<0:
D0[k,0]=-D0[k,0]
D0[k,1]=-D0[k,1]
D0[k,2]=-D0[k,2]
D1[k,0]=-D1[k,0]
D1[k,1]=-D1[k,1]
D1[k,2]=-D1[k,2]
D0[k,4]=k
Qp[k,:]=proj(D0[k,0],D0[k,1],D0[k,2])*600/2
k=k+1
a.plot(Qp[:,0]+600/2,Qp[:,1]+600/2,'ro')
a.axis([0,600,0,600])
a.imshow(img,interpolation="bicubic")
a.axis('off')
a.figure.canvas.draw()
trace()
return Qp,S,D1
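# Added illustration of the angle/axis extraction used in desorientation(),
# on a standalone rotation matrix (no GUI state): the angle follows from the
# trace and the axis spans the null space of M - identity.
def _axis_angle_example():
    M = Rot(60, 1, 1, 1)  # 60 degrees about [111]
    theta = np.arccos(0.5 * (np.trace(M) - 1)) * 180 / np.pi  # ~60
    axis = null(M - np.eye(3), 0.001).T
    return theta, axis / np.linalg.norm(axis)  # ~ +/-[1,1,1]/sqrt(3)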
####################################################################
##### Main function
####################################################################
def trace():
global Ta,Tb,axesA,axeshA,MA,axesB,axeshB,MB,Qp,S,D1,show_ind,D0
a = f.add_subplot(111)
fn = os.path.join(os.path.dirname(__file__), 'stereo.png')
img=np.array(Image.open(fn))
Pa=np.zeros((np.shape(axesA)[0],2))
Pb=np.zeros((np.shape(axesB)[0],2))
for i in range(0,np.shape(axesA)[0]):
axeshA[i,:]=axeshA[i,:]/np.linalg.norm(axeshA[i,:])
Ta[i,:]=np.dot(MA,axeshA[i,:])
Pa[i,:]=proj(Ta[i,0],Ta[i,1],Ta[i,2])*600/2
if show_ind.get()==1:
m=np.amax([np.abs(axesA[i,0]),np.abs(axesA[i,1]),np.abs(axesA[i,2])])
if (np.around(axesA[i,0]/m)==axesA[i,0]/m) & (np.around(axesA[i,1]/m)==axesA[i,1]/m) & (np.around(axesA[i,2]/m)==axesA[i,2]/m):
sA=str(int(axesA[i,0]/m))+str(int(axesA[i,1]/m))+str(int(axesA[i,2]/m))
else:
sA=str(int(axesA[i,0]))+str(int(axesA[i,1]))+str(int(axesA[i,2]))
a.annotate(sA,(Pa[i,0]+600/2,Pa[i,1]+600/2))
for i in range(0,np.shape(axesB)[0]):
axeshB[i,:]=axeshB[i,:]/np.linalg.norm(axeshB[i,:])
Tb[i,:]=np.dot(MB,axeshB[i,:])
Pb[i,:]=proj(Tb[i,0],Tb[i,1],Tb[i,2])*600/2
if show_ind.get()==1:
m=np.amax([np.abs(axesB[i,0]),np.abs(axesB[i,1]),np.abs(axesB[i,2])])
if (np.around(axesB[i,0]/m)==axesB[i,0]/m) & (np.around(axesB[i,1]/m)==axesB[i,1]/m) & (np.around(axesB[i,2]/m)==axesB[i,2]/m):
sB=str(int(axesB[i,0]/m))+str(int(axesB[i,1]/m))+str(int(axesB[i,2]/m))
else:
sB=str(int(axesB[i,0]))+str(int(axesB[i,1]))+str(int(axesB[i,2]))
a.annotate(sB,(Pb[i,0]+600/2,Pb[i,1]+600/2))
for l in range(0,int(np.shape(S)[0]/3)):
if show_angle.get()==1:
sangle=str(np.round(D0[l,3],decimals=1))
a.annotate(sangle,(Qp[l,0]+600/2,Qp[l,1]+600/2),size=8)
if show_axe.get()==1:
saxe=str(int(D1[l,0]))+','+str(int(D1[l,1]))+','+str(int(D1[l,2]))
a.annotate(saxe,(Qp[l,0]+600/2,Qp[l,1]+600/2),size=8)
if show_num.get()==1:
snum=str(int(D0[l,4]))
a.annotate(snum,(Qp[l,0]+600/2,Qp[l,1]+600/2),size=10)
a.plot(Pa[:,0]+600/2,Pa[:,1]+600/2,'bo')
a.plot(Pb[:,0]+600/2,Pb[:,1]+600/2,'go')
a.plot(Qp[:,0]+600/2,Qp[:,1]+600/2,'ro')
a.axis([0,600,0,600])
a.imshow(img,interpolation="bicubic")
a.axis('off')
a.figure.canvas.draw()
def princ():
global Ta,Tb,MA,MB
a = f.add_subplot(111)
a.figure.clear()
a = f.add_subplot(111)
phi1a=eval(phi1A_entry.get())
phia=eval(phiA_entry.get())
phi2a=eval(phi2A_entry.get())
phi1b=eval(phi1B_entry.get())
phib=eval(phiB_entry.get())
phi2b=eval(phi2B_entry.get())
fn = os.path.join(os.path.dirname(__file__), 'stereo.png')
img=np.array(Image.open(fn))
crist()
Pa=np.zeros((np.shape(axesA)[0],2))
Ta=np.zeros((np.shape(axesA)))
Pb=np.zeros((np.shape(axesB)[0],2))
Tb=np.zeros((np.shape(axesB)))
for i in range(0,np.shape(axesA)[0]):
axeshA[i,:]=axeshA[i,:]/np.linalg.norm(axeshA[i,:])
Ta[i,:]=np.dot(rotation(phi1a,phia,phi2a),axeshA[i,:])
Pa[i,:]=proj(Ta[i,0],Ta[i,1],Ta[i,2])*600/2
m=np.amax([np.abs(axesA[i,0]),np.abs(axesA[i,1]),np.abs(axesA[i,2])])
if (np.around(axesA[i,0]/m)==axesA[i,0]/m) & (np.around(axesA[i,1]/m)==axesA[i,1]/m) & (np.around(axesA[i,2]/m)==axesA[i,2]/m):
sA=str(int(axesA[i,0]/m))+str(int(axesA[i,1]/m))+str(int(axesA[i,2]/m))
else:
sA=str(int(axesA[i,0]))+str(int(axesA[i,1]))+str(int(axesA[i,2]))
a.annotate(sA,(Pa[i,0]+600/2,Pa[i,1]+600/2))
for i in range(0,np.shape(axesB)[0]):
axeshB[i,:]=axeshB[i,:]/np.linalg.norm(axeshB[i,:])
Tb[i,:]=np.dot(rotation(phi1b,phib,phi2b),axeshB[i,:])
Pb[i,:]=proj(Tb[i,0],Tb[i,1],Tb[i,2])*600/2
m=np.amax([np.abs(axesB[i,0]),np.abs(axesB[i,1]),np.abs(axesB[i,2])])
if (np.around(axesB[i,0]/m)==axesB[i,0]/m) & (np.around(axesB[i,1]/m)==axesB[i,1]/m) & (np.around(axesB[i,2]/m)==axesB[i,2]/m):
            sB=str(int(axesB[i,0]/m))+str(int(axesB[i,1]/m))+str(int(axesB[i,2]/m))
else:
sB=str(int(axesB[i,0]))+str(int(axesB[i,1]))+str(int(axesB[i,2]))
a.annotate(sB,(Pb[i,0]+600/2,Pb[i,1]+600/2))
a.plot(Pa[:,0]+600/2,Pa[:,1]+600/2,'bo')
a.plot(Pb[:,0]+600/2,Pb[:,1]+600/2,'go')
a.axis([0,600,0,600])
a.imshow(img,interpolation="bicubic")
a.axis('off')
a.figure.canvas.draw()
MA=rotation(phi1a,phia,phi2a)
MB=rotation(phi1b,phib,phi2b)
return Ta,MA,MB,Tb
######################################################################
# GUI
######################################################################
def file_save():
global D1,D0,D
fout = asksaveasfile(mode='w', defaultextension=".txt")
for i in range(np.shape(D1)[0]):
text2save = str(int(D0[i,4]))+'\t'+'['+str(int(D1[i,0]))+','+str(int(D1[i,1]))+','+str(int(D1[i,2]))+']'+'\t '+str(np.around(D0[i,3],decimals=2))
fout.write("%s\n" % text2save)
fout.close()
def image_save():
s = asksaveasfile(mode='w', defaultextension=".jpg")
if s:
f.savefig(s.name)
#s.close()
####################################################
# initialization function
##################################################
def init():
global var_uvw,D1,S,Qp,show_ind,show_angle,show_axe,show_num,dmip,d_label_var
fn = os.path.join(os.path.dirname(__file__), 'stereo.png')
img=np.array(Image.open(fn))
a = f.add_subplot(111)
a.axis('off')
a.imshow(img,interpolation="bicubic")
a.figure.canvas.draw()
S=np.zeros((1,5))
Qp=np.zeros((1,2))
D1=np.zeros((1,5))
var_uvw=IntVar()
show_ind=IntVar()
show_angle=IntVar()
show_axe=IntVar()
show_num=IntVar()
d_label_var=StringVar()
d_label_var.set(0)
dmip=0
return var_uvw,show_ind,show_angle,show_axe,show_num
##############################################################
# quit function
#######################################################
def _quit():
root.quit() # stops mainloop
root.destroy() # this is necessary on Windows to prevent
# Fatal Python Error: PyEval_RestoreThread: NULL tstate
#############################################################
root = Tk()
root.wm_title("Misorientation")
root.geometry('1220x798+10+40')
root.configure(bg = '#BDBDBD')
#root.resizable(0,0)
#s=ttk.Style()
#s.theme_use('clam')
style = ttk.Style()
theme = style.theme_use()
default = style.lookup(theme, 'background')
################################################
# Create an area for plotting graphs
################################################
f = Figure(facecolor='white',figsize=[2,2],dpi=100)
canvas = FigureCanvasTkAgg(f, master=root)
canvas.get_tk_widget().place(x=0,y=0,height=800,width=800)
canvas._tkcanvas.bind('<Button-3>', click_a_pole)
canvas.show()
toolbar = NavigationToolbar2TkAgg( canvas, root )
toolbar.zoom('off')
toolbar.update()
###################################################
init()
#import _imaging
#print _imaging.__file__
##############################################
# Buttons
##############################################
phi1A_entry = Entry (master=root)
phi1A_entry.place(relx=0.72,rely=0.5,relheight=0.03,relwidth=0.07)
phi1A_entry.configure(background="white")
phi1A_entry.configure(foreground="black")
phi1A_entry.configure(highlightbackground="#e0e0dfdfe3e3")
phi1A_entry.configure(highlightcolor="#000000")
phi1A_entry.configure(insertbackground="#000000")
phi1A_entry.configure(selectbackground="#c4c4c4")
phi1A_entry.configure(selectforeground="black")
phiA_entry = Entry (master=root)
phiA_entry.place(relx=0.72,rely=0.55,relheight=0.03,relwidth=0.07)
phiA_entry.configure(background="white")
phiA_entry.configure(foreground="black")
phiA_entry.configure(highlightcolor="black")
phiA_entry.configure(insertbackground="black")
phiA_entry.configure(selectbackground="#c4c4c4")
phiA_entry.configure(selectforeground="black")
label_euler = Label (master=root)
label_euler.place(relx=0.77,rely=0.42,height=46,width=163)
label_euler.configure(activebackground="#cccccc")
label_euler.configure(activeforeground="black")
label_euler.configure(cursor="fleur")
label_euler.configure(foreground="black")
label_euler.configure(highlightcolor="black")
label_euler.configure(text='''Euler angles \n A blue , B green''')
phi2A_entry = Entry (master=root)
phi2A_entry.place(relx=0.72,rely=0.6,relheight=0.03,relwidth=0.07)
phi2A_entry.configure(background="white")
phi2A_entry.configure(foreground="black")
phi2A_entry.configure(highlightcolor="black")
phi2A_entry.configure(insertbackground="black")
phi2A_entry.configure(selectbackground="#c4c4c4")
phi2A_entry.configure(selectforeground="black")
button_trace = Button (master=root)
button_trace.place(relx=0.7,rely=0.66,height=21,width=49)
button_trace.configure(activebackground="#f9f9f9")
button_trace.configure(activeforeground="black")
button_trace.configure(background="#ff0000")
button_trace.configure(command=princ)
button_trace.configure(foreground="black")
button_trace.configure(highlightcolor="black")
button_trace.configure(pady="0")
button_trace.configure(text='''PLOT''')
Phi1A_label = Label (master=root)
Phi1A_label.place(relx=0.67,rely=0.5,height=19,width=50)
Phi1A_label.configure(activebackground="#cccccc")
Phi1A_label.configure(activeforeground="black")
Phi1A_label.configure(foreground="black")
Phi1A_label.configure(highlightcolor="black")
Phi1A_label.configure(text='''Phi1A''')
PhiA_label = Label (master=root)
PhiA_label.place(relx=0.67,rely=0.55,height=19,width=50)
PhiA_label.configure(activebackground="#cccccc")
PhiA_label.configure(activeforeground="black")
PhiA_label.configure(foreground="black")
PhiA_label.configure(highlightcolor="black")
PhiA_label.configure(text='''PhiA''')
Phi2A_label = Label (master=root)
Phi2A_label.place(relx=0.67,rely=0.6,height=19,width=50)
Phi2A_label.configure(activebackground="#cccccc")
Phi2A_label.configure(activeforeground="black")
Phi2A_label.configure(foreground="black")
Phi2A_label.configure(highlightcolor="black")
Phi2A_label.configure(text='''Phi2A''')
phi1B_entry = Entry (master=root)
phi1B_entry.place(relx=0.86,rely=0.5,relheight=0.03,relwidth=0.07)
phi1B_entry.configure(background="white")
phi1B_entry.configure(foreground="black")
phi1B_entry.configure(highlightbackground="#e0e0dfdfe3e3")
phi1B_entry.configure(highlightcolor="#000000")
phi1B_entry.configure(insertbackground="#000000")
phi1B_entry.configure(selectbackground="#c4c4c4")
phi1B_entry.configure(selectforeground="black")
Phi1B = Label (master=root)
Phi1B.place(relx=0.81,rely=0.5,height=19,width=50)
Phi1B.configure(activebackground="#cccccc")
Phi1B.configure(activeforeground="black")
Phi1B.configure(foreground="black")
Phi1B.configure(highlightcolor="black")
Phi1B.configure(text='''Phi1B''')
PhiB_label1 = Label (master=root)
PhiB_label1.place(relx=0.81,rely=0.55,height=19,width=50)
PhiB_label1.configure(activebackground="#cccccc")
PhiB_label1.configure(activeforeground="black")
PhiB_label1.configure(foreground="black")
PhiB_label1.configure(highlightcolor="black")
PhiB_label1.configure(text='''PhiB''')
Phi2B_label2 = Label (master=root)
Phi2B_label2.place(relx=0.81,rely=0.6,height=19,width=50)
Phi2B_label2.configure(activebackground="#cccccc")
Phi2B_label2.configure(activeforeground="black")
Phi2B_label2.configure(foreground="black")
Phi2B_label2.configure(highlightcolor="black")
Phi2B_label2.configure(text='''Phi2B''')
phiB_entry = Entry (master=root)
phiB_entry.place(relx=0.86,rely=0.55,relheight=0.03,relwidth=0.07)
phiB_entry.configure(background="white")
phiB_entry.configure(foreground="black")
phiB_entry.configure(highlightbackground="#e0e0dfdfe3e3")
phiB_entry.configure(highlightcolor="#000000")
phiB_entry.configure(insertbackground="#000000")
phiB_entry.configure(selectbackground="#c4c4c4")
phiB_entry.configure(selectforeground="black")
phi2B_entry = Entry (master=root)
phi2B_entry.place(relx=0.86,rely=0.6,relheight=0.03,relwidth=0.07)
phi2B_entry.configure(background="white")
phi2B_entry.configure(foreground="black")
phi2B_entry.configure(highlightbackground="#e0e0dfdfe3e3")
phi2B_entry.configure(highlightcolor="#000000")
phi2B_entry.configure(insertbackground="#000000")
phi2B_entry.configure(selectbackground="#c4c4c4")
phi2B_entry.configure(selectforeground="black")
button_desorientation = Button (master=root)
button_desorientation.place(relx=0.81,rely=0.66,height=21,width=124)
button_desorientation.configure(activebackground="#f9f9f9")
button_desorientation.configure(activeforeground="black")
button_desorientation.configure(background="#00ff00")
button_desorientation.configure(command=desorientation)
button_desorientation.configure(foreground="black")
button_desorientation.configure(highlightcolor="black")
button_desorientation.configure(pady="0")
button_desorientation.configure(text='''MISORIENTATION''')
Cristal_label = Label (master=root)
Cristal_label.place(relx=0.66,rely=0.03,height=19,width=142)
Cristal_label.configure(text='''Crystal Parameters''')
a_cristal_label = Label (master=root)
a_cristal_label.place(relx=0.68,rely=0.06,height=19,width=12)
a_cristal_label.configure(text='''a''')
b_cristal_label = Label (master=root)
b_cristal_label.place(relx=0.68,rely=0.1,height=19,width=12)
b_cristal_label.configure(activebackground="#f9f9f9")
b_cristal_label.configure(activeforeground="black")
b_cristal_label.configure(foreground="black")
b_cristal_label.configure(highlightcolor="black")
b_cristal_label.configure(text='''b''')
c_cristal_label = Label (master=root)
c_cristal_label.place(relx=0.68,rely=0.14,height=19,width=11)
c_cristal_label.configure(activebackground="#f9f9f9")
c_cristal_label.configure(activeforeground="black")
c_cristal_label.configure(foreground="black")
c_cristal_label.configure(highlightcolor="black")
c_cristal_label.configure(text='''c''')
alp_cristal_label = Label (master=root)
alp_cristal_label.place(relx=0.67,rely=0.18,height=19,width=42)
alp_cristal_label.configure(activebackground="#f9f9f9")
alp_cristal_label.configure(activeforeground="black")
alp_cristal_label.configure(foreground="black")
alp_cristal_label.configure(highlightcolor="black")
alp_cristal_label.configure(text='''alpha''')
bet_cristal_label = Label (master=root)
bet_cristal_label.place(relx=0.67,rely=0.22,height=19,width=42)
bet_cristal_label.configure(activebackground="#f9f9f9")
bet_cristal_label.configure(activeforeground="black")
bet_cristal_label.configure(foreground="black")
bet_cristal_label.configure(highlightcolor="black")
bet_cristal_label.configure(text='''beta''')
gam_cristal_label = Label (master=root)
gam_cristal_label.place(relx=0.66,rely=0.26,height=19,width=52)
gam_cristal_label.configure(activebackground="#f9f9f9")
gam_cristal_label.configure(activeforeground="black")
gam_cristal_label.configure(foreground="black")
gam_cristal_label.configure(highlightcolor="black")
gam_cristal_label.configure(text='''gamma''')
a_entry = Entry (master=root)
a_entry.place(relx=0.7,rely=0.06,relheight=0.03,relwidth=0.06)
a_entry.configure(background="white")
a_entry.configure(insertbackground="black")
b_entry = Entry (master=root)
b_entry.place(relx=0.7,rely=0.1,relheight=0.03,relwidth=0.06)
b_entry.configure(background="white")
b_entry.configure(foreground="black")
b_entry.configure(highlightcolor="black")
b_entry.configure(insertbackground="black")
b_entry.configure(selectbackground="#c4c4c4")
b_entry.configure(selectforeground="black")
c_entry = Entry (master=root)
c_entry.place(relx=0.7,rely=0.14,relheight=0.03,relwidth=0.06)
c_entry.configure(background="white")
c_entry.configure(foreground="black")
c_entry.configure(highlightcolor="black")
c_entry.configure(insertbackground="black")
c_entry.configure(selectbackground="#c4c4c4")
c_entry.configure(selectforeground="black")
alp_entry = Entry (master=root)
alp_entry.place(relx=0.71,rely=0.18,relheight=0.03,relwidth=0.06)
alp_entry.configure(background="white")
alp_entry.configure(foreground="black")
alp_entry.configure(highlightcolor="black")
alp_entry.configure(insertbackground="black")
alp_entry.configure(selectbackground="#c4c4c4")
alp_entry.configure(selectforeground="black")
bet_entry = Entry (master=root)
bet_entry.place(relx=0.71,rely=0.22,relheight=0.03,relwidth=0.06)
bet_entry.configure(background="white")
bet_entry.configure(foreground="black")
bet_entry.configure(highlightcolor="black")
bet_entry.configure(insertbackground="black")
bet_entry.configure(selectbackground="#c4c4c4")
bet_entry.configure(selectforeground="black")
gam_entry = Entry (master=root)
gam_entry.place(relx=0.71,rely=0.26,relheight=0.03,relwidth=0.06)
gam_entry.configure(background="white")
gam_entry.configure(foreground="black")
gam_entry.configure(highlightcolor="black")
gam_entry.configure(insertbackground="black")
gam_entry.configure(selectbackground="#c4c4c4")
gam_entry.configure(selectforeground="black")
uvw_button = Checkbutton (master=root)
uvw_button.place(relx=0.75,rely=0.66,relheight=0.03,relwidth=0.04)
uvw_button.configure(text='''uvw''')
uvw_button.configure(variable=var_uvw)
e_label = Label (master=root)
e_label.place(relx=0.66,rely=0.31,height=19,width=86)
e_label.configure(text='''Max indices''')
e_entry = Entry (master=root)
e_entry.place(relx=0.74,rely=0.31,relheight=0.03,relwidth=0.05)
e_entry.configure(background="white")
e_entry.configure(insertbackground="black")
e2_label = Label (master=root)
e2_label.place(relx=0.68,rely=0.36,height=19,width=12)
e2_label.configure(text='''d''')
dm_button = Button (master=root)
dm_button.place(relx=0.7,rely=0.36,height=21,width=13)
dm_button.configure(activebackground="#f9f9f9")
dm_button.configure(activeforeground="black")
dm_button.configure(command=dm)
dm_button.configure(foreground="black")
dm_button.configure(highlightcolor="black")
dm_button.configure(pady="0")
dm_button.configure(text='''-''')
d_entry = Entry (master=root)
d_entry.place(relx=0.72,rely=0.36,relheight=0.02,relwidth=0.04)
d_entry.configure(background="white")
d_entry.configure(foreground="black")
d_entry.configure(highlightcolor="black")
d_entry.configure(insertbackground="black")
d_entry.configure(selectbackground="#c4c4c4")
d_entry.configure(selectforeground="black")
dp_button = Button (master=root)
dp_button.place(relx=0.76,rely=0.36,height=21,width=17)
dp_button.configure(activebackground="#f9f9f9")
dp_button.configure(activeforeground="black")
dp_button.configure(command=dp)
dp_button.configure(foreground="black")
dp_button.configure(highlightcolor="black")
dp_button.configure(pady="0")
dp_button.configure(text='''+''')
d_label = Label (master=root)
d_label.place(relx=0.73,rely=0.39,height=19,width=16)
d_label.configure(textvariable=d_label_var)
label_addpoleA = Label (master=root)
label_addpoleA.place(relx=0.81,rely=0.03,height=19,width=90)
label_addpoleA.configure(activebackground="#cccccc")
label_addpoleA.configure(activeforeground="black")
label_addpoleA.configure(foreground="black")
label_addpoleA.configure(highlightcolor="black")
label_addpoleA.configure(text='''Add pole A''')
pole1A_entry = Entry (master=root)
pole1A_entry.place(relx=0.81,rely=0.06,relheight=0.02
,relwidth=0.04)
pole1A_entry.configure(background="white")
pole1A_entry.configure(foreground="black")
pole1A_entry.configure(highlightcolor="black")
pole1A_entry.configure(insertbackground="black")
pole1A_entry.configure(selectbackground="#c4c4c4")
pole1A_entry.configure(selectforeground="black")
pole2A_entry = Entry (master=root)
pole2A_entry.place(relx=0.87,rely=0.06,relheight=0.02
,relwidth=0.04)
pole2A_entry.configure(background="white")
pole2A_entry.configure(foreground="black")
pole2A_entry.configure(highlightcolor="black")
pole2A_entry.configure(insertbackground="black")
pole2A_entry.configure(selectbackground="#c4c4c4")
pole2A_entry.configure(selectforeground="black")
pole3A_entry = Entry (master=root)
pole3A_entry.place(relx=0.93,rely=0.06,relheight=0.02
,relwidth=0.04)
pole3A_entry.configure(background="white")
pole3A_entry.configure(foreground="black")
pole3A_entry.configure(highlightcolor="black")
pole3A_entry.configure(insertbackground="black")
pole3A_entry.configure(selectbackground="#c4c4c4")
pole3A_entry.configure(selectforeground="black")
addpoleA_button = Button (master=root)
addpoleA_button.place(relx=0.81,rely=0.11,height=31,width=57)
addpoleA_button.configure(activebackground="#f9f9f9")
addpoleA_button.configure(activeforeground="black")
addpoleA_button.configure(command=addpoleA)
addpoleA_button.configure(foreground="black")
addpoleA_button.configure(highlightcolor="black")
addpoleA_button.configure(pady="0")
addpoleA_button.configure(text='''Add''')
symA_button = Button (master=root)
symA_button.place(relx=0.87,rely=0.11,height=31,width=71)
symA_button.configure(command=addpoleA_sym)
symA_button.configure(pady="0")
symA_button.configure(text='''Symmetry''')
trace_planA_button = Button (master=root)
trace_planA_button.place(relx=0.93,rely=0.11,height=31,width=81)
trace_planA_button.configure(command=trace_planA)
trace_planA_button.configure(pady="0")
trace_planA_button.configure(text='''Draw plane''')
label_addpoleB = Label (master=root)
label_addpoleB.place(relx=0.81,rely=0.2,height=19,width=90)
label_addpoleB.configure(activebackground="#cccccc")
label_addpoleB.configure(activeforeground="black")
label_addpoleB.configure(foreground="black")
label_addpoleB.configure(highlightcolor="black")
label_addpoleB.configure(text='''Add pole B''')
pole1B_entry = Entry (master=root)
pole1B_entry.place(relx=0.81,rely=0.24,relheight=0.02
,relwidth=0.04)
pole1B_entry.configure(background="white")
pole1B_entry.configure(foreground="black")
pole1B_entry.configure(highlightcolor="black")
pole1B_entry.configure(insertbackground="black")
pole1B_entry.configure(selectbackground="#c4c4c4")
pole1B_entry.configure(selectforeground="black")
pole2B_entry = Entry (master=root)
pole2B_entry.place(relx=0.87,rely=0.24,relheight=0.02
,relwidth=0.04)
pole2B_entry.configure(background="white")
pole2B_entry.configure(foreground="black")
pole2B_entry.configure(highlightcolor="black")
pole2B_entry.configure(insertbackground="black")
pole2B_entry.configure(selectbackground="#c4c4c4")
pole2B_entry.configure(selectforeground="black")
pole3B_entry = Entry (master=root)
pole3B_entry.place(relx=0.93,rely=0.24,relheight=0.02
,relwidth=0.04)
pole3B_entry.configure(background="white")
pole3B_entry.configure(foreground="black")
pole3B_entry.configure(highlightcolor="black")
pole3B_entry.configure(insertbackground="black")
pole3B_entry.configure(selectbackground="#c4c4c4")
pole3B_entry.configure(selectforeground="black")
addpoleB_button = Button (master=root)
addpoleB_button.place(relx=0.81,rely=0.28,height=31,width=55)
addpoleB_button.configure(activebackground="#f9f9f9")
addpoleB_button.configure(activeforeground="black")
addpoleB_button.configure(command=addpoleB)
addpoleB_button.configure(foreground="black")
addpoleB_button.configure(highlightcolor="black")
addpoleB_button.configure(pady="0")
addpoleB_button.configure(text='''Add''')
symB_button = Button (master=root)
symB_button.place(relx=0.87,rely=0.28,height=31,width=71)
symB_button.configure(command=addpoleB_sym)
symB_button.configure(pady="0")
symB_button.configure(text='''Symmetry''')
trace_planB_button = Button (master=root)
trace_planB_button.place(relx=0.93,rely=0.28,height=31,width=81)
trace_planB_button.configure(command=trace_planB)
trace_planB_button.configure(pady="0")
trace_planB_button.configure(text='''Draw plane''')
show_ind_button = Checkbutton (master=root)
show_ind_button.place(relx=0.81,rely=0.7,relheight=0.03
,relwidth=0.11)
show_ind_button.configure(text='''Show indices''')
show_ind_button.configure(variable=show_ind)
show_angle_button = Checkbutton (master=root)
show_angle_button.place(relx=0.81,rely=0.74,relheight=0.03
,relwidth=0.11)
show_angle_button.configure(text='''Show angle''')
show_angle_button.configure(variable=show_angle)
show_axe_button = Checkbutton (master=root)
show_axe_button.place(relx=0.81,rely=0.78,relheight=0.03
,relwidth=0.11)
show_axe_button.configure(text='''Show axes''')
show_axe_button.configure(variable=show_axe)
show_num_button = Checkbutton (master=root)
show_num_button.place(relx=0.81,rely=0.82,relheight=0.03
,relwidth=0.11)
show_num_button.configure(text='''Show numbers''')
show_num_button.configure(variable=show_num)
menu = Menu(master=root)
filemenu = Menu(menu, tearoff=0)
menu.add_cascade(label="Save", menu=filemenu)
root.config(menu=menu)
filemenu.add_command(label="Save data", command=file_save)
filemenu.add_command(label="Save figure", command=image_save)
######################################################################################################
######## import crystal structures from a file: Name,a,b,c,alpha,beta,gamma,space group
######################################################################################################
def structure(i0):
global x0
a_entry.delete(0,END)
a_entry.insert(1,eval(x0[i0][1]))
b_entry.delete(0,END)
b_entry.insert(1,eval(x0[i0][2]))
c_entry.delete(0,END)
c_entry.insert(1,eval(x0[i0][3]))
alp_entry.delete(0,END)
alp_entry.insert(1,eval(x0[i0][4]))
bet_entry.delete(0,END)
bet_entry.insert(1,eval(x0[i0][5]))
gam_entry.delete(0,END)
gam_entry.insert(1,eval(x0[i0][6]))
def createstructure(i):
return lambda:structure(i)
cristalmenu=Menu(menu,tearoff=0)
menu.add_cascade(label="Structures", menu=cristalmenu)
file_struct=open(os.path.join(os.path.dirname(__file__), 'structure.txt') ,"r")
x0=[]
i=0
for line in file_struct:
x0.append(map(str, line.split()))
cristalmenu.add_command(label=x0[i][0], command=createstructure(i))
i=i+1
file_struct.close()
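# Illustrative note (not part of the original code): each line of structure.txt
# is expected to hold at least seven whitespace-separated fields --
# Name a b c alpha beta gamma [space group] -- since structure() evaluates
# fields 1..6 as the lattice parameters. Hypothetical example line:
#   Aluminium 4.05 4.05 4.05 90 90 90 Fm-3m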
#######################################################################################################
phi1A_entry.insert(0,0)
phiA_entry.insert(0,0)
phi2A_entry.insert(0,0)
phi1B_entry.insert(0,0)
phiB_entry.insert(0,0)
phi2B_entry.insert(0,0)
e_entry.insert(1,1)
d_entry.insert(1,1)
mainloop()
| gpl-2.0 |
swharden/SWHLab | doc/uses/EPSCs-and-IPSCs/smooth histogram method/05.py | 1 | 1812 | """
MOST OF THIS CODE IS NOT USED.
IT IS COPY/PASTED AND LEFT HERE FOR CONVENIENCE.
"""
import os
import sys
# in case our module isn't installed (running from this folder)
if not os.path.abspath('../../../') in sys.path:
sys.path.append('../../../') # helps spyder get docs
import swhlab
import swhlab.common as cm
import matplotlib.pyplot as plt
import numpy as np
import warnings # suppress VisibleDeprecationWarning warning
warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning)
def analyzeSweep(abf,plotToo=True,color=None,label=None):
    Y=abf.sweepYsmartbase()[int(abf.pointsPerSec*.5):] # int() avoids float indexing errors
AV,SD=np.average(Y),np.std(Y)
dev=5 # number of stdevs from the avg to set the range
R1,R2=[(AV-SD)*dev,(AV+SD)*dev]
nBins=1000
hist,bins=np.histogram(Y,bins=nBins,range=[R1,R2],density=True)
    histSmooth=abf.convolve(hist,cm.kernel_gaussian(int(nBins/5)))
if plotToo:
plt.plot(bins[1:],hist,'.',color=color,alpha=.2,ms=10)
plt.plot(bins[1:],histSmooth,'-',color=color,lw=5,alpha=.5,label=label)
return
if __name__=="__main__":
#abfFile=R"C:\Users\scott\Documents\important\demodata\abfs\16d07022.abf"
abfFile=R"X:\Data\2P01\2016\2016-09-01 PIR TGOT\16d07022.abf"
abf=swhlab.ABF(abfFile)
# prepare figure
plt.figure(figsize=(10,10))
plt.grid()
plt.title("smart baseline value distribution")
plt.xlabel(abf.units2)
plt.ylabel("normalized density")
# do the analysis
abf.kernel=abf.kernel_gaussian(sizeMS=500)
abf.setsweep(175)
analyzeSweep(abf,color='b',label="baseline")
abf.setsweep(200)
analyzeSweep(abf,color='g',label="TGOT")
abf.setsweep(375)
analyzeSweep(abf,color='y',label="washout")
# show figure
plt.legend()
plt.margins(0,.1)
plt.show()
print("DONE")
| mit |
blbarker/spark-tk | regression-tests/sparktkregtests/testcases/models/gmm_test.py | 12 | 7116 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test guassian mixture models against known values"""
import unittest
from collections import Counter
from numpy.testing import assert_almost_equal
from sparktkregtests.lib import sparktk_test
class GMMModelTest(sparktk_test.SparkTKTestCase):
def setUp(self):
data_file = self.get_file("gmm_data.csv")
self.frame = self.context.frame.import_csv(
data_file, schema=[("x1", float), ("x2", float)])
def test_train(self):
""" Verify that model operates as expected in straightforward case"""
model = self.context.models.clustering.gmm.train(
self.frame, ["x1", "x2"],
column_scalings=[1.0, 1.0],
k=5,
max_iterations=500,
seed=20,
convergence_tol=0.0001)
actual_mu = [g.mu for g in model.gaussians]
actual_sigma = [g.sigma for g in model.gaussians]
expected_mu = \
[[7.0206, -10.1706],
[7.8322, -10.2383],
[-1.3816, 6.7215],
[-0.04184, 5.8039],
[-4.1743, 8.5564]]
expected_sigma = \
[[[0.2471, -0.3325],
[-0.3325, 0.5828]],
[[2.3005, 0.6906],
[0.6906, 2.1103]],
[[1.5941, -3.5325],
[-3.5325, 7.8424]],
[[0.9849, 0.04328],
[0.04328, 0.3736]],
[[0.1168, 0.1489],
[0.1489, 0.9757]]]
assert_almost_equal(actual_mu, expected_mu, decimal=3)
assert_almost_equal(actual_sigma, expected_sigma, decimal=3)
def test_predict(self):
""" Tests output of predict """
model = self.context.models.clustering.gmm.train(
self.frame, ["x1", "x2"],
column_scalings=[1.0, 1.0],
k=3,
max_iterations=100,
seed=15)
predicted_frame = model.predict(self.frame)
results_df = predicted_frame.to_pandas(self.frame.count())
actual_cluster_sizes = Counter(results_df["predicted_cluster"].tolist())
expected_cluster_sizes = {2: 27, 0: 17, 1: 6}
self.assertItemsEqual(actual_cluster_sizes, expected_cluster_sizes)
def test_gmm_1_cluster(self):
"""Test gmm doesn't error on k=1"""
model = self.context.models.clustering.gmm.train(
self.frame, ["x1", "x2"], [1.0, 1.0], k=1)
def test_gmm_1_iteration(self):
"""Train on 1 iteration only, shouldn't throw exception"""
model = self.context.models.clustering.gmm.train(
self.frame, ["x1"], column_scalings=[1.0],
max_iterations=1)
def test_gmm_high_convergence(self):
"""Train on high convergence, should not throw exception"""
model = self.context.models.clustering.gmm.train(
self.frame, ["x1", "x2"], column_scalings=[1.0, 1.0],
convergence_tol=1e6)
def test_gmm_negative_seed(self):
"""Train on negative seed, shouldn't throw exception"""
model = self.context.models.clustering.gmm.train(
self.frame, ["x1", "x2"], column_scalings=[1.0, 1.0],
seed=-20)
def test_gmm_0_scalings(self):
"""all-zero column scalings, shouldn't throw exception"""
model = self.context.models.clustering.gmm.train(
self.frame, ["x1", "x2"], column_scalings=[0.0, 0.0])
def test_gmm_negative_scalings(self):
"""negative column scalings, shouldn't throw exception"""
model = self.context.models.clustering.gmm.train(
self.frame, ["x1", "x2"], column_scalings=[-1.0, -1.0])
def test_gmm_empty_frame(self):
""" Verify that model operates as expected in straightforward case"""
# Train on an empty frame
block_data = []
frame = self.context.frame.create(
block_data,
[("x1", float)])
with self.assertRaisesRegexp(
Exception, "empty collection"):
self.context.models.clustering.gmm.train(
frame, ["x1"], column_scalings=[1.0])
def test_0_classes_errors(self):
"""Train on 0 classes, should error"""
with self.assertRaisesRegexp(
Exception, "k must be at least 1"):
self.context.models.clustering.gmm.train(
self.frame, ["x1", "x2"], column_scalings=[1.0, 1.0], k=0)
def test_negative_classes(self):
"""Train on negative classes, should error"""
with self.assertRaisesRegexp(
Exception, "k must be at least 1"):
self.context.models.clustering.gmm.train(
self.frame, ["x1"], column_scalings=[1.0], k=-5)
def test_0_iterations(self):
"""Train on 0 iterations, should error"""
with self.assertRaisesRegexp(
Exception, "maxIterations must be a positive value"):
self.context.models.clustering.gmm.train(
self.frame, ["x1"], column_scalings=[1.0],
max_iterations=0)
def test_negative_iterations(self):
"""Train on negative iterations, should error"""
with self.assertRaisesRegexp(
Exception, "maxIterations must be a positive value"):
self.context.models.clustering.gmm.train(
self.frame, ["x1"], column_scalings=[1.0],
max_iterations=-20)
def test_wrong_column_scalings(self):
"""Insufficient column scalings, should error"""
with self.assertRaisesRegexp(
Exception, "columnWeights must not be null or empty"):
self.context.models.clustering.gmm.train(
self.frame, ["x1"], column_scalings=[])
def test_too_many_column_scalings(self):
"""Extra column scalings, should error"""
with self.assertRaisesRegexp(
Exception,
"Length of columnWeights and observationColumns.*"):
self.context.models.clustering.gmm.train(
self.frame, ["x1", "x2"], column_scalings=[1.0, 1.0, 1.0])
def test_missing_column_scalings(self):
"""Missing column scalings, should error"""
with self.assertRaisesRegexp(
TypeError, "train\(\) takes at least 3 arguments.*"):
self.context.models.clustering.gmm.train(
self.frame, ["x1", "x2"], k=2)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
chrisburr/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too strongly correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. Moreover, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance matrix, so we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, the small number
of samples forces us to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, which
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso, which sets the sparsity of the model,
is chosen by internal cross-validation in the GraphLassoCV. As can be
seen in figure 2, the grid used to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <gael.varoquaux@inria.fr>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
NelisVerhoef/scikit-learn | examples/manifold/plot_mds.py | 261 | 2616 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)
plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')
plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')
plt.legend(('True position', 'MDS', 'NMDS'), loc='best')
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
pauliacomi/pyGAPS | tests/characterisation/test_t_plot.py | 1 | 2958 | """
This test module has tests relating to t-plots
All functions in /calculations/tplot.py are tested here.
The purposes are:
- testing the user-facing API function (tplot)
- testing individual low level functions against known results.
Functions are tested against pre-calculated values on real isotherms.
All pre-calculated data for characterisation can be found in the
/.conftest file together with the other isotherm parameters.
"""
import pytest
from matplotlib.testing.decorators import cleanup
from numpy import isclose
import pygaps
import pygaps.utilities.exceptions as pgEx
from .conftest import DATA
from .conftest import DATA_N77_PATH
@pytest.mark.characterisation
class TestTPlot():
"""Tests t-plot calculations."""
def test_alphas_checks(self, basic_pointisotherm):
"""Checks for built-in safeguards."""
# Will raise a "no suitable model exception"
with pytest.raises(pgEx.ParameterError):
pygaps.t_plot(basic_pointisotherm, thickness_model='random')
@pytest.mark.parametrize('sample', [sample for sample in DATA])
def test_tplot(self, sample):
"""Test calculation with several model isotherms."""
sample = DATA[sample]
# exclude datasets where it is not applicable
if sample.get('t_area', None):
filepath = DATA_N77_PATH / sample['file']
isotherm = pygaps.isotherm_from_json(filepath)
res = pygaps.t_plot(isotherm)
results = res.get('results')
err_relative = 0.1 # 10 percent
err_absolute_area = 0.1 # units
err_absolute_volume = 0.01 # units
assert isclose(
results[-1].get('adsorbed_volume'), sample['t_pore_volume'],
err_relative, err_absolute_area
)
assert isclose(
results[0].get('area'), sample['t_area'], err_relative,
err_absolute_volume
)
def test_tplot_choice(self):
"""Test choice of points."""
sample = DATA['MCM-41']
filepath = DATA_N77_PATH / sample['file']
isotherm = pygaps.isotherm_from_json(filepath)
res = pygaps.t_plot(isotherm, limits=[0.7, 1.0])
results = res.get('results')
err_relative = 0.1 # 10 percent
err_absolute_area = 0.1 # units
err_absolute_volume = 0.01 # units
assert isclose(
results[-1].get('adsorbed_volume'), sample['t_pore_volume'],
err_relative, err_absolute_area
)
assert isclose(
results[-1].get('area'), sample['s_t_area'], err_relative,
err_absolute_volume
)
@cleanup
def test_tplot_output(self):
"""Test verbosity."""
sample = DATA['MCM-41']
filepath = DATA_N77_PATH / sample['file']
isotherm = pygaps.isotherm_from_json(filepath)
pygaps.t_plot(isotherm, 'Halsey', verbose=True)
| mit |
christianurich/VIBe2UrbanSim | 3rdparty/opus/src/vibe_min/indicators/make_indicators.py | 4 | 4825 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
# script to produce a number of PSRC indicators --
# this illustrates using traits-based configurations programatically
from opus_core.configurations.dataset_pool_configuration import DatasetPoolConfiguration
from opus_core.indicator_framework.core.source_data import SourceData
from opus_core.indicator_framework.image_types.matplotlib_map import Map
from opus_core.indicator_framework.image_types.matplotlib_chart import Chart
from opus_core.indicator_framework.image_types.table import Table
from opus_core.indicator_framework.image_types.geotiff_map import GeotiffMap
from opus_core.indicator_framework.image_types.dataset_table import DatasetTable
from opus_core.indicator_framework.image_types.matplotlib_lorenzcurve import LorenzCurve
#some cache_directories and run descriptions
#cache_directory = r'Y:/urbansim_cache/run_1090.2006_11_14_12_12'
#run_description = '(run 1090 - double highway capacity 11/28/2006)'
#cache_directory = r'Y:/urbansim_cache/run_1091.2006_11_14_12_12'
#run_description = '(run 1091 - baseline 11/28/2006)'
#cache_directory = r'D:\urbansim_cache\run_1454.2006_12_12_16_28'
#run_description = '(run 1454 - travel data from quick travel model)'
cache_directory = r'D:\urbansim_cache\run_1090.2006_11_14_12_12'
run_description = '(run 1453 - travel data from full travel model)'
#cache_directory = r'Y:\urbansim_cache\run_1431.2006_12_08_09_45'
#run_description = '(run 1431 - baseyear travel data from travel model run)'
#cache_directory = r'D:\urbansim_cache\run_1154.2006_11_17_20_06'
#run_description = '(run 1154 - no ugb + double highway capacity 11/28/2006)'
#cache_directory = r'D:\urbansim_cache\run_1155.2006_11_17_20_07'
#run_description = '(run 1155 - no ugb 11/28/2006)'
source_data = SourceData(
cache_directory = cache_directory,
run_description = run_description,
years = [1980, 1981, 1982],
dataset_pool_configuration = DatasetPoolConfiguration(
package_order=['eugene','urbansim','opus_core'],
),
)
single_year_requests = [
Table(
attribute = 'urbansim.zone.population',
dataset_name = 'zone',
source_data = source_data,
),
Table(
attribute = 'urbansim.zone.number_of_jobs',
dataset_name = 'zone',
source_data = source_data,
),
Map(
attribute = 'urbansim.zone.population',
scale = [1, 60000],
dataset_name = 'zone',
source_data = source_data,
),
Map(
attribute = 'urbansim.zone.number_of_jobs',
scale = [1, 60000],
dataset_name = 'zone',
source_data = source_data,
),
Map(
scale = [-8000, 40000],
attribute = 'urbansim_population_change',
source_data = source_data,
expression = {'operation': 'change',
'operands': ['urbansim.zone.population']},
dataset_name = 'zone',
),
Map(
scale = [-2000, 40000],
attribute = 'urbansim_employment_change',
source_data = source_data,
expression = {'operation': 'change',
'operands': ['urbansim.zone.number_of_jobs']},
dataset_name = 'zone',
),
]
source_data = SourceData(
cache_directory = cache_directory,
run_description = run_description,
years = [1980, 1981, 1982],
dataset_pool_configuration = DatasetPoolConfiguration(
package_order=['eugene','urbansim','opus_core'],
),
)
multi_year_requests = [
Table(
attribute = 'alldata.aggregate_all(urbansim.gridcell.residential_units, function=sum)',
dataset_name = 'alldata',
source_data = source_data,
name = 'residential_units'
),
Chart(
attribute = 'alldata.aggregate_all(urbansim.gridcell.residential_units, function=sum)',
dataset_name = 'alldata',
source_data = source_data,
name = 'residential_units'
),
Table(
attribute = 'alldata.aggregate_all(urbansim.gridcell.number_of_jobs, function=sum)',
dataset_name = 'alldata',
source_data = source_data,
name = 'number_of_jobs'
),
]
if __name__ == '__main__':
from opus_core.indicator_framework.core.indicator_factory import IndicatorFactory
IndicatorFactory().create_indicators(
indicators = single_year_requests,
display_error_box = False,
show_results = True)
IndicatorFactory().create_indicators(
indicators = multi_year_requests,
display_error_box = False,
show_results = True) | gpl-2.0 |
anacode/anacode-toolkit | anacode/api/writers.py | 1 | 20217 | # -*- coding: utf-8 -*-
import os
import csv
import datetime
import pandas as pd
from itertools import chain
from functools import partial
from anacode import codes
def backup(root, files):
"""Backs up `files` from `root` directory and return list of backed up
file names. Backed up files will have datetime suffix appended to original
file name.
:param root: Absolute path to folder where files to backup are located
:type root: str
:param files: Names of files that needs backing up
:type files: str
:return: list -- List of backed up file names
"""
backed_up = []
join = os.path.join
root_contents = os.listdir(root)
dt_str = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
for file_name in files:
if file_name not in root_contents:
continue
new_name = file_name + '_' + dt_str
os.rename(join(root, file_name), join(root, new_name))
backed_up.append(new_name)
return backed_up
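# Illustrative note: a backed-up file keeps its original name with a UTC
# timestamp suffix appended, e.g. (hypothetical) 'categories.csv' becomes
# 'categories.csv_20240101120000'.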
HEADERS = {
'categories': [u'doc_id', u'text_order', u'category', u'probability'],
'concepts': [u'doc_id', u'text_order', u'concept', u'freq',
u'relevance_score', u'concept_type'],
'concepts_surface_strings': [u'doc_id', u'text_order', u'concept',
u'surface_string', u'text_span'],
'sentiments': [u'doc_id', u'text_order', u'sentiment_value'],
'absa_entities': [u'doc_id', u'text_order', u'entity_name', u'entity_type',
u'surface_string', u'text_span'],
'absa_normalized_texts': [u'doc_id', u'text_order', u'normalized_text'],
'absa_relations': [u'doc_id', u'text_order', u'relation_id',
u'opinion_holder', u'restriction', u'sentiment_value',
u'is_external', u'surface_string', u'text_span'],
'absa_relations_entities': [u'doc_id', u'text_order', u'relation_id',
u'entity_type', u'entity_name'],
'absa_evaluations': [u'doc_id', u'text_order', u'evaluation_id',
u'sentiment_value', u'surface_string', u'text_span'],
'absa_evaluations_entities': [u'doc_id', u'text_order', u'evaluation_id',
u'entity_type', u'entity_name'],
}
# `anacode.agg.aggregations.ApiDataset.from_path` depends
# on ordering of files defined in values here
CSV_FILES = {
'categories': ['categories.csv'],
'concepts': ['concepts.csv', 'concepts_surface_strings.csv'],
'sentiments': ['sentiments.csv'],
'absa': [
'absa_entities.csv', 'absa_normalized_texts.csv',
'absa_relations.csv', 'absa_relations_entities.csv',
'absa_evaluations.csv', 'absa_evaluations_entities.csv'
]
}
def categories_to_list(doc_id, analyzed, single_document=False):
"""Converts categories response to flat list with doc_id included.
:param doc_id: Will be inserted to each row as first element
:param analyzed: Response json from anacode api for categories call
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
:return: dict -- Dictionary with one key 'categories' pointing to flat list
of categories
"""
cat_list = []
for order, text_analyzed in enumerate(analyzed):
for result_dict in text_analyzed:
row = [doc_id, 0, result_dict.get('label'),
result_dict.get('probability')]
if single_document:
row[1] += order
else:
row[0] += order
cat_list.append(row)
return {'categories': cat_list}
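# A minimal sketch of the flattening above, using a hypothetical API response:
#
#   >>> categories_to_list(0, [[{'label': 'auto', 'probability': 0.97}]])
#   {'categories': [[0, 0, 'auto', 0.97]]}
#
# Each row is [doc_id, text_order, category, probability], matching
# HEADERS['categories'].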
def concepts_to_list(doc_id, analyzed, single_document=False):
"""Converts concepts response to flat lists with doc_id included
:param doc_id: Will be inserted to each row as first element
:param analyzed: Response json from anacode api for concepts call
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
:return: dict -- Dictionary with two keys: 'concepts' pointing to flat list
of found concepts and their metadata and 'concepts_surface_strings'
pointing to flat list of strings realizing found concepts
"""
con_list, exp_list = [], []
for order, text_analyzed in enumerate(analyzed):
for concept in text_analyzed or []:
row = [doc_id, 0, concept.get('concept'),
concept.get('freq'), concept.get('relevance_score'),
concept.get('type')]
if single_document:
row[1] += order
else:
row[0] += order
con_list.append(row)
for string in concept.get('surface', []):
surface_str, span = string['surface_string'], string['span']
exp_list.append([row[0], row[1], concept.get('concept'),
surface_str, '-'.join(map(str, span))])
return {'concepts': con_list, 'concepts_surface_strings': exp_list}
def sentiments_to_list(doc_id, analyzed, single_document=False):
"""Converts sentiments response to flat lists with doc_id included
:param doc_id: Will be inserted to each row as first element
:param analyzed: Response json from anacode api for sentiment call
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
:return: dict -- Dictionary with one key 'sentiments' pointing to flat list
of sentiment probabilities
"""
sen_list = []
for order, sentiment in enumerate(analyzed):
row = [doc_id, 0, sentiment['sentiment_value']]
if single_document:
# this should not happen
row[1] += order
else:
row[0] += order
sen_list.append(row)
return {'sentiments': sen_list}
def _absa_entities_to_list(doc_id, order, entities):
ent_list = []
for entity_dict in entities:
text_span = '-'.join(map(str, entity_dict['surface']['span']))
surface_string = entity_dict['surface']['surface_string']
for semantics in entity_dict['semantics']:
row = [doc_id, order, semantics['value'], semantics['type'],
surface_string, text_span]
ent_list.append(row)
return ent_list
def _absa_normalized_text_to_list(doc_id, order, normalized_text):
return [[doc_id, order, normalized_text]]
def _absa_relations_to_list(doc_id, order, relations):
rel_list, ent_list = [], []
for rel_index, rel in enumerate(relations):
rel_row = [doc_id, order, rel_index,
rel['semantics']['opinion_holder'],
rel['semantics']['restriction'],
rel['semantics']['sentiment_value'],
rel['external_entity'],
rel['surface']['surface_string'],
'-'.join(map(str, rel['surface']['span']))]
rel_list.append(rel_row)
for ent in rel['semantics'].get('entity', []):
ent_row = [doc_id, order, rel_index, ent['type'], ent['value']]
ent_list.append(ent_row)
return rel_list, ent_list
def _absa_evaluations_to_list(doc_id, order, evaluations):
eval_list, ent_list = [], []
for eval_index, evaluation in enumerate(evaluations):
eval_row = [doc_id, order, eval_index,
evaluation['semantics']['sentiment_value'],
evaluation['surface']['surface_string'],
'-'.join(map(str, evaluation['surface']['span']))]
eval_list.append(eval_row)
for ent in evaluation['semantics'].get('entity', []):
ent_row = [doc_id, order, eval_index, ent['type'], ent['value']]
ent_list.append(ent_row)
return eval_list, ent_list
def absa_to_list(doc_id, analyzed, single_document=False):
"""Converts ABSA response to flat lists with doc_id included
:param doc_id: Will be inserted to each row as first element
:param analyzed: Response json from anacode api for ABSA call
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
:return: dict -- Dictionary with six keys: 'absa_entities' pointing to flat
list of found entities with metadata, 'absa_normalized_texts' pointing to
flat list of normalized chinese texts, 'absa_relations' pointing to found
entity relations with metadata, 'absa_relations_entities' pointing to flat
list of entities that belong to absa relations, 'absa_evaluations'
pointing to flat list of entity evaluations with metadata and
'absa_evaluations_entities' specifying entities in absa_evaluations
"""
absa = {
'absa_entities': [],
'absa_normalized_texts': [],
'absa_relations': [],
'absa_relations_entities': [],
'absa_evaluations': [],
'absa_evaluations_entities': []
}
for order, text_analyzed in enumerate(analyzed):
if single_document:
current_id = doc_id
text_order = order
else:
current_id = doc_id + order
text_order = 0
entities = text_analyzed['entities']
ents = _absa_entities_to_list(current_id, text_order, entities)
text = text_analyzed['normalized_text']
texts = _absa_normalized_text_to_list(current_id, text_order, text)
relations = text_analyzed['relations']
rels, rel_ents = _absa_relations_to_list(current_id, text_order,
relations)
evaluations = text_analyzed['evaluations']
evals, eval_ents = _absa_evaluations_to_list(current_id, text_order,
evaluations)
absa['absa_entities'].extend(ents)
absa['absa_normalized_texts'].extend(texts)
absa['absa_relations'].extend(rels)
absa['absa_relations_entities'].extend(rel_ents)
absa['absa_evaluations'].extend(evals)
absa['absa_evaluations_entities'].extend(eval_ents)
return absa
class Writer(object):
"""Base "abstract" class containing common methods that are
needed by all implementations of Writer interface.
The writer interface consists of init, close and write_bulk methods.
"""
def __init__(self):
self.ids = {'scrape': 0, 'analyze': 0}
def write_row(self, call_type, call_result):
"""Decides what kind of data it got and calls appropriate write method.
:param call_type: Library's ID of anacode call
:type call_type: int
:param call_result: JSON response from Anacode API
:type call_result: list
"""
if call_type == codes.SCRAPE:
self.write_scrape(call_result)
if call_type == codes.ANALYZE:
self.write_analysis(call_result)
def _add_new_data_from_dict(self, new_data):
"""Not implemented here!
Used by write methods to submit new Anacode API response data for storage.
:param new_data: dict; keys are data sets names and values are
flat lists of rows
:type new_data: dict
"""
pass
def write_scrape(self, scraped):
self.ids['scrape'] += 1
def write_analysis(self, analyzed):
"""Inspects analysis result for performed analysis and delegates
persisting of results to appropriate write methods.
:param analyzed: JSON object analysis response
:type: dict
"""
single_document = analyzed.get('single_document', False)
analyzed_length = 1
if 'categories' in analyzed:
categories = analyzed['categories']
self.write_categories(categories, single_document=single_document)
if not single_document:
analyzed_length = len(categories)
if 'concepts' in analyzed:
concepts = analyzed['concepts']
self.write_concepts(concepts, single_document=single_document)
if not single_document:
analyzed_length = len(concepts)
if 'sentiment' in analyzed:
sentiment = analyzed['sentiment']
self.write_sentiment(sentiment, single_document=single_document)
if not single_document:
analyzed_length = len(sentiment)
if 'absa' in analyzed:
absa = analyzed['absa']
self.write_absa(analyzed['absa'], single_document=single_document)
if not single_document:
analyzed_length = len(absa)
self.ids['analyze'] += analyzed_length
def write_categories(self, analyzed, single_document=False):
"""Converts categories analysis result to flat lists and stores them.
:param analyzed: JSON categories analysis result
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
"""
doc_id = self.ids['analyze']
new_data = categories_to_list(doc_id, analyzed, single_document)
self._add_new_data_from_dict(new_data)
def write_concepts(self, analyzed, single_document=False):
"""Converts concepts analysis result to flat lists and stores them.
:param analyzed: JSON concepts analysis result
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
"""
doc_id = self.ids['analyze']
new_data = concepts_to_list(doc_id, analyzed, single_document)
self._add_new_data_from_dict(new_data)
def write_sentiment(self, analyzed, single_document=False):
"""Converts sentiment analysis result to flat lists and stores them.
:param analyzed: JSON sentiment analysis result
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
"""
doc_id = self.ids['analyze']
new_data = sentiments_to_list(doc_id, analyzed, single_document)
self._add_new_data_from_dict(new_data)
def write_absa(self, analyzed, single_document=False):
"""Converts absa analysis result to flat lists and stores them.
:param analyzed: JSON absa analysis result
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
"""
doc_id = self.ids['analyze']
new_data = absa_to_list(doc_id, analyzed, single_document)
self._add_new_data_from_dict(new_data)
def write_bulk(self, results):
"""Stores multiple anacode api's JSON responses marked with call IDs as
tuples (call_id, call_result). Both scrape and analyze call IDs
are defined in anacode.codes module.
:param results: List of anacode responses with IDs of calls used
:type results: list
"""
for call_type, call_result in results:
self.write_row(call_type, call_result)
def init(self):
"""Not implemented here! Each subclass should decide what to do here."""
pass
def close(self):
"""Not implemented here! Each subclass should decide what to do here."""
pass
def __enter__(self):
self.init()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
class DataFrameWriter(Writer):
"""Writes Anacode API output into pandas.DataFrame instances."""
def __init__(self, frames=None):
"""Initializes dictionary of result frames. Alternatively uses given
frames dict for storage.
:param frames: Might be specified to use this instead of new dict
:type frames: dict
"""
super(DataFrameWriter, self).__init__()
self.frames = {} if frames is None else frames
self._row_data = {}
def init(self):
"""Initialized empty lists for each possible data frame."""
self._row_data = {
'categories': [],
'concepts': [],
'concepts_surface_strings': [],
'sentiments': [],
'absa_entities': [],
'absa_normalized_texts': [],
'absa_relations': [],
'absa_relations_entities': [],
'absa_evaluations': [],
'absa_evaluations_entities': [],
}
def close(self):
"""Creates pandas data frames to self.frames dict and clears internal
state.
"""
for name, row in self._row_data.items():
if len(row) > 0:
self.frames[name] = pd.DataFrame(row, columns=HEADERS[name])
self._row_data = {}
def _add_new_data_from_dict(self, new_data):
"""Stores anacode api result converted to flat lists.
:param new_data: Anacode api result
        :type new_data: dict
"""
for name, row_list in new_data.items():
self._row_data[name].extend(row_list)
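# Usage sketch (``analysis_json`` stands for a hypothetical, already-parsed
# Anacode API analysis response; it is not defined here): after close(), the
# flattened results are available as pandas DataFrames keyed by data set name.
#
#   with DataFrameWriter() as writer:
#       writer.write_analysis(analysis_json)
#   categories_frame = writer.frames.get('categories')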
class CSVWriter(Writer):
def __init__(self, target_dir='.'):
"""Initializes Writer to store Anacode API analysis results in target_dir in
csv files.
:param target_dir: Path to directory where to store csv files
:type target_dir: str
"""
super(CSVWriter, self).__init__()
self.target_dir = os.path.abspath(os.path.expanduser(target_dir))
self._files = {}
self.csv = {}
def _open_csv(self, csv_name):
path = partial(os.path.join, self.target_dir)
try:
return open(path(csv_name), 'w', newline='')
except TypeError:
return open(path(csv_name), 'wb')
def init(self):
"""Opens all csv files for writing and writes headers to them."""
self.close()
backup(self.target_dir, chain.from_iterable(CSV_FILES.values()))
self._files = {
'categories': self._open_csv('categories.csv'),
'concepts': self._open_csv('concepts.csv'),
'concepts_surface_strings': self._open_csv(
'concepts_surface_strings.csv'
),
'sentiments': self._open_csv('sentiments.csv'),
'absa_entities': self._open_csv('absa_entities.csv'),
'absa_normalized_texts': self._open_csv(
'absa_normalized_texts.csv'
),
'absa_relations': self._open_csv('absa_relations.csv'),
'absa_relations_entities': self._open_csv(
'absa_relations_entities.csv'
),
'absa_evaluations': self._open_csv('absa_evaluations.csv'),
'absa_evaluations_entities': self._open_csv(
'absa_evaluations_entities.csv'
),
}
self.csv = {name: csv.writer(fp) for name, fp in self._files.items()}
for name, writer in self.csv.items():
writer.writerow(HEADERS[name])
def _csv_has_content(self, csv_path):
if not os.path.isfile(csv_path):
return False
with open(csv_path) as fp:
for line_count, line in enumerate(fp):
                if line_count == 1 and line.strip() != '':
return True
return False
def close(self):
"""Closes all csv files and removes empty ones."""
for name, file in self._files.items():
try:
file.close()
except (IOError, AttributeError):
print('Problem closing "{}"'.format(name))
for file_list in CSV_FILES.values():
for file_name in file_list:
path = os.path.join(self.target_dir, file_name)
if os.path.isfile(path) and not self._csv_has_content(path):
os.unlink(path)
self._files = {}
self.csv = {}
def _add_new_data_from_dict(self, new_data):
"""Stores anacode api result converted to flat lists.
:param new_data: Anacode api result
        :type new_data: dict
"""
for name, row_list in new_data.items():
self.csv[name].writerows(row_list)
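# Usage sketch (``results`` stands for a hypothetical list of
# (call_code, api_response) tuples, as described in Writer.write_bulk):
#
#   with CSVWriter(target_dir='.') as writer:
#       writer.write_bulk(results)
#
# Non-empty csv files (categories.csv, concepts.csv, ...) are left in
# target_dir; empty ones are removed on close().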
| bsd-3-clause |
boland1992/seissuite_iran | build/lib.linux-x86_64-2.7/seissuite/ant/psdepthmodel.py | 6 | 9233 | """
Module taking care of the forward modelling: theoretical dispersion
curve given a 1D crustal model of velocities and densities.
Uses the binaries of the Computer Programs in Seismology, with
must be installed in *COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR*
"""
import numpy as np
import matplotlib.pyplot as plt
import os
import shutil
import itertools as it
from easyprocess import EasyProcess
import tempfile
import pickle
# getting the dir of the binaries of the Computer Programs in Seismology
# import CONFIG class initialised in ./configs/tmp_config.pickle
config_pickle = 'configs/tmp_config.pickle'
f = open(name=config_pickle, mode='rb')
CONFIG = pickle.load(f)
f.close()
# import variables from initialised CONFIG class.
COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR=CONFIG.COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR
# default header of the model file:
# isotropic, 1D, flat Earth with layers of constant velocity
MODEL_HEADER = """MODEL.01
TEST
ISOTROPIC
KGS
FLAT EARTH
1-D
CONSTANT VELOCITY
LINE08
LINE09
LINE10
LINE11
H VP VS RHO QP QS ETAP ETAS FREFP FREFS"""
class VsModel:
"""
Class holding a layered model of Vs function of depth,
with Vp/Vs and rho/Vs ratio fixed.
"""
def __init__(self, vs, dz, ratio_vp_vs, ratio_rho_vs, name='',
store_vg_at_periods=None):
"""
Initializes model with layers' Vs (vs), layers' thickness (dz),
and layers' ratio Vp/Vs and rho/Vs (ratio_vp_vs, ratio_rho_vs).
"""
# checking shapes
nlayers = np.size(vs)
if np.size(dz) != nlayers - 1:
raise Exception("Size of dz should be nb of layers minus 1")
if not np.size(ratio_vp_vs) in [1, nlayers]:
raise Exception("Size of ratio_vp_vs should be nb of layers or 1")
if not np.size(ratio_rho_vs) in [1, nlayers]:
raise Exception("Size of ratio_rho_vs should be nb of layers or 1")
self.name = name
self.vs = np.array(vs)
self.dz = np.array(dz)
self.ratio_vp_vs = np.array(ratio_vp_vs)
self.ratio_rho_vs = np.array(ratio_rho_vs)
# storing vg model at selected periods if required
self.stored_vgperiods = store_vg_at_periods
if not store_vg_at_periods is None:
self.stored_vg = self.vg_model(store_vg_at_periods)
else:
self.stored_vg = None
def misfit_to_vg(self, periods, vg, sigmavg, squared=True,
use_storedvg=True, storevg=False):
"""
Misfit of modelled vg to observed vg
[vg_model - vg]**2
= Sum ------------------ over periods
2 x sigmavg**2
"""
# using stored vg model if required and available, else re-calculating it
if use_storedvg and np.all(periods == self.stored_vgperiods):
vg_model = self.stored_vg
else:
vg_model = self.vg_model(periods, store=storevg)
misfit = np.sum(((vg_model - vg) / sigmavg)**2) / 2.0
if squared:
misfit = np.sqrt(misfit)
return misfit
def vg_model(self, periods, store=False):
"""
Modelled group velocities, vg, function of period
"""
vs = self.vs
vp = self.ratio_vp_vs * self.vs
rho = self.ratio_rho_vs * self.vs
dz = np.r_[self.dz, 0] # we append a fake thickness
vg = Rayleigh_group_velocities(periods, dz=dz, vp=vp, vs=vs, rho=rho)
if store:
# storing group velocities if required
self.stored_vgperiods = periods
self.stored_vg = vg
return vg
def get_vs_at(self, z):
"""
Returns Vs ad depth(s) *z*
"""
indices = np.searchsorted(np.r_[0, self.dz.cumsum()], z, side='right') - 1
        if np.any(indices < 0):
raise Exception("Depth out of range")
return self.vs[indices]
def plot(self, periods, obsvgarrays=None, fig=None, color='r'):
"""
Plots modelled and observed group velocity function of period (top)
and the model itself, i.e. Vs vs depth (bottom)
"""
if not fig:
fig = plt.figure(figsize=(6.5, 10), tight_layout=True)
axlist = [fig.add_subplot(211), fig.add_subplot(212)]
legend = True
else:
axlist = fig.get_axes()
legend = False # no need to add legend to existing fig
# 1st subplot: group velocity vs period
ax = axlist[0]
self.plot_vg(periods, obsvgarrays=obsvgarrays, ax=ax, legend=legend, color=color)
ax.set_title(self.name)
# 2nd subplot: Vs vs depth
ax = axlist[1]
self.plot_model(ax=ax, color=color)
fig.canvas.draw()
fig.show()
return fig
def plot_vg(self, periods, obsvgarrays=None, ax=None, legend=True, color='r'):
"""
Plots modelled and observed group velocity function of period
"""
# creating figure if not given as input
fig = None
if not ax:
fig = plt.figure()
ax = fig.add_subplot(111)
vg_model = self.vg_model(periods)
ax.plot(periods, vg_model, lw=1.5, color=color, label=self.name)
if obsvgarrays:
for i, vgarray in enumerate(obsvgarrays):
label = 'Observed dispersion curves' if not i else None
ax.plot(periods, vgarray, lw=0.5, color='k', label=label)
ax.set_xlabel('Period (sec)')
ax.set_ylabel('Group velocity (km/s)')
if legend:
ax.legend(loc='best', fontsize=11, framealpha=0.8)
ax.grid(True)
if fig:
fig.show()
def plot_model(self, ax=None, color='r', format_axes=True):
"""
Plots the model, i.e. Vs vs depth
"""
# creating figure if not given as input
fig = None
if not ax:
fig = plt.figure()
ax = fig.add_subplot(111)
x = list(it.chain.from_iterable([[v, v] for v in self.vs]))
y = [0.0] + list(it.chain.from_iterable([[z, z] for z in np.cumsum(self.dz)])) + \
[self.dz.sum() + 15]
ax.plot(x, y, lw=1.5, color=color)
if format_axes:
ax.set_ylim(sorted(ax.get_ylim(), reverse=True))
ax.set_xlabel('Vs (km/s)')
ax.set_ylabel('Depth (km)')
ax.grid(True)
if fig:
fig.show()
def Rayleigh_group_velocities(periods, dz, vp, vs, rho, verbose=False):
"""
Returns the array of Rayleigh wave group velocities at
selected periods, from the 1-D layered Earth model
contained in *dz* (thicknesses), *vp* (P wave velocities),
*vs* (S wave velocities) and *rho* (densities).
The Computer Programs in Seismology, located in dir
*COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR*,
are used for the computation.
"""
if not COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR:
raise Exception("Please provide the dir of the Computer Programs in Seismology")
# making and moving to temporary dir
current_dir = os.getcwd()
tmp_dir = tempfile.mkdtemp()
os.chdir(tmp_dir)
# preparing input files
if verbose:
print 'Preparing model and periods files'
create_model_file('model', dz, vp, vs, rho)
f = open('periods', 'w')
f.write('\n'.join([str(p) for p in periods]))
f.close()
# preparing model
if verbose:
print "Calling sprep96"
cmd = os.path.join(COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR, 'sprep96')
# Rayleigh wave, fundamental mode
p = EasyProcess('"{}" -M model -PARR periods -NMOD 1 -R'.format(cmd)).call()
if verbose:
print p.stdout
# phase dispersion curve
if verbose:
print "Calling sdisp96"
cmd = os.path.join(COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR, 'sdisp96')
p = EasyProcess('"{}" -v'.format(cmd)).call()
if verbose:
print p.stdout
# group dispersion curve
if verbose:
print "Calling sregn96"
cmd = os.path.join(COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR, 'sregn96')
p = EasyProcess('"{}"'.format(cmd)).call()
if verbose:
print p.stdout
# exporting group velocities (-U) of Rayleigh waves (-R) in ascii file
if verbose:
print "Calling sdpegn96"
cmd = os.path.join(COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR, 'sdpegn96')
p = EasyProcess('"{}" -R -S -U -XLOG -PER -ASC'.format(cmd)).call()
if verbose:
print p.stdout
# loading group velocities from 6th column of ascii file
vg = np.loadtxt('SREGN.ASC', skiprows=1, usecols=(5,))
# removing temp dir
os.chdir(current_dir)
shutil.rmtree(tmp_dir)
return vg
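# A minimal usage sketch of the function above (illustrative values only;
# assumes the Computer Programs in Seismology are installed and
# COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR points to them):
#
#     periods = np.array([5.0, 10.0, 20.0, 40.0])   # seconds
#     dz = np.array([10.0, 20.0, 0.0])               # km; last layer = half-space
#     vs = np.array([2.5, 3.5, 4.5])                 # km/s
#     vp = 1.73 * vs                                  # km/s, assumed Vp/Vs ratio
#     rho = 0.77 + 0.32 * vs                          # g/cm3, rough empirical scaling
#     vg = Rayleigh_group_velocities(periods, dz=dz, vp=vp, vs=vs, rho=rho)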
def create_model_file(path, dz, vp, vs, rho):
"""
    Writing the 1D model to ascii file, to be used as input
by the Computer Programs in Seismology
"""
qp = np.zeros_like(dz)
qs = np.zeros_like(dz)
etap = np.zeros_like(dz)
etas = np.zeros_like(dz)
frefp = np.ones_like(dz)
frefs = np.ones_like(dz)
f = open(path, mode='w')
f.write(MODEL_HEADER)
a = np.vstack((dz, vp, vs, rho, qp, qs, etap, etas, frefp, frefs))
for col in a.T:
f.write('\n')
col.tofile(f, sep=' ')
f.close() | gpl-3.0 |
ucdrascal/hcibench | axopy/storage.py | 2 | 12668 | """Experiment data storage.
There are two main use cases for the functionality in this module:
reading/writing data during an experiment session, and reading data once an
experiment is complete (i.e. for analysis). See the :ref:`user guide <storage>`
for information on these use cases.
"""
import os
import h5py
import numpy
import pandas
import zipfile
import shutil
import pickle
import logging
#
# Highest layer. Used by tasks to obtain task readers/writers
#
class Storage(object):
"""Top-level data storage maintainer.
See the :ref:`user guide <storage>` for more information.
Parameters
----------
root : str, optional
Path to the root of the data storage filestructure. By default, 'data'
is used. If the directory doesn't exist, it is created.
allow_overwrite : bool, optional
Specifies whether or not the storage interface allows you to overwrite
a task's data for a subject if it already exists.
"""
def __init__(self, root='data', allow_overwrite=False):
self.root = root
self.allow_overwrite = allow_overwrite
makedirs(root, exist_ok=True)
self._subject_id = None
@property
def subject_ids(self):
"""Generate subject IDs found in storage sorted in alphabetical order.
Returns
-------
subject_id : str
ID of the subject found.
"""
ls = os.listdir(self.root)
for name in sorted(ls):
path = os.path.join(self.root, name)
if os.path.isdir(path):
yield name
@property
def subject_id(self):
"""The current subject ID.
When setting the subject ID for a new subject (i.e. one that doesn't
exist already), storage for that subject is created.
"""
return self._subject_id
@subject_id.setter
def subject_id(self, val):
makedirs(os.path.join(self.root, val), exist_ok=True)
self._subject_id = val
@property
def task_ids(self):
"""Generate names of tasks found for the current subject.
Note that there may be no tasks found if the `subject_id` has not been
set or if the subject hasn't started any tasks. In this case, nothing
is yielded.
"""
if self.subject_id is None:
return
subj_path = os.path.join(self.root, self.subject_id)
ls = os.listdir(subj_path)
for name in sorted(ls):
path = os.path.join(subj_path, name)
if os.path.isdir(path):
yield name
def create_task(self, task_id):
"""Create a task for the current subject.
Parameters
----------
task_id : str
The ID of the task to add. The name must not have been used for
another task for the current subject.
Returns
-------
writer : TaskWriter
A new TaskWriter for storing task data.
"""
path = self._task_path(task_id)
try:
makedirs(path)
except OSError:
if self.allow_overwrite:
shutil.rmtree(path)
makedirs(path)
else:
raise ValueError(
"Subject {} has already started \"{}\". Only unique task "
"names are allowed.".format(self.subject_id, task_id))
return TaskWriter(path)
def require_task(self, task_id):
"""Retrieves a task for the current subject.
Parameters
----------
task_id : str
The ID of the task to look for. The task must have already been run
with the current subject.
Returns
-------
reader : TaskReader
A new TaskReader for working with the existing task data.
"""
if task_id not in self.task_ids:
raise ValueError(
"Subject {} has not started \"{}\" yet. Use `create_task` to "
"create it first.".format(self.subject_id, task_id))
path = self._task_path(task_id)
return TaskReader(path)
def to_zip(self, outfile):
"""Create a ZIP archive from a data storage hierarchy.
For more information, see :func:`storage_to_zip`.
"""
storage_to_zip(self.root, outfile)
def _task_path(self, task_id):
return os.path.join(self.root, self.subject_id, task_id)
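# A minimal write-then-read sketch (illustrative; the subject/task names are
# arbitrary and `trial` stands for a Trial object produced elsewhere in axopy):
#
#     storage = Storage(root='data')
#     storage.subject_id = 'subject_1'
#     writer = storage.create_task('cursor_task')
#     writer.write(trial)                 # flushes trial attrs and arrays
#     reader = storage.require_task('cursor_task')
#     df = reader.trials                  # pandas DataFrame of trial attributes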
#
# Middle layer. Used by tasks to read/write data.
#
class TaskWriter(object):
"""The main interface for storing data from a task.
Usually you get a :class:`Taskwriter` from :class:`Storage`, so you don't
normally need to create one yourself.
Parameters
----------
root : str
Path to the task root (e.g. 'data/subject_1/taskname').
Attributes
----------
trials : TrialWriter
:class:`TrialWriter` for storing trial data.
"""
def __init__(self, root):
self.root = root
self.trials = TrialWriter(_trials_path(self.root))
def write(self, trial):
"""Write trial data.
This must be the last thing done for the current trial. That is, make
sure all arrays have accumulated all data required. This method flushes
trial and array data to files for you.
**Important note**: The trial's arrays are cleared after writing.
Parameters
----------
trial : Trial
            Trial data. See :meth:`TrialWriter.write` and :class:`Trial` for
details.
"""
logging.info('saving trial {}:{}\n{}'.format(
trial.attrs['block'], trial.attrs['trial'], str(trial)))
self.trials.write(trial.attrs)
ind = self.trials.df.index[-1]
for name, array in trial.arrays.items():
path = _array_path(self.root, name)
write_hdf5(path, array.data, dataset=str(ind))
array.clear()
def pickle(self, obj, name):
"""Write a generic object to storage.
This can be useful to persist an object from one task to another, or to
store something that doesn't easily fit into the AxoPy storage model
(trial attributes and arrays). Be cautious, however, as pickles are not
the best way to store things long-term nor securely. See the advice
given here, for example:
http://scikit-learn.org/stable/modules/model_persistence.html
Parameters
----------
obj : object
The object to pickle.
name : str
Name of the pickle to save (no extension).
"""
with open(_pickle_path(self.root, name), 'wb') as f:
pickle.dump(obj, f)
class TaskReader(object):
"""High-level interface to task storage.
Parameters
----------
root : str
Path to task's root directory. This is the directory specific to a task
which contains a ``trials.csv`` file and HDF5 array files.
"""
def __init__(self, root):
self.root = root
self._trials = None
@property
def trials(self):
"""A Pandas DataFrame representing the trial data."""
if self._trials is None:
self._trials = pandas.read_csv(_trials_path(self.root))
return self._trials
def iterarray(self, name):
"""Iteratively retrieve an array for each trial.
Parameters
----------
name : str
Name of the array type.
"""
for ind in self.trials.index:
dset = str(ind)
yield read_hdf5(_array_path(self.root, name), dataset=dset)
def array(self, name):
"""Retrieve an array type's data for all trials."""
return numpy.vstack(self.iterarray(name))
def pickle(self, name):
"""Load a pickled object from storage.
Parameters
----------
name : str
Name of the pickled object (no extension).
"""
with open(_pickle_path(self.root, name), 'rb') as f:
obj = pickle.load(f)
return obj
#
# Lowest layer. Used by TaskReader/TaskWriter.
#
class TrialWriter(object):
"""Writes trial data to a CSV file line by line.
Parameters
----------
filepath : str
Path to the file to create.
Attributes
----------
data : dict
Dictionary containing all trial data written so far.
"""
def __init__(self, filepath):
self.filepath = filepath
self.data = {}
def write(self, data):
"""Add a single row to the trials dataset.
Data is immediately added to the file on disk.
Parameters
----------
data : dict
Data values to add.
"""
for col, val in data.items():
if col not in self.data:
self.data[col] = []
self.data[col].append(val)
self.df = pandas.DataFrame(self.data)
self.df.to_csv(self.filepath, index=False)
#
# Utilities
#
def _trials_path(taskroot):
return os.path.join(taskroot, 'trials.csv')
def _array_path(taskroot, arrayname):
return os.path.join(taskroot, '{}.hdf5'.format(arrayname))
def _pickle_path(taskroot, picklename):
return os.path.join(taskroot, '{}.pkl'.format(picklename))
def read_hdf5(filepath, dataset='data'):
"""Read the contents of a dataset.
This function assumes the dataset in the HDF5 file exists at the root of
the file (i.e. at '/'). It is primarily for internal usage but you may find
it useful for quickly grabbing an array from an HDF5 file.
Parameters
----------
filepath : str
Path to the file to read from.
dataset : str, optional
Name of the dataset to retrieve. By default, 'data' is used.
Returns
-------
data : ndarray
        The data (read into memory) as a NumPy array. The dtype, shape, etc. are
all determined by whatever is in the file.
"""
with h5py.File(filepath, 'r') as f:
return f.get('/{}'.format(dataset))[:]
def write_hdf5(filepath, data, dataset='data'):
"""Write data to an hdf5 file.
The data is written to a new file with a single dataset called "data" in
the root group. It is primarily for internal usage but you may find it
useful for quickly writing an array to an HDF5 file.
Parameters
----------
filepath : str
Path to the file to be written.
data : ndarray
NumPy array containing the data to write. The dtype, shape, etc. of the
resulting dataset in storage is determined by this array directly.
dataset : str, optional
Name of the dataset to create. Default is 'data'.
"""
with h5py.File(filepath, 'a') as f:
f.create_dataset(dataset, data=data)
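# A round-trip sketch for the two helpers above (illustrative; 'example.hdf5'
# is an arbitrary file name):
#
#     a = numpy.random.randn(100, 8)
#     write_hdf5('example.hdf5', a, dataset='0')
#     b = read_hdf5('example.hdf5', dataset='0')
#     assert numpy.array_equal(a, b)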
def storage_to_zip(path, outfile=None):
"""Create a ZIP archive from a data storage hierarchy.
The contents of the data storage hierarchy are all placed in the archive,
with the top-level folder in the archive being the data storage root folder
itself. That is, all paths within the ZIP file are relative to the dataset
root folder.
Parameters
----------
path : str
Path to the root of the dataset.
outfile : str, optional
Name of the ZIP file to create. If not specified, the file is created
in the same directory as the data root with the same name as the
dataset root directory (with ".zip" added).
Returns
-------
outfile : str
The name of the ZIP file created.
"""
datapath, datadir = os.path.split(path)
if outfile is None:
# absolute path to parent of data root + dataset name + .zip
outfile = os.path.join(datapath, datadir + '.zip')
with zipfile.ZipFile(outfile, 'w') as zipf:
for root, dirs, files in os.walk(path):
for f in files:
# write as *relative* path from data root
zipf.write(os.path.join(root, f),
arcname=os.path.join(datadir, f))
return outfile
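# For example (illustrative), storage_to_zip('data') archives the whole
# hierarchy into 'data.zip' next to the data root and returns that path.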
def makedirs(path, exist_ok=False):
"""Recursively create directories.
This is needed for Python versions earlier than 3.2, otherwise
``os.makedirs(path, exist_ok=True)`` would suffice.
Parameters
----------
path : str
Path to directory to create.
exist_ok : bool, optional
If `exist_ok` is False (default), an exception is raised. Set to True
if it is acceptable that the directory already exists.
"""
try:
os.makedirs(path)
except OSError:
if not exist_ok:
raise
| mit |
GreenGear5/planet-wars | bots/ml-rfc/ml-rfc.py | 1 | 4276 | #!/usr/bin/env python
"""
Uses the Random Forest classifier
"""
from api import State, util
import random, os
from sklearn.externals import joblib
DEFAULT_MODEL = os.path.dirname(os.path.realpath(__file__)) + '/model.pkl'
class Bot:
__max_depth = -1
__randomize = True
__model = None
def __init__(self, randomize=True, depth=12, model_file=DEFAULT_MODEL):
print(model_file)
self.__randomize = randomize
self.__max_depth = depth
# Load the model
self.__model = joblib.load(model_file)
def get_move(self, state):
val, move = self.value(state)
return move
def value(self, state, alpha=float('-inf'), beta=float('inf'), depth=0):
"""
Return the value of this state and the associated move
:param state:
:param alpha: The highest score that the maximizing player can guarantee given current knowledge
:param beta: The lowest score that the minimizing player can guarantee given current knowledge
:param depth: How deep we are in the tree
:return: val, move: the value of the state, and the best move.
"""
if state.finished():
return (1.0, None) if state.winner() == 1 else (-1.0, None)
if depth == self.__max_depth:
return self.heuristic(state), None
best_value = float('-inf') if maximizing(state) else float('inf')
best_move = None
moves = state.moves()
if self.__randomize:
random.shuffle(moves)
for move in moves:
next_state = state.next(move)
value, m = self.value(next_state, alpha, beta, depth + 1)
if maximizing(state):
if value > best_value:
best_value = value
best_move = move
alpha = best_value
else:
if value < best_value:
best_value = value
best_move = move
beta = best_value
# Prune the search tree
# We know this state will never be chosen, so we stop evaluating its children
            if alpha >= beta:
break
return best_value, best_move
def heuristic(self, state):
# Convert the state to a feature vector
feature_vector = [features(state)]
# These are the classes: ('won', 'lost')
classes = list(self.__model.classes_)
# Ask the model for a prediction
# This returns a probability for each class
prob = self.__model.predict_proba(feature_vector)[0]
# print prob
# print('{} {} {}'.format(classes, prob, util.ratio_ships(state, 1)))
# Weigh the win/loss outcomes (-1 and 1) by their probabilities
res = -1.0 * prob[classes.index('lost')] + 1.0 * prob[classes.index('won')]
return res
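# Worked example of the heuristic weighting above (illustrative numbers): if the
# model reports prob = [0.3, 0.7] for classes ('lost', 'won'), the heuristic
# value is -1.0 * 0.3 + 1.0 * 0.7 = 0.4, i.e. mildly in favour of player 1.
# A certain loss maps to -1.0 and a certain win to +1.0, matching the values
# returned for finished states in value().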
def maximizing(state):
"""
Whether we're the maximizing player (1) or the minimizing player (2).
:param state:
:return:
"""
return state.whose_turn() == 1
def features(state):
# type: (State) -> tuple[float, ...]
"""
Extract features from this state. Remember that every feature vector returned should have the same length.
:param state: A state to be converted to a feature vector
:return: A tuple of floats: a feature vector representing this state.
"""
my_id = state.whose_turn()
    opponent_id = 2 if my_id == 1 else 1
# How many ships does p1 have in garrisons?
p1_garrisons = 0.0
# How many ships does p2 have in garrisons?
p2_garrisons = 0.0
p1_planets = 0
p2_planets = 0
for planet in state.planets(my_id):
p1_garrisons += state.garrison(planet)
p1_planets += 1
for planet in state.planets(opponent_id):
p2_garrisons += state.garrison(planet)
p2_planets += 1
# How many ships does p1 have in fleets?
p1_fleets = 0.0
# How many ships does p2 have in fleets?
p2_fleets = 0.0
for fleet in state.fleets():
if fleet.owner() == my_id:
            p1_fleets += fleet.size()
else:
p2_fleets += fleet.size()
return p1_garrisons, p2_garrisons, p1_fleets, p2_fleets, p1_planets, p2_planets
| mit |
nettrom/importance | python/wikiproject/confusion-matrix.py | 1 | 8866 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Script to predict article importance for an entire WikiProject using its
trained model and the entire snapshot dataset, and to print a confusion
matrix of predicted vs. assessed importance ratings.
Copyright (c) 2017 Morten Wang
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import re
import logging
import pickle
from yaml import load
import pandas as pd
import numpy as np
import scipy.stats as st
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import GradientBoostingClassifier as gbm
from sklearn.metrics import confusion_matrix
class WikiProjectPredictor:
def __init__(self):
self.config = None
self.model = None
self.le = None
def load_datasets(self):
'''
Read in the datasets for this WikiProject, join them into a combined
dataset and add the necessary columns.
'''
# read in snapshot
snapshot = pd.read_table(self.config['snapshot file'])
# read in dataset
dataset = pd.read_table(self.config['dataset'])
# read in clickstream
clickstream = pd.read_table(self.config['clickstream file'])
# read in disambiguations
disambiguations = pd.read_table(self.config['disambiguation file'])
# read in the list of side-chained articles
sidechained = pd.read_table(self.config['sidechain file'])
# Log-transform number of inlinks, views, and calculate prop_proj_inlinks
dataset['log_inlinks'] = np.log10(1 + dataset['num_inlinks'])
dataset['log_views'] = np.log10(1 + dataset['num_views'])
dataset['prop_proj_inlinks'] = 1 + dataset['num_proj_inlinks']/(1 + dataset['num_inlinks'])
# Calculate the proportion of clicks from articles
clickstream['prop_from_art'] = np.minimum(
1.0, clickstream['n_from_art']/(1 + clickstream['n_clicks']))
# Join the datasets
# snapshot[dataset[clickstream]]
res = pd.merge(snapshot,
pd.merge(dataset, clickstream,
on='page_id'),
left_on='art_page_id', right_on='page_id')
# filter out pages where the talk page is an archive
res = res[res.talk_is_archive == 0]
# filter out pages where the article is a redirect
res = res[res.art_is_redirect == 0]
# filter out pages where there is no corresponding article
res = res[res.art_page_id > 0]
# filter out disambiguations
res = res[res.art_page_id.isin(disambiguations.page_id) == False]
# filter out all side-chained articles
if not sidechained.empty:
res = res[res.art_page_id.isin(sidechained.page_id) == False]
# calculate proportion of active inlinks
res['prop_act_inlinks'] = np.minimum(
1.0, res['n_act_links']/(1 + res['num_inlinks']))
# add rank variables for views and inlinks, and make them percentiles
res['rank_links'] = res.num_inlinks.rank(method='min')
res['rank_links_perc'] = res.num_inlinks.rank(method='min', pct=True)
res['rank_views'] = res.num_views.rank(method='min')
res['rank_views_perc'] = res.num_views.rank(method='min', pct=True)
# make sure importance ratings are an ordered categorical variable
res['importance_rating'] = res.importance_rating.astype(
'category', categories=['Low', 'Mid', 'High', 'Top'], ordered=True)
self.dataset = res
return()
def predict_ratings(self):
'''
Trim the given dataset down to the right columns, make predictions
of the importance rating, and also probabilities for each rating.
:param dataset: the dataset to make predictions on
:type dataset: `pandas.DataFrame`
'''
X = self.dataset.loc[:, self.config['predictors']].as_matrix()
logging.info('predicting importance ratings')
classes = self.model.predict(X)
logging.info('predicting rating probabilities')
probabilities = self.model.predict_proba(X)
self.dataset['pred_rating'] = pd.Series(classes,
index=self.dataset.index)
for i in range(probabilities.shape[1]):
col_name = 'proba_{}'.format(self.le.inverse_transform(i))
self.dataset[col_name] = probabilities[:,i]
## Return the dataset with predictions and probabilities added
return()
def make_confusion_matrix(self, config_file, print_wikitable=False):
'''
Load in the datasets and models defined in the given configuration file,
then predict the importance of all articles in the datasets.
'''
logging.info('loading the configuration file')
# load in the configuration
with open(config_file) as infile:
self.config = load(infile)
logging.info('loading the model')
# load in the model
with open(self.config['model file'], 'rb') as infile:
self.model = pickle.load(infile)
logging.info('loading the label encoder')
# load in the label encoder
with open(self.config['label encoder file'], 'rb') as infile:
self.le = pickle.load(infile)
logging.info('reading in the datasets')
# read in the datasets
self.load_datasets()
# make predictions for all pages and print out a confusion matrix
logging.info('making predictions')
self.predict_ratings()
## Add a column with the name of the predicted rating
self.dataset['pred_rating_name'] = self.le.inverse_transform(
self.dataset['pred_rating'])
ratings = ['Top', 'High', 'Mid', 'Low'] # ratings in descending order
if print_wikitable:
conf_matrix = confusion_matrix(self.dataset['importance_rating'],
self.dataset['pred_rating_name'],
labels=ratings)
# print header
wikitable = '''{| class="wikitable sortable"
|-
|
'''
for rating in ratings:
wikitable = "{}! {}\n".format(wikitable, rating)
# print content
for (i, rating) in enumerate(ratings):
wikitable = "{}|-\n| {}\n".format(wikitable, rating)
for (j, rating) in enumerate(ratings):
wikitable = "{}| style='text-align:right;' | {{{{formatnum:{n}}}}}\n".format(wikitable, n=conf_matrix[i, j])
# print footer
print(wikitable + "|}")
else:
print(pd.crosstab(self.dataset['importance_rating'],
self.dataset['pred_rating_name'],
rownames=['True'],
colnames=['Predicted'],
margins=True))
return()
def main():
import argparse
cli_parser = argparse.ArgumentParser(
description="script to make predictions for all articles in a WikiProject")
# Verbosity option
cli_parser.add_argument('-v', '--verbose', action='store_true',
help='write informational output')
cli_parser.add_argument('-w', '--wikitable', action='store_true',
help='print the confusion matrix as a wikitable')
## YAML configuration file for the global model
cli_parser.add_argument('config_file',
help='path to the global model YAML configuration file')
args = cli_parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.INFO)
predictor = WikiProjectPredictor()
predictor.make_confusion_matrix(args.config_file, args.wikitable)
return()
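# Example invocation (illustrative; the configuration filename is hypothetical):
#
#     python confusion-matrix.py --verbose --wikitable wikiproject-config.yaml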
if __name__ == '__main__':
main()
| mit |
saeidadli/Python-ArcGIS-Convertor | arcgdfconvertor/convertor.py | 1 | 3491 | import os
import sys
import tempfile
from pathlib import Path
import arcpy
import pandas as pd
import numpy as np
import geopandas as gpd
#constants
#WGS_1984 coordinate system
WGS_1984 = \
"GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984', "+\
"SPHEROID['WGS_1984',6378137.0,298.257223563]], "+\
"PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]]; "+\
"-400 -400 1000000000;-100000 10000;-100000 10000; "+\
"8.98315284119522E-09;0.001;0.001;IsHighPrecision"
#functions
def gdb_path(in_fc):
"""
    Returns the properties of an input GIS dataset
"""
if arcpy.Exists(in_fc):
desc = arcpy.Describe(in_fc)
in_fc = desc.catalogPath
fc_name = desc.name
else:
fc_name = os.path.basename(in_fc)
dirname = os.path.dirname(in_fc)
workspace = arcpy.Describe(dirname).dataType
if workspace == 'FeatureDataset':
GDB = os.path.dirname(dirname)
elif workspace == 'Workspace':
GDB = dirname
elif workspace == 'Folder':
GDB = ''
else:
GDB = ''
return GDB, workspace, dirname, fc_name
def get_fields(in_fc, output_type = 'list'):
#Gets list of fileds from a feature class
fields = arcpy.ListFields(in_fc)
if output_type == 'list':
output = [f.name for f in fields]
elif output_type == 'dict':
output = {f.name: f.type for f in fields}
else:
output = ''
return output
#pandas convertor for ArcGIS
def gdf_to_fc(gdf, fc):
"""
    Converts a geopandas GeoDataFrame to a layer in an ESRI file geodatabase.
    Notes:
    - gdf has to have a geometry field.
"""
if 'geometry' not in gdf.columns.values:
sys.exit()
GDB, workspace, dirname, fc_name = gdb_path(fc)
    # write the GeoDataFrame to a shapefile in a temporary directory
tmp_dir = tempfile.TemporaryDirectory()
p = Path(tmp_dir.name)
n = fc_name + '.shp'
gdf.to_file(str(p/n))
fc_cols = get_fields(str(p/n))[2:]
#copy the file into a feature class
fc = arcpy.CopyFeatures_management(str(p/n), fc)
gdf_cols = gdf.columns.tolist()
gdf_cols.remove('geometry')
#fixing the columns
if gdf_cols:
col_dict = {col: gdf_cols[indx] for indx, col in enumerate(fc_cols) }
for col in col_dict:
if col_dict[col] != col:
arcpy.AlterField_management(fc, col, col_dict[col], clear_field_alias="true")
# Delete temporary directory
tmp_dir.cleanup()
return fc
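# A usage sketch (illustrative; the paths are hypothetical and an ArcGIS
# environment with arcpy must be available):
#
#     gdf = gpd.read_file(r'C:\data\parcels.shp')
#     gdf_to_fc(gdf, r'C:\data\demo.gdb\parcels')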
def gdf_to_tbl(gdf, tbl):
gdf_cols = gdf.columns.values.tolist()
if 'geometry' in gdf_cols:
gdf_cols.remove('geometry')
gdf = gdf[gdf_cols].copy()
x = np.array(np.rec.fromrecords(gdf.values))
names = gdf.dtypes.index.tolist()
names = [str(arcpy.ValidateTableName(name)) for name in names]
x.dtype.names = tuple(names)
arcpy.da.NumPyArrayToTable(x, tbl)
return tbl
def fc_to_gdf(fc):
#use scratch work space for temporary files
GDB, workspace, dirname, fc_name = gdb_path(fc)
if GDB != '':
gdf = gpd.read_file(GDB, layer = fc_name)
else:
desc = arcpy.Describe(fc)
fc_path = desc.catalogPath
gdf = gpd.read_file(fc_path)
return gdf
def tbl_to_gdf(tbl, fieldnames = None):
gdf = fc_to_gdf(tbl)
if fieldnames != None:
        fieldnames = [f for f in fieldnames if f in gdf.columns]
else:
fieldnames = get_fields(tbl)[1:]
return gdf[fieldnames].copy()
| mit |
aringh/odl | examples/tomo/backends/astra_performance_cuda_parallel_2d_cg.py | 1 | 2880 | """Performance example of running native ASTRA vs using ODL for reconstruction.
In this example, a 512x512 image is reconstructed using the Conjugate Gradient
Least Squares method on the GPU.
In general, ASTRA is faster than ODL since it does not need to perform any
copies and all arithmetic is performed on the GPU. Despite this, ODL is not
much slower. In this example, the overhead is about 60 %, depending on the
hardware used.
"""
import astra
import numpy as np
import matplotlib.pyplot as plt
import scipy
import odl
# Common geometry parameters
domain_size = np.array([512, 512])
n_angles = 180
det_size = 362
niter = 50
phantom = np.rot90(scipy.misc.ascent().astype('float'), -1)
# --- ASTRA ---
# Define ASTRA geometry
vol_geom = astra.create_vol_geom(domain_size[0], domain_size[1])
proj_geom = astra.create_proj_geom('parallel',
np.linalg.norm(domain_size) / det_size,
det_size,
np.linspace(0, np.pi, n_angles))
# Create ASTRA projector
proj_id = astra.create_projector('cuda', proj_geom, vol_geom)
# Create sinogram
sinogram_id, sinogram = astra.create_sino(phantom, proj_id)
# Create a data object for the reconstruction
rec_id = astra.data2d.create('-vol', vol_geom)
# Set up the parameters for a reconstruction algorithm using the CUDA backend
cfg = astra.astra_dict('CGLS_CUDA')
cfg['ReconstructionDataId'] = rec_id
cfg['ProjectionDataId'] = sinogram_id
cfg['ProjectorId'] = proj_id
# Create the algorithm object from the configuration structure
alg_id = astra.algorithm.create(cfg)
with odl.util.Timer('ASTRA run'):
# Run the algorithm
astra.algorithm.run(alg_id, niter)
# Get the result
rec = astra.data2d.get(rec_id)
# Clean up.
astra.algorithm.delete(alg_id)
astra.data2d.delete(rec_id)
astra.data2d.delete(sinogram_id)
astra.projector.delete(proj_id)
# --- ODL ---
# Create reconstruction space
reco_space = odl.uniform_discr(-domain_size / 2, domain_size / 2, domain_size)
# Create geometry
geometry = odl.tomo.parallel_beam_geometry(reco_space, n_angles, det_size)
# Create ray transform
ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl='astra_cuda')
# Create sinogram
data = ray_trafo(phantom)
# Solve with CGLS (aka CGN)
x = reco_space.zero()
with odl.util.Timer('ODL run'):
odl.solvers.conjugate_gradient_normal(ray_trafo, x, data, niter=niter)
# Display results for comparison
plt.figure('Phantom')
plt.imshow(phantom.T, origin='lower', cmap='bone')
plt.figure('ASTRA sinogram')
plt.imshow(sinogram.T, origin='lower', cmap='bone')
plt.figure('ASTRA reconstruction')
plt.imshow(rec.T, origin='lower', cmap='bone')
plt.figure('ODL sinogram')
plt.imshow(data.asarray().T, origin='lower', cmap='bone')
plt.figure('ODL reconstruction')
plt.imshow(x.asarray().T, origin='lower', cmap='bone')
plt.show()
| mpl-2.0 |
mne-tools/mne-python | examples/connectivity/mne_inverse_connectivity_spectrum.py | 6 | 3460 | """
==============================================================
Compute full spectrum source space connectivity between labels
==============================================================
The connectivity is computed between 4 labels across the spectrum
between 7.5 Hz and 40 Hz.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
from mne.connectivity import spectral_connectivity
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
# Load data
inverse_operator = read_inverse_operator(fname_inv)
raw = mne.io.read_raw_fif(fname_raw)
events = mne.read_events(fname_event)
# Add a bad channel
raw.info['bads'] += ['MEG 2443']
# Pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
exclude='bads')
# Define epochs for left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
eog=150e-6))
# Compute inverse solution and for each epoch. By using "return_generator=True"
# stcs will be a generator object instead of a list.
snr = 1.0 # use lower SNR for single epochs
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method,
pick_ori="normal", return_generator=True)
# Read some labels
names = ['Aud-lh', 'Aud-rh', 'Vis-lh', 'Vis-rh']
labels = [mne.read_label(data_path + '/MEG/sample/labels/%s.label' % name)
for name in names]
# Average the source estimates within each label using sign-flips to reduce
# signal cancellations, also here we return a generator
src = inverse_operator['src']
label_ts = mne.extract_label_time_course(stcs, labels, src, mode='mean_flip',
return_generator=True)
fmin, fmax = 7.5, 40.
sfreq = raw.info['sfreq'] # the sampling frequency
con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
label_ts, method='wpli2_debiased', mode='multitaper', sfreq=sfreq,
fmin=fmin, fmax=fmax, mt_adaptive=True, n_jobs=1)
n_rows, n_cols = con.shape[:2]
fig, axes = plt.subplots(n_rows, n_cols, sharex=True, sharey=True)
for i in range(n_rows):
for j in range(i + 1):
if i == j:
axes[i, j].set_axis_off()
continue
axes[i, j].plot(freqs, con[i, j, :])
axes[j, i].plot(freqs, con[i, j, :])
if j == 0:
axes[i, j].set_ylabel(names[i])
axes[0, i].set_title(names[i])
if i == (n_rows - 1):
axes[i, j].set_xlabel(names[j])
axes[i, j].set(xlim=[fmin, fmax], ylim=[-0.2, 1])
axes[j, i].set(xlim=[fmin, fmax], ylim=[-0.2, 1])
# Show band limits
for f in [8, 12, 18, 35]:
axes[i, j].axvline(f, color='k')
axes[j, i].axvline(f, color='k')
plt.tight_layout()
plt.show()
| bsd-3-clause |
buqing2009/MissionPlanner | Lib/site-packages/scipy/optimize/nonlin.py | 53 | 46004 | r"""
Nonlinear solvers
=================
.. currentmodule:: scipy.optimize
This is a collection of general-purpose nonlinear multidimensional
solvers. These solvers find *x* for which *F(x) = 0*. Both *x*
and *F* can be multidimensional.
Routines
--------
Large-scale nonlinear solvers:
.. autosummary::
newton_krylov
anderson
General nonlinear solvers:
.. autosummary::
broyden1
broyden2
Simple iterations:
.. autosummary::
excitingmixing
linearmixing
diagbroyden
Examples
========
Small problem
-------------
>>> def F(x):
... return np.cos(x) + x[::-1] - [1, 2, 3, 4]
>>> import scipy.optimize
>>> x = scipy.optimize.broyden1(F, [1,1,1,1], f_tol=1e-14)
>>> x
array([ 4.04674914, 3.91158389, 2.71791677, 1.61756251])
>>> np.cos(x) + x[::-1]
array([ 1., 2., 3., 4.])
Large problem
-------------
Suppose that we needed to solve the following integrodifferential
equation on the square :math:`[0,1]\times[0,1]`:
.. math::
\nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2
with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of
the square.
The solution can be found using the `newton_krylov` solver:
.. plot::
import numpy as np
from scipy.optimize import newton_krylov
from numpy import cosh, zeros_like, mgrid, zeros
# parameters
nx, ny = 75, 75
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
def residual(P):
d2x = zeros_like(P)
d2y = zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
return d2x + d2y - 10*cosh(P).mean()**2
# solve
guess = zeros((nx, ny), float)
sol = newton_krylov(residual, guess, method='lgmres', verbose=1)
print 'Residual', abs(residual(sol)).max()
# visualize
import matplotlib.pyplot as plt
x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
plt.pcolor(x, y, sol)
plt.colorbar()
plt.show()
"""
# Copyright (C) 2009, Pauli Virtanen <pav@iki.fi>
# Distributed under the same license as Scipy.
import sys
import numpy as np
from scipy.linalg import norm, solve, inv, qr, svd, lstsq, LinAlgError
from numpy import asarray, dot, vdot
if sys.platform != 'cli':
import scipy.sparse.linalg
import scipy.sparse
import scipy.lib.blas as blas
import inspect
else:
print "Warning: scipy.optimize.nonlin package is not supported under IronPython yet."
from linesearch import scalar_search_wolfe1, scalar_search_armijo
__all__ = [
'broyden1', 'broyden2', 'anderson', 'linearmixing',
'diagbroyden', 'excitingmixing', 'newton_krylov',
# Deprecated functions:
'broyden_generalized', 'anderson2', 'broyden3']
#------------------------------------------------------------------------------
# Utility functions
#------------------------------------------------------------------------------
class NoConvergence(Exception):
pass
def maxnorm(x):
return np.absolute(x).max()
def _as_inexact(x):
"""Return `x` as an array, of either floats or complex floats"""
x = asarray(x)
if not np.issubdtype(x.dtype, np.inexact):
return asarray(x, dtype=np.float_)
return x
def _array_like(x, x0):
"""Return ndarray `x` as same array subclass and shape as `x0`"""
x = np.reshape(x, np.shape(x0))
wrap = getattr(x0, '__array_wrap__', x.__array_wrap__)
return wrap(x)
def _safe_norm(v):
if not np.isfinite(v).all():
return np.array(np.inf)
return norm(v)
#------------------------------------------------------------------------------
# Generic nonlinear solver machinery
#------------------------------------------------------------------------------
_doc_parts = dict(
params_basic="""
F : function(x) -> f
Function whose root to find; should take and return an array-like
object.
x0 : array-like
Initial guess for the solution
""".strip(),
params_extra="""
iter : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
verbose : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
f_tol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
f_rtol : float, optional
Relative tolerance for the residual. If omitted, not used.
x_tol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
x_rtol : float, optional
Relative minimum step size. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in the
direction given by the Jacobian approximation. Defaults to 'armijo'.
callback : function, optional
Optional callback function. It is called on every iteration as
``callback(x, f)`` where `x` is the current solution and `f`
the corresponding residual.
Returns
-------
sol : array-like
An array (of similar array type as `x0`) containing the final solution.
Raises
------
NoConvergence
When a solution was not found.
""".strip()
)
def _set_doc(obj):
if obj.__doc__:
obj.__doc__ = obj.__doc__ % _doc_parts
def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False,
maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None):
"""
Find a root of a function, in a way suitable for large-scale problems.
Parameters
----------
%(params_basic)s
jacobian : Jacobian
A Jacobian approximation: `Jacobian` object or something that
`asjacobian` can transform to one. Alternatively, a string specifying
which of the builtin Jacobian approximations to use:
krylov, broyden1, broyden2, anderson
diagbroyden, linearmixing, excitingmixing
%(params_extra)s
See Also
--------
asjacobian, Jacobian
Notes
-----
This algorithm implements the inexact Newton method, with
backtracking or full line searches. Several Jacobian
approximations are available, including Krylov and Quasi-Newton
methods.
References
----------
.. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear
Equations\". Society for Industrial and Applied Mathematics. (1995)
http://www.siam.org/books/kelley/
"""
condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol,
x_tol=x_tol, x_rtol=x_rtol,
iter=iter, norm=tol_norm)
x0 = _as_inexact(x0)
func = lambda z: _as_inexact(F(_array_like(z, x0))).flatten()
x = x0.flatten()
dx = np.inf
Fx = func(x)
Fx_norm = norm(Fx)
jacobian = asjacobian(jacobian)
jacobian.setup(x.copy(), Fx, func)
if maxiter is None:
if iter is not None:
maxiter = iter + 1
else:
maxiter = 100*(x.size+1)
if line_search is True:
line_search = 'armijo'
elif line_search is False:
line_search = None
if line_search not in (None, 'armijo', 'wolfe'):
raise ValueError("Invalid line search")
# Solver tolerance selection
gamma = 0.9
eta_max = 0.9999
eta_treshold = 0.1
eta = 1e-3
for n in xrange(maxiter):
if condition.check(Fx, x, dx):
break
# The tolerance, as computed for scipy.sparse.linalg.* routines
tol = min(eta, eta*Fx_norm)
dx = -jacobian.solve(Fx, tol=tol)
if norm(dx) == 0:
raise ValueError("Jacobian inversion yielded zero vector. "
"This indicates a bug in the Jacobian "
"approximation.")
# Line search, or Newton step
if line_search:
s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx,
line_search)
else:
s = 1.0
x += dx
Fx = func(x)
Fx_norm_new = norm(Fx)
jacobian.update(x.copy(), Fx)
if callback:
callback(x, Fx)
# Adjust forcing parameters for inexact methods
eta_A = gamma * Fx_norm_new**2 / Fx_norm**2
if gamma * eta**2 < eta_treshold:
eta = min(eta_max, eta_A)
else:
eta = min(eta_max, max(eta_A, gamma*eta**2))
Fx_norm = Fx_norm_new
# Print status
if verbose:
sys.stdout.write("%d: |F(x)| = %g; step %g; tol %g\n" % (
n, norm(Fx), s, eta))
sys.stdout.flush()
else:
raise NoConvergence(_array_like(x, x0))
return _array_like(x, x0)
_set_doc(nonlin_solve)
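# A small usage sketch (mirrors the module-level example above; illustrative):
#
#     F = lambda x: np.cos(x) + x[::-1] - [1, 2, 3, 4]
#     x = nonlin_solve(F, [1, 1, 1, 1], jacobian='broyden1', f_tol=1e-14)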
def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8,
smin=1e-2):
tmp_s = [0]
tmp_Fx = [Fx]
tmp_phi = [norm(Fx)**2]
s_norm = norm(x) / norm(dx)
def phi(s, store=True):
if s == tmp_s[0]:
return tmp_phi[0]
xt = x + s*dx
v = func(xt)
p = _safe_norm(v)**2
if store:
tmp_s[0] = s
tmp_phi[0] = p
tmp_Fx[0] = v
return p
def derphi(s):
ds = (abs(s) + s_norm + 1) * rdiff
return (phi(s+ds, store=False) - phi(s)) / ds
if search_type == 'wolfe':
s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0],
xtol=1e-2, amin=smin)
elif search_type == 'armijo':
s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0],
amin=smin)
if s is None:
# XXX: No suitable step length found. Take the full Newton step,
# and hope for the best.
s = 1.0
x = x + s*dx
if s == tmp_s[0]:
Fx = tmp_Fx[0]
else:
Fx = func(x)
Fx_norm = norm(Fx)
return s, x, Fx, Fx_norm
class TerminationCondition(object):
"""
Termination condition for an iteration. It is terminated if
- |F| < f_rtol*|F_0|, AND
- |F| < f_tol
AND
- |dx| < x_rtol*|x|, AND
- |dx| < x_tol
"""
def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
iter=None, norm=maxnorm):
if f_tol is None:
f_tol = np.finfo(np.float_).eps ** (1./3)
if f_rtol is None:
f_rtol = np.inf
if x_tol is None:
x_tol = np.inf
if x_rtol is None:
x_rtol = np.inf
self.x_tol = x_tol
self.x_rtol = x_rtol
self.f_tol = f_tol
self.f_rtol = f_rtol
        self.norm = norm
self.iter = iter
self.f0_norm = None
self.iteration = 0
def check(self, f, x, dx):
self.iteration += 1
f_norm = self.norm(f)
x_norm = self.norm(x)
dx_norm = self.norm(dx)
if self.f0_norm is None:
self.f0_norm = f_norm
if f_norm == 0:
return True
if self.iter is not None:
# backwards compatibility with Scipy 0.6.0
return self.iteration > self.iter
# NB: condition must succeed for rtol=inf even if norm == 0
return ((f_norm <= self.f_tol and f_norm/self.f_rtol <= self.f0_norm)
and (dx_norm <= self.x_tol and dx_norm/self.x_rtol <= x_norm))
#------------------------------------------------------------------------------
# Generic Jacobian approximation
#------------------------------------------------------------------------------
class Jacobian(object):
"""
Common interface for Jacobians or Jacobian approximations.
The optional methods come useful when implementing trust region
etc. algorithms that often require evaluating transposes of the
Jacobian.
Methods
-------
solve
Returns J^-1 * v
update
Updates Jacobian to point `x` (where the function has residual `Fx`)
matvec : optional
Returns J * v
rmatvec : optional
Returns A^H * v
rsolve : optional
Returns A^-H * v
matmat : optional
Returns A * V, where V is a dense matrix with dimensions (N,K).
todense : optional
Form the dense Jacobian matrix. Necessary for dense trust region
algorithms, and useful for testing.
Attributes
----------
shape
Matrix dimensions (M, N)
dtype
Data type of the matrix.
func : callable, optional
Function the Jacobian corresponds to
"""
def __init__(self, **kw):
names = ["solve", "update", "matvec", "rmatvec", "rsolve",
"matmat", "todense", "shape", "dtype"]
for name, value in kw.items():
if name not in names:
raise ValueError("Unknown keyword argument %s" % name)
if value is not None:
setattr(self, name, kw[name])
if hasattr(self, 'todense'):
self.__array__ = lambda: self.todense()
def aspreconditioner(self):
return InverseJacobian(self)
def solve(self, v, tol=0):
raise NotImplementedError
def update(self, x, F):
pass
def setup(self, x, F, func):
self.func = func
self.shape = (F.size, x.size)
self.dtype = F.dtype
if self.__class__.setup is Jacobian.setup:
# Call on the first point unless overridden
            self.update(x, F)
class InverseJacobian(object):
def __init__(self, jacobian):
self.jacobian = jacobian
self.matvec = jacobian.solve
self.update = jacobian.update
if hasattr(jacobian, 'setup'):
self.setup = jacobian.setup
if hasattr(jacobian, 'rsolve'):
self.rmatvec = jacobian.rsolve
@property
def shape(self):
return self.jacobian.shape
@property
def dtype(self):
return self.jacobian.dtype
def asjacobian(J):
"""
Convert given object to one suitable for use as a Jacobian.
"""
spsolve = scipy.sparse.linalg.spsolve
if isinstance(J, Jacobian):
return J
elif inspect.isclass(J) and issubclass(J, Jacobian):
return J()
elif isinstance(J, np.ndarray):
if J.ndim > 2:
raise ValueError('array must have rank <= 2')
J = np.atleast_2d(np.asarray(J))
if J.shape[0] != J.shape[1]:
raise ValueError('array must be square')
return Jacobian(matvec=lambda v: dot(J, v),
rmatvec=lambda v: dot(J.conj().T, v),
solve=lambda v: solve(J, v),
rsolve=lambda v: solve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif scipy.sparse.isspmatrix(J):
if J.shape[0] != J.shape[1]:
raise ValueError('matrix must be square')
return Jacobian(matvec=lambda v: J*v,
rmatvec=lambda v: J.conj().T * v,
solve=lambda v: spsolve(J, v),
rsolve=lambda v: spsolve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'):
return Jacobian(matvec=getattr(J, 'matvec'),
rmatvec=getattr(J, 'rmatvec'),
solve=J.solve,
rsolve=getattr(J, 'rsolve'),
update=getattr(J, 'update'),
setup=getattr(J, 'setup'),
dtype=J.dtype,
shape=J.shape)
elif callable(J):
# Assume it's a function J(x) that returns the Jacobian
class Jac(Jacobian):
def update(self, x, F):
self.x = x
def solve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m, v)
else:
raise ValueError("Unknown matrix type")
def matvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m, v)
elif scipy.sparse.isspmatrix(m):
return m*v
else:
raise ValueError("Unknown matrix type")
def rsolve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m.conj().T, v)
else:
raise ValueError("Unknown matrix type")
def rmatvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return m.conj().T * v
else:
raise ValueError("Unknown matrix type")
return Jac()
elif isinstance(J, str):
return dict(broyden1=BroydenFirst,
broyden2=BroydenSecond,
anderson=Anderson,
diagbroyden=DiagBroyden,
linearmixing=LinearMixing,
excitingmixing=ExcitingMixing,
krylov=KrylovJacobian)[J]()
else:
raise TypeError('Cannot convert object to a Jacobian')
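# Examples of inputs accepted by asjacobian (illustrative):
#
#     asjacobian(np.eye(3))                   # dense matrix
#     asjacobian('krylov')                    # builtin approximation by name
#     asjacobian(lambda x: np.diag(2*x))      # callable returning J at x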
#------------------------------------------------------------------------------
# Broyden
#------------------------------------------------------------------------------
class GenericBroyden(Jacobian):
def setup(self, x0, f0, func):
Jacobian.setup(self, x0, f0, func)
self.last_f = f0
self.last_x = x0
if hasattr(self, 'alpha') and self.alpha is None:
# autoscale the initial Jacobian parameter
self.alpha = 0.5*max(norm(x0), 1) / norm(f0)
def _update(self, x, f, dx, df, dx_norm, df_norm):
raise NotImplementedError
def update(self, x, f):
df = f - self.last_f
dx = x - self.last_x
self._update(x, f, dx, df, norm(dx), norm(df))
self.last_f = f
self.last_x = x
class LowRankMatrix(object):
r"""
A matrix represented as
.. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger
However, if the rank of the matrix reaches the dimension of the vectors,
full matrix representation will be used thereon.
"""
def __init__(self, alpha, n, dtype):
self.alpha = alpha
self.cs = []
self.ds = []
self.n = n
self.dtype = dtype
self.collapsed = None
@staticmethod
def _matvec(v, alpha, cs, ds):
axpy, scal, dotc = blas.get_blas_funcs(['axpy', 'scal', 'dotc'],
cs[:1] + [v])
w = alpha * v
for c, d in zip(cs, ds):
a = dotc(d, v)
w = axpy(c, w, w.size, a)
return w
@staticmethod
def _solve(v, alpha, cs, ds):
"""Evaluate w = M^-1 v"""
if len(cs) == 0:
return v/alpha
# (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1
axpy, dotc = blas.get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v])
c0 = cs[0]
A = alpha * np.identity(len(cs), dtype=c0.dtype)
for i, d in enumerate(ds):
for j, c in enumerate(cs):
A[i,j] += dotc(d, c)
q = np.zeros(len(cs), dtype=c0.dtype)
for j, d in enumerate(ds):
q[j] = dotc(d, v)
q /= alpha
q = solve(A, q)
w = v/alpha
for c, qc in zip(cs, q):
w = axpy(c, w, w.size, -qc)
return w
def matvec(self, v):
"""Evaluate w = M v"""
if self.collapsed is not None:
return np.dot(self.collapsed, v)
return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds)
def rmatvec(self, v):
"""Evaluate w = M^H v"""
if self.collapsed is not None:
return np.dot(self.collapsed.T.conj(), v)
return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs)
def solve(self, v, tol=0):
"""Evaluate w = M^-1 v"""
if self.collapsed is not None:
return solve(self.collapsed, v)
return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds)
def rsolve(self, v, tol=0):
"""Evaluate w = M^-H v"""
if self.collapsed is not None:
return solve(self.collapsed.T.conj(), v)
return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs)
def append(self, c, d):
if self.collapsed is not None:
self.collapsed += c[:,None] * d[None,:].conj()
return
self.cs.append(c)
self.ds.append(d)
if len(self.cs) > c.size:
self.collapse()
def __array__(self):
if self.collapsed is not None:
return self.collapsed
Gm = self.alpha*np.identity(self.n, dtype=self.dtype)
for c, d in zip(self.cs, self.ds):
Gm += c[:,None]*d[None,:].conj()
return Gm
def collapse(self):
"""Collapse the low-rank matrix to a full-rank one."""
self.collapsed = np.array(self)
self.cs = None
self.ds = None
self.alpha = None
def restart_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping all vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
if len(self.cs) > rank:
del self.cs[:]
del self.ds[:]
def simple_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping oldest vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
while len(self.cs) > rank:
del self.cs[0]
del self.ds[0]
def svd_reduce(self, max_rank, to_retain=None):
"""
Reduce the rank of the matrix by retaining some SVD components.
This corresponds to the \"Broyden Rank Reduction Inverse\"
algorithm described in [vR]_.
Note that the SVD decomposition can be done by solving only a
problem whose size is the effective rank of this matrix, which
is viable even for large problems.
Parameters
----------
max_rank : int
Maximum rank of this matrix after reduction.
to_retain : int, optional
Number of SVD components to retain when reduction is done
(ie. rank > max_rank). Default is ``max_rank - 2``.
References
----------
.. [vR] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
if self.collapsed is not None:
return
p = max_rank
if to_retain is not None:
q = to_retain
else:
q = p - 2
if self.cs:
p = min(p, len(self.cs[0]))
q = max(0, min(q, p-1))
m = len(self.cs)
if m < p:
# nothing to do
return
C = np.array(self.cs).T
D = np.array(self.ds).T
D, R = qr(D, mode='qr', econ=True)
C = dot(C, R.T.conj())
U, S, WH = svd(C, full_matrices=False, compute_uv=True)
C = dot(C, inv(WH))
D = dot(D, WH.T.conj())
for k in xrange(q):
self.cs[k] = C[:,k].copy()
self.ds[k] = D[:,k].copy()
del self.cs[q:]
del self.ds[q:]
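# A small sketch of the representation above (illustrative): starting from
# M = alpha*I and appending one pair (c, d) gives M v == alpha*v + c*vdot(d, v),
# which matvec() evaluates without ever forming the dense matrix; np.array(M)
# recovers the dense form when needed.
#
#     M = LowRankMatrix(-0.5, 3, float)
#     M.append(np.arange(3.0), np.ones(3))
#     v = np.array([1.0, 2.0, 3.0])
#     w = M.matvec(v)       # equals -0.5*v + np.arange(3.0)*v.sum()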
_doc_parts['broyden_params'] = """
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden matrix
stays low. Can either be a string giving the name of the method,
or a tuple of the form ``(method, param1, param2, ...)``
that gives the name of the method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no extra parameters.
- ``simple``: drop oldest matrix column. Has no extra parameters.
- ``svd``: keep only the most significant SVD components.
Extra parameters:
- ``to_retain`: number of SVD components to retain when
rank reduction is done. Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (ie., no rank reduction).
""".strip()
class BroydenFirst(GenericBroyden):
r"""
Find a root of a function, using Broyden's first Jacobian approximation.
This method is also known as \"Broyden's good method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df)
which corresponds to Broyden's first Jacobian update
.. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx
References
----------
.. [vR] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def __init__(self, alpha=None, reduction_method='restart', max_rank=None):
GenericBroyden.__init__(self)
self.alpha = alpha
self.Gm = None
if max_rank is None:
max_rank = np.inf
self.max_rank = max_rank
if isinstance(reduction_method, str):
reduce_params = ()
else:
reduce_params = reduction_method[1:]
reduction_method = reduction_method[0]
reduce_params = (max_rank - 1,) + reduce_params
if reduction_method == 'svd':
self._reduce = lambda: self.Gm.svd_reduce(*reduce_params)
elif reduction_method == 'simple':
self._reduce = lambda: self.Gm.simple_reduce(*reduce_params)
elif reduction_method == 'restart':
self._reduce = lambda: self.Gm.restart_reduce(*reduce_params)
else:
raise ValueError("Unknown rank reduction method '%s'" %
reduction_method)
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype)
def todense(self):
return inv(self.Gm)
def solve(self, f, tol=0):
r = self.Gm.matvec(f)
if not np.isfinite(r).all():
# singular; reset the Jacobian approximation
self.setup(self.last_x, self.last_f, self.func)
return self.Gm.matvec(f)
def matvec(self, f):
return self.Gm.solve(f)
def rsolve(self, f, tol=0):
return self.Gm.rmatvec(f)
def rmatvec(self, f):
return self.Gm.rsolve(f)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = self.Gm.rmatvec(dx)
c = dx - self.Gm.matvec(df)
d = v / vdot(df, v)
self.Gm.append(c, d)
class BroydenSecond(BroydenFirst):
"""
Find a root of a function, using Broyden\'s second Jacobian approximation.
This method is also known as \"Broyden's bad method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) df^\dagger / ( df^\dagger df)
corresponding to Broyden's second method.
References
----------
.. [vR] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = df
c = dx - self.Gm.matvec(df)
d = v / df_norm**2
self.Gm.append(c, d)
#------------------------------------------------------------------------------
# Broyden-like (restricted memory)
#------------------------------------------------------------------------------
class Anderson(GenericBroyden):
"""
Find a root of a function, using (extended) Anderson mixing.
The Jacobian is formed by a 'best' solution in the space
spanned by the last `M` vectors. As a result, only an MxM matrix
inversion and MxN multiplications are required. [Ey]_
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
M : int, optional
Number of previous vectors to retain. Defaults to 5.
w0 : float, optional
Regularization parameter for numerical stability.
Good values are of the order of 0.01, compared to unity.
%(params_extra)s
References
----------
.. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
"""
# Note:
#
# Anderson method maintains a rank M approximation of the inverse Jacobian,
#
# J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v
# A = W + dF^H dF
# W = w0^2 diag(dF^H dF)
#
# so that for w0 = 0 the secant condition applies for last M iterates, ie.,
#
# J^-1 df_j = dx_j
#
# for all j = 0 ... M-1.
#
# Moreover, (from Sherman-Morrison-Woodbury formula)
#
# J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v
# C = (dX + alpha dF) A^-1
# b = -1/alpha
#
# and after simplification
#
# J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v
#
def __init__(self, alpha=None, w0=0.01, M=5):
GenericBroyden.__init__(self)
self.alpha = alpha
self.M = M
self.dx = []
self.df = []
self.gamma = None
self.w0 = w0
def solve(self, f, tol=0):
dx = -self.alpha*f
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in xrange(n):
df_f[k] = vdot(self.df[k], f)
try:
gamma = solve(self.a, df_f)
except LinAlgError:
# singular; reset the Jacobian approximation
del self.dx[:]
del self.df[:]
return dx
for m in xrange(n):
dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
return dx
def matvec(self, f):
dx = -f/self.alpha
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in xrange(n):
df_f[k] = vdot(self.df[k], f)
b = np.empty((n, n), dtype=f.dtype)
for i in xrange(n):
for j in xrange(n):
b[i,j] = vdot(self.df[i], self.dx[j])
if i == j and self.w0 != 0:
b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
gamma = solve(b, df_f)
for m in xrange(n):
dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha)
return dx
def _update(self, x, f, dx, df, dx_norm, df_norm):
if self.M == 0:
return
self.dx.append(dx)
self.df.append(df)
while len(self.dx) > self.M:
self.dx.pop(0)
self.df.pop(0)
n = len(self.dx)
a = np.zeros((n, n), dtype=f.dtype)
for i in xrange(n):
for j in xrange(i, n):
if i == j:
wd = self.w0**2
else:
wd = 0
a[i,j] = (1+wd)*vdot(self.df[i], self.df[j])
a += np.triu(a, 1).T.conj()
self.a = a
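# --- Editor's note: illustrative sketch, not part of the original module. It
# verifies the statement in the note above the Anderson class: with w0 = 0 the
# approximate inverse Jacobian
#     J^-1 v = -alpha*v + (dX + alpha*dF) A^-1 dF^H v,    A = dF^H dF,
# satisfies the secant condition J^-1 df_j = dx_j for each of the last M iterates.
# The arrays below are made up for the demonstration.
def _anderson_secant_check_demo():
    np.random.seed(1)
    n, M, alpha = 6, 3, 0.7
    dX = np.random.randn(n, M)               # columns are the steps dx_j
    dF = np.random.randn(n, M)               # columns are the residual changes df_j
    A = np.dot(dF.T.conj(), dF)              # M x M matrix (W = 0 because w0 = 0)

    def inv_jac_matvec(v):
        rhs = np.dot(dF.T.conj(), v)
        return -alpha * v + np.dot(dX + alpha * dF, np.linalg.solve(A, rhs))

    for j in range(M):
        assert np.allclose(inv_jac_matvec(dF[:, j]), dX[:, j])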
#------------------------------------------------------------------------------
# Simple iterations
#------------------------------------------------------------------------------
class DiagBroyden(GenericBroyden):
"""
Find a root of a function, using diagonal Broyden Jacobian approximation.
The Jacobian approximation is derived from previous iterations, by
retaining only the diagonal of Broyden matrices.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
%(params_extra)s
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.d = np.ones((self.shape[0],), dtype=self.dtype) / self.alpha
def solve(self, f, tol=0):
return -f / self.d
def matvec(self, f):
return -f * self.d
def rsolve(self, f, tol=0):
return -f / self.d.conj()
def rmatvec(self, f):
return -f * self.d.conj()
def todense(self):
return np.diag(-self.d)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self.d -= (df + self.d*dx)*dx/dx_norm**2
class LinearMixing(GenericBroyden):
"""
Find a root of a function, using a scalar Jacobian approximation.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
The Jacobian approximation is (-1/alpha).
%(params_extra)s
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def solve(self, f, tol=0):
return -f*self.alpha
def matvec(self, f):
return -f/self.alpha
def rsolve(self, f, tol=0):
return -f*np.conj(self.alpha)
def rmatvec(self, f):
return -f/np.conj(self.alpha)
def todense(self):
return np.diag(-np.ones(self.shape[0])/self.alpha)
def _update(self, x, f, dx, df, dx_norm, df_norm):
pass
class ExcitingMixing(GenericBroyden):
"""
Find a root of a function, using a tuned diagonal Jacobian approximation.
The Jacobian matrix is diagonal and is tuned on each iteration.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial Jacobian approximation is (-1/alpha).
alphamax : float, optional
The entries of the diagonal Jacobian are kept in the range
``[alpha, alphamax]``.
%(params_extra)s
"""
def __init__(self, alpha=None, alphamax=1.0):
GenericBroyden.__init__(self)
self.alpha = alpha
self.alphamax = alphamax
self.beta = None
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.beta = self.alpha * np.ones((self.shape[0],), dtype=self.dtype)
def solve(self, f, tol=0):
return -f*self.beta
def matvec(self, f):
return -f/self.beta
def rsolve(self, f, tol=0):
return -f*self.beta.conj()
def rmatvec(self, f):
return -f/self.beta.conj()
def todense(self):
return np.diag(-1/self.beta)
def _update(self, x, f, dx, df, dx_norm, df_norm):
incr = f*self.last_f > 0
self.beta[incr] += self.alpha
self.beta[~incr] = self.alpha
np.clip(self.beta, 0, self.alphamax, out=self.beta)
#------------------------------------------------------------------------------
# Iterative/Krylov approximated Jacobians
#------------------------------------------------------------------------------
class KrylovJacobian(Jacobian):
r"""
Find a root of a function, using Krylov approximation for inverse Jacobian.
This method is suitable for solving large-scale problems.
Parameters
----------
%(params_basic)s
rdiff : float, optional
Relative step size to use in numerical differentiation.
method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function
Krylov method to use to approximate the Jacobian.
Can be a string, or a function implementing the same interface as
the iterative solvers in `scipy.sparse.linalg`.
The default is `scipy.sparse.linalg.lgmres`.
inner_M : LinearOperator or InverseJacobian
Preconditioner for the inner Krylov iteration.
Note that you can use also inverse Jacobians as (adaptive)
preconditioners. For example,
>>> jac = BroydenFirst()
>>> kjac = KrylovJacobian(inner_M=jac.inverse)
If the preconditioner has a method named 'update', it will be called
as ``update(x, f)`` after each nonlinear step, with ``x`` giving
the current point, and ``f`` the current function value.
inner_tol, inner_maxiter, ...
Parameters to pass on to the \"inner\" Krylov solver.
See `scipy.sparse.linalg.gmres` for details.
outer_k : int, optional
Size of the subspace kept across LGMRES nonlinear iterations.
See `scipy.sparse.linalg.lgmres` for details.
%(params_extra)s
See Also
--------
scipy.sparse.linalg.gmres
scipy.sparse.linalg.lgmres
Notes
-----
This function implements a Newton-Krylov solver. The basic idea is
to compute the inverse of the Jacobian with an iterative Krylov
method. These methods require only evaluating the Jacobian-vector
products, which are conveniently approximated by numerical
differentiation:
.. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega
Due to the use of iterative matrix inverses, these methods can
deal with large nonlinear problems.
Scipy's `scipy.sparse.linalg` module offers a selection of Krylov
solvers to choose from. The default here is `lgmres`, which is a
variant of restarted GMRES iteration that reuses some of the
information obtained in the previous Newton steps to invert
Jacobians in subsequent steps.
For a review on Newton-Krylov methods, see for example [KK]_,
and for the LGMRES sparse inverse method, see [BJM]_.
References
----------
.. [KK] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2003).
.. [BJM] A.H. Baker and E.R. Jessup and T. Manteuffel,
SIAM J. Matrix Anal. Appl. 26, 962 (2005).
"""
def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20,
inner_M=None, outer_k=10, **kw):
self.preconditioner = inner_M
self.rdiff = rdiff
self.method = dict(
bicgstab=scipy.sparse.linalg.bicgstab,
gmres=scipy.sparse.linalg.gmres,
lgmres=scipy.sparse.linalg.lgmres,
cgs=scipy.sparse.linalg.cgs,
minres=scipy.sparse.linalg.minres,
).get(method, method)
self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner)
if self.method is scipy.sparse.linalg.gmres:
# Replace GMRES's outer iteration with Newton steps
self.method_kw['restrt'] = inner_maxiter
self.method_kw['maxiter'] = 1
elif self.method is scipy.sparse.linalg.lgmres:
self.method_kw['outer_k'] = outer_k
# Replace LGMRES's outer iteration with Newton steps
self.method_kw['maxiter'] = 1
# Carry LGMRES's `outer_v` vectors across nonlinear iterations
self.method_kw.setdefault('outer_v', [])
# But don't carry the corresponding Jacobian*v products, in case
# the Jacobian changes a lot in the nonlinear step
#
# XXX: some trust-region inspired ideas might be more efficient...
# See eg. Brown & Saad. But needs to be implemented separately
# since it's not an inexact Newton method.
self.method_kw.setdefault('store_outer_Av', False)
for key, value in kw.items():
if not key.startswith('inner_'):
raise ValueError("Unknown parameter %s" % key)
self.method_kw[key[6:]] = value
def _update_diff_step(self):
mx = abs(self.x0).max()
mf = abs(self.f0).max()
self.omega = self.rdiff * max(1, mx) / max(1, mf)
def matvec(self, v):
nv = norm(v)
if nv == 0:
return 0*v
sc = self.omega / nv
r = (self.func(self.x0 + sc*v) - self.f0) / sc
if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)):
raise ValueError('Function returned non-finite results')
return r
def solve(self, rhs, tol=0):
sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw)
return sol
def update(self, x, f):
self.x0 = x
self.f0 = f
self._update_diff_step()
# Update also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'update'):
self.preconditioner.update(x, f)
def setup(self, x, f, func):
Jacobian.setup(self, x, f, func)
self.x0 = x
self.f0 = f
self.op = scipy.sparse.linalg.aslinearoperator(self)
if self.rdiff is None:
self.rdiff = np.finfo(x.dtype).eps ** (1./2)
self._update_diff_step()
# Setup also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'setup'):
self.preconditioner.setup(x, f, func)
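# --- Editor's note: illustrative sketch, not part of the original module. It checks
# the finite-difference Jacobian-vector product used by KrylovJacobian.matvec,
#     J v ~= (F(x + omega*v/|v|) - F(x)) / (omega/|v|),
# against the analytic Jacobian of a small made-up residual.
def _krylov_matvec_demo():
    def F(x):
        return np.array([x[0] ** 2 + x[1], np.sin(x[0]) + 3.0 * x[1]])

    def J(x):
        return np.array([[2.0 * x[0], 1.0],
                         [np.cos(x[0]), 3.0]])

    x = np.array([0.3, -1.2])
    v = np.array([1.0, 2.0])
    omega = 1e-7
    sc = omega / np.linalg.norm(v)           # same scaling as KrylovJacobian.matvec
    approx = (F(x + sc * v) - F(x)) / sc
    assert np.allclose(approx, np.dot(J(x), v), atol=1e-5)
    return approx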
#------------------------------------------------------------------------------
# Wrapper functions
#------------------------------------------------------------------------------
def _nonlin_wrapper(name, jac):
"""
Construct a solver wrapper with given name and jacobian approx.
It inspects the keyword arguments of ``jac.__init__`` and allows the same
arguments to be used in the wrapper function, in addition to the
keyword arguments of `nonlin_solve`.
"""
import inspect
args, varargs, varkw, defaults = inspect.getargspec(jac.__init__)
kwargs = zip(args[-len(defaults):], defaults)
kw_str = ", ".join(["%s=%r" % (k, v) for k, v in kwargs])
if kw_str:
kw_str = ", " + kw_str
kwkw_str = ", ".join(["%s=%s" % (k, k) for k, v in kwargs])
if kwkw_str:
kwkw_str = kwkw_str + ", "
# Construct the wrapper function so that its keyword arguments
# are visible in pydoc.help etc.
wrapper = """
def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None,
f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None, **kw):
jac = %(jac)s(%(kwkw)s **kw)
return nonlin_solve(F, xin, jac, iter, verbose, maxiter,
f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search,
callback)
"""
wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__,
kwkw=kwkw_str)
ns = {}
ns.update(globals())
exec wrapper in ns
func = ns[name]
func.__doc__ = jac.__doc__
_set_doc(func)
return func
broyden1 = _nonlin_wrapper('broyden1', BroydenFirst)
broyden2 = _nonlin_wrapper('broyden2', BroydenSecond)
anderson = _nonlin_wrapper('anderson', Anderson)
linearmixing = _nonlin_wrapper('linearmixing', LinearMixing)
diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden)
excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing)
newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian)
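# --- Editor's note: illustrative usage sketch, not part of the original module.
# Solves a small nonlinear system with the `broyden1` wrapper generated above;
# the residual has the same form as the example used in scipy's nonlin documentation,
# and the helper name is made up.
def _broyden1_usage_demo():
    def residual(x):
        return np.cos(x) + x[::-1] - np.array([1.0, 2.0, 3.0, 4.0])
    x = broyden1(residual, [1, 1, 1, 1], f_tol=1e-10)
    assert np.allclose(residual(x), 0.0, atol=1e-8)
    return x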
# Deprecated functions
@np.deprecate
def broyden_generalized(*a, **kw):
"""Use *anderson(..., w0=0)* instead"""
kw.setdefault('w0', 0)
return anderson(*a, **kw)
@np.deprecate
def broyden1_modified(*a, **kw):
"""Use `broyden1` instead"""
return broyden1(*a, **kw)
@np.deprecate
def broyden_modified(*a, **kw):
"""Use `anderson` instead"""
return anderson(*a, **kw)
@np.deprecate
def anderson2(*a, **kw):
"""Use `anderson` instead"""
return anderson(*a, **kw)
@np.deprecate
def broyden3(*a, **kw):
"""Use `broyden2` instead"""
return broyden2(*a, **kw)
@np.deprecate
def vackar(*a, **kw):
"""Use `diagbroyden` instead"""
return diagbroyden(*a, **kw)
| gpl-3.0 |
mariocannistra/radio-astronomy | findsessionrange.py | 1 | 1973 | #!/usr/bin/python
# this source is part of my Hackster.io project: https://www.hackster.io/mariocannistra/radio-astronomy-with-rtl-sdr-raspberrypi-and-amazon-aws-iot-45b617
# this program will determine the overall range of signal strengths received during the whole session.
# this program can be run standalone but is usually run at end of session by doscan.py
# Its output will be stored in 2 files:
# dbminmax.txt and session-overview.png. The first contains two rows of text with just the maximum
# and minimum of the whole session. The second contains a chart of all the min and max values for each of
# the scan files.
from glob import glob
import numpy as np
import radioConfig
import subprocess
import os
import datetime
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
globmax = -9000
globmin = 9000
sessmin = np.empty(shape=[0, 1])
sessmax = np.empty(shape=[0, 1])
scantimeline = np.empty(shape=[0, 1])
files_in_dir = sorted(glob("*.csv"))
for fname in files_in_dir:
dbs = np.genfromtxt(fname,dtype='float',delimiter = ',', skip_header=0, skip_footer=0, usecols=(6,),usemask=True)
thismin=dbs.min()
thismax=dbs.max()
scantime=str(fname)[11:17]
print scantime,thismin,thismax
if thismin < globmin:
globmin = thismin
if thismax > globmax:
globmax = thismax
sessmin = np.append(sessmin, thismin)
sessmax = np.append(sessmax, thismax)
scantimeline = np.append(scantimeline, scantime)
mytitle = 'Signal strength range: min %f .. max %f' % (globmin,globmax)
print mytitle
xs = range(len(scantimeline))
plt.plot(xs,sessmin )
plt.plot(xs,sessmax )
plt.xticks(xs,scantimeline,rotation=70)
plt.grid()
plt.title(mytitle)
#plt.show()
plt.savefig('session-overview.png')
sessfile = open("dbminmax.txt", "w")
sessfile.write(str(globmax))
sessfile.write("\n")
sessfile.write(str(globmin))
sessfile.write("\n")
sessfile.close()
| mit |
TAMU-CPT/galaxy-tools | tools/genome_viz/brigaid.py | 1 | 36126 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
AUTHOR
Pedro Cerqueira
github: @pedrorvc
DESCRIPTION
This script serves to create xml files contaning the information necessary
for the execution of BRIG (Blast Ring Image Generator), reducing the time
performing the tedious task of setting up all the information on the GUI
and provides a quick way to produce an image.
The arguments for this script provide some (but not all)
of the available options in BRIG, which were the ones I used to change the most.
USAGE:
brigaid.py -q reference_sequence.fna -rfd path/to/reference/dir -od path/to/output/dir -of path/to/output/dir/output_file
-oi path/to/output/BRIG/output_image -t Image_title -a annotation_file.gbk --genes genes_of_interest.txt
--contig-order contig_order.tsv
"""
import argparse
import csv
import os
import xml.etree.ElementTree as ET
from collections import OrderedDict
from xml.dom import minidom
from Bio import SeqIO
from matplotlib import cm
def listdir_fullpath(path):
""" Gets the full path of the files from a directory
Args:
path (str): full path to a directory
Returns:
list containing the full path of every file contained in the input directory
"""
return [os.path.join(path, f) for f in os.listdir(path)]
def ring_attributes(colour, name, position):
""" Creates ring attributes.
Args:
colour (str): color of the ring.
name (str): name of the ring.
position (str): position of the ring.
Returns:
ring_attrs (dict): attributes of any regular ring of the BRIG xml.
"""
ring_attrs = {"colour" : colour,
"name": name,
"position" : position,
"upperInt" : "90",
"lowerInt" : "70",
"legend" : "yes",
"size" : "30",
"labels" : "no",
"blastType" : "blastn"}
return ring_attrs
def annotation_ring_attributes(position):
""" Creates annotation ring attributes.
Args:
position (str): position of the ring.
Returns:
annotation_ring_attrs (dict): attributes of the annotation ring of the BRIG xml.
"""
annotation_ring_attrs = {"colour" : '172,14,225',
"name": 'null',
"position" : position,
"upperInt" : "70",
"lowerInt" : "50",
"legend" : "yes",
"size" : "30",
"labels" : "no",
"blastType" : "blastn"}
return annotation_ring_attrs
def create_feature_attrs(label, colour, decoration, start, stop):
""" Create attributes for the Feature SubElements of the annotation ring.
Args:
label (str): name of the gene/CDS to annotate
colour (str): colour of the decoration for the annotation
decoration (str): shape of the gene/CDS to annotate, for example, 'clockwise-arrow'
start (str): start of the gene/CDS to annotate
stop (str): stop of the gene/CDS to annotate
Returns:
feature_element_attrs (dict): attributes of the feature element.
feature_range_element_attrs (dict): attributes of the feature range element
"""
feature_element_attrs = {'label' : label,
'colour' : colour,
'decoration' : decoration}
feature_range_element_attrs = {'start' : start,
'stop' : stop}
return feature_element_attrs, feature_range_element_attrs
def create_annotation_ring_tsv(annotation_ring, annotation_file):
""" Uses a tsv file to annotate the reference genome.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
annotation_file (str): Full path to the file containing annotations for the reference genome.
"""
with open(annotation_file) as tsvfile:
reader = csv.DictReader(tsvfile, dialect='excel-tab')
# Obtain the annotations from the file contents
for row in reader:
start = row['#START']
stop = row['STOP']
label = row['Label']
colour = row['Colour']
decoration = row['Decoration']
# Create xml attributes
feature_element_attrs, feature_range_element_attrs = create_feature_attrs(label, colour, decoration, start, stop)
# Create xml elements
feature_element = ET.SubElement(annotation_ring, 'feature', attrib=feature_element_attrs)
feature_range_element = ET.SubElement(feature_element, 'featureRange', attrib=feature_range_element_attrs)
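# --- Editor's note: illustrative sketch, not part of the original script. It writes
# a minimal tab-delimited annotation file in the layout expected by
# create_annotation_ring_tsv above (the header names mirror the DictReader keys);
# the coordinates, labels and file name are made up.
def _write_demo_annotation_tsv(path="demo_annotations.tsv"):
    rows = [("100", "1450", "dnaA", "black", "clockwise-arrow"),
            ("2000", "2900", "gyrB", "blue", "counterclockwise-arrow")]
    with open(path, "w") as handle:
        handle.write("#START\tSTOP\tLabel\tColour\tDecoration\n")
        for row in rows:
            handle.write("\t".join(row) + "\n")
    return path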
def annotation_ring_feature_elements_gbk_concat(annotation_ring, record, genome_size=False):
""" Creates the annotation ring feature elements, using a concatenated Genbank annotation file.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
record (SeqRecord): Object of BioPython containing the information of the input Genbank.
genome_size (int or bool): Size of the genome. Integer when a Genbank divided by contigs is provided.
False when a concatenated Genbank is provided.
"""
#if type(genome_size) == int:
# Obtain the features of the Genbank file records
for fea in record.features:
# Get the start and end position of the genome
# Also get the strand
if fea.type == 'CDS':
start = str(fea.location.start.position)
end = str(fea.location.end.position)
strand = fea.location.strand
# Get the label of the gene or product
if 'gene' in fea.qualifiers:
label = str(fea.qualifiers['gene'][0])
elif 'product' in fea.qualifiers:
product = fea.qualifiers['product'][0]
label = str(product)
else:
continue
# Define the decoration of the annotation based on the strand
if strand == -1:
decoration = 'counterclockwise-arrow'
elif strand == 1:
decoration = 'clockwise-arrow'
# Create xml attributes
feature_element_attrs, feature_range_element_attrs = create_feature_attrs(label, "black", decoration, start, end)
# Create xml elements
feature_element = ET.SubElement(annotation_ring, 'feature', attrib=feature_element_attrs)
feature_range_element = ET.SubElement(feature_element, 'featureRange', attrib=feature_range_element_attrs)
# If a genome size is provided, get the size of the records
if type(genome_size) == int:
if fea.type == 'source':
size = fea.location.end.position
try:
size
genome_size += size
return genome_size
except NameError:
pass
def annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, record, genes, genome_size=False):
""" Creates the annotation ring feature elements, using a concatenated Genbank annotation file
and specific gene annotations.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
record (SeqRecord): Object of BioPython containing the information of the input Genbank.
genes (list): Names of the genes/products to annotate.
genome_size (int or bool): Size of the genome. Integer when a Genbank divided by contigs is provided.
False when a concatenated Genbank is provided.
"""
for f in record.features:
if f.type == 'CDS':
# Find the 'gene' tag and determine if the gene belongs to the specified genes to be annotated
if 'gene' in f.qualifiers and f.qualifiers['gene'][0] in genes:
label = f.qualifiers['gene'][0]
elif 'product' in f.qualifiers and f.qualifiers['product'][0] in genes:
product = f.qualifiers['product'][0]
label = product
else:
continue
# Determine the start, stop and strand of the gene
start = str(f.location.start.position + genome_size)
end = str(f.location.end.position + genome_size)
strand = f.location.strand
# Define the decoration of the annotation based on the strand
if strand == -1:
decoration = 'counterclockwise-arrow'
elif strand == 1:
decoration = 'clockwise-arrow'
# Create xml attributes
feature_element_attrs, feature_range_element_attrs = create_feature_attrs(label, "black", decoration, start, end)
# Create xml elements
feature_element = ET.SubElement(annotation_ring, 'feature', attrib=feature_element_attrs)
feature_range_element = ET.SubElement(feature_element, 'featureRange', attrib=feature_range_element_attrs)
# If a genome size is provided, get the size of the records
if type(genome_size) == int:
if f.type == "source":
size = f.location.end.position
try:
size
genome_size += size
return genome_size
except NameError:
pass
def create_annotation_ring_gbk_concat(annotation_ring, annotation_file, genes_of_interest, records):
""" Create annotation ring using a concatenated Genbank annotation file.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
annotation_file (str): Full path to the file containing annotations for the reference genome.
genes_of_interest (str): Full path to the file containing the genes to search for in the Genbank file.
records (SeqRecord): Object of BioPython containing the information of the input Genbank.
"""
if genes_of_interest != []:
# Get the genes to search in the Genbank file
with open(genes_of_interest, "r") as f:
genes = f.readlines()
genes = [gene.rstrip() for gene in genes]
# Create feature elements of the annotation ring
for seq_record in records:
annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, seq_record, genes)
else:
for seq_record in records:
annotation_ring_feature_elements_gbk_concat(annotation_ring, seq_record)
def create_annotation_ring_gbk_contigs(annotation_ring, annotation_file, records, genes_of_interest, contig_order):
""" Create annotation ring using a Genbank annotation file divided by contigs.
Args:
annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes.
annotation_file (str): Full path to the file containing annotations for the reference genome.
genes_of_interest (str): Full path to the file containing the genes to search for in the Genbank file.
records (SeqRecord): Object of BioPython containing the information of the input Genbank.
contig_order (str): Full path to the file containing the order of the contigs.
"""
if contig_order != []:
with open(contig_order) as tsvfile:
reader = csv.DictReader(tsvfile, dialect='excel-tab')
# Create an OrderedDict with the contents of the file
# The keys are numbers representing the order of the contigs
# The values are the names of the contigs
content_dict = OrderedDict()
for r in reader:
content_dict[r["order"]] = r["contig"]
# Create an OrderedDict with the content of each contig
# The keys are the names of the contigs
# The values are SeqRecord objects from BioPython
seq_records_dict = OrderedDict()
for record in records:
seq_records_dict[record.id] = record
if genes_of_interest != []:
with open(genes_of_interest, "r") as f:
genes = f.readlines()
genes = [gene.rstrip() for gene in genes]
genome_size = 0
for i in range(1, len(records)+1):
ord_record = seq_records_dict[content_dict[str(i)]]
gsize = annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, ord_record, genes, genome_size)
genome_size = gsize
else:
genome_size = 0
for i in range(1, len(records)+1):
ord_record = seq_records_dict[content_dict[str(i)]]
gsize = annotation_ring_feature_elements_gbk_concat(annotation_ring, ord_record, genome_size)
genome_size = gsize
else:
if genes_of_interest != []:
with open(genes_of_interest, "r") as f:
genes = f.readlines()
genes = [gene.rstrip() for gene in genes]
for seq_record in records:
annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, seq_record, genes)
else:
for seq_record in records:
annotation_ring_feature_elements_gbk_concat(annotation_ring, seq_record)
def write_xml(root_elem, output_file):
""" Writes a xml file.
Args:
root_elem is a ElementTree Element object containing all the information
required for the output file.
output_file (str): full path to the output file
"""
xml_file = ET.tostring(root_elem, encoding='utf8').decode('utf8')
pretty_xml_file = minidom.parseString(xml_file).toprettyxml(indent=' ')
output_file = output_file + ".xml"
with open(output_file, "w") as f:
f.write(pretty_xml_file)
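# --- Editor's note: illustrative sketch, not part of the original script. It shows
# the ElementTree + minidom pattern used by write_xml above on a tiny made-up tree;
# the attribute values are placeholders, not a complete BRIG configuration.
def _write_xml_demo(output_prefix="brig_demo"):
    demo_root = ET.Element("BRIG", attrib={"title": "demo"})
    demo_ring = ET.SubElement(demo_root, "ring", attrib={"position": "2", "colour": "0,0,255"})
    ET.SubElement(demo_ring, "sequence", attrib={"location": "genome.fna"})
    write_xml(demo_root, output_prefix)      # produces 'brig_demo.xml' in the working directory
    return output_prefix + ".xml"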
####### Create xml elements
# Create root element
def create_root_element(blast_options, legend_position, query_file,
output_folder, image_output_file, title, image_format):
"""
Creates the root element of the xml file and its attributes.
Args:
blast_options (str): additional options for blast, for example, -evalue or num_threads
legend_position (str): position of the legend on the image
query_file (str): full path to the query file
output_folder (str): full path to the output folder
image_output_file (str): full path to the image output file
title (str): title of the output image
image_format (str): format of the image output file
Returns:
root: ElementTree Element object containing the BRIG tag and its attributes
"""
root_attrs = {"blastOptions" : blast_options,
"legendPosition" : legend_position,
"queryFile" : query_file,
"outputFolder" : output_folder,
"blastPlus" : "yes",
"outputFile" : os.path.join(output_folder, image_output_file),
"title" : title,
"imageFormat" : image_format,
"queryFastaFile" : query_file,
"cgXML" : os.path.join(output_folder + "/scratch", os.path.basename(query_file) + ".xml")}
root = ET.Element('BRIG', attrib=root_attrs)
return root
#### Create root children
# Create cgview_settings element
def create_cgview_settings_element(root, height, width):
""" Creates the cgview_settings element of the xml file and its attributes.
Args:
root: ElementTree Element object containing the BRIG tag and its attributes.
height (str): height of the output image in pixels
width (str): width of the output image in pixels
Returns:
cgview_settings: ElementTree SubElement object containing the cgview settings tag and its attributes
"""
cgview_settings_attrs = {"arrowheadLength" : "medium",
"backboneColor" : "black",
"backboneRadius" : "600",
"backboneThickness" : "medium",
"backgroundColor" : "white",
"borderColor" : "black",
"featureSlotSpacing" : "medium",
"featureThickness" : "30",
"giveFeaturePositions" : "false",
"globalLabel" : "true",
"height" : height,
"isLinear" : "false",
"labelFont" : "SansSerif,plain,25",
"labelLineLength" : "medium",
"labelLineThickness" : "medium",
"labelPlacementQuality" : "best",
"labelsToKeep" : "1000",
"longTickColor" : "black",
"minimumFeatureLength" : "medium",
"moveInnerLabelsToOuter" :"true",
"origin" : "12",
"rulerFont" : "SansSerif,plain,35",
"rulerFontColor" : "black",
"rulerPadding" : "40",
"rulerUnits" : "bases",
"shortTickColor" : "black",
"shortTickThickness" : "medium",
"showBorder" : "false",
"showShading" : "true",
"showWarning" : "false",
"tickDensity" : "0.2333",
"tickThickness" : "medium",
"titleFont" : "SansSerif,plain,45",
"titleFontColor" : "black",
"useColoredLabelBackgrounds" : "false",
"useInnerLabels" : "true",
"warningFont" : "Default,plain,35",
"warningFontColor" : "black",
"width" : width,
"zeroTickColor" : "black",
"tickLength" : "medium"}
cgview_settings = ET.SubElement(root, 'cgview_settings', attrib=cgview_settings_attrs)
return cgview_settings
# Create brig_settings element
def create_brig_settings_element(root, java_memory):
""" Creates the brig_settings element of the xml file and its attributes.
Args:
root: ElementTree Element object containing the BRIG tag and its attributes.
java_memory (str): amount of memory (in bytes) java is allowed to use for BRIG
Returns:
brig_settings: ElementTree SubElement object containing the brig settings tag and its attributes
"""
brig_settings_attrs = {"Ring1" : "172,14,225",
"Ring2" : "222,149,220",
"Ring3" : "161,221,231",
"Ring4" : "49,34,221",
"Ring5" : "116,152,226",
"Ring6" : "224,206,38",
"Ring7" : "40,191,140",
"Ring8" : "158,223,139",
"Ring9" : "226,38,122",
"Ring10" :"211,41,77",
"defaultUpper" : "70",
"defaultLower" : "50",
"defaultMinimum" : "50",
"genbankFiles" : "gbk,gb,genbank",
"fastaFiles" : "fna,faa,fas,fasta,fa",
"emblFiles" : "embl",
"blastLocation" : "",
"divider" : "3",
"multiplier" : "3",
"memory" : java_memory,
"defaultSpacer" : "0"}
brig_settings = ET.SubElement(root,
"brig_settings",
attrib=brig_settings_attrs)
return brig_settings
## Create special element
def create_special_element(root):
"""Creates the 'special' element of the xml file and its attributes
Args:
root: ElementTree Element object containing the BRIG tag and its attributes.
Returns:
gc_content_special: ElementTree SubElement object containing the 'special' tag and its attributes
gc_skew_special: ElementTree SubElement object containing the 'special' tag and its attributes
"""
gc_content_special = ET.SubElement(root, 'special', attrib={'value' : 'GC Content'})
gc_skew_special = ET.SubElement(root, 'special', attrib={'value' : 'GC Skew'})
return gc_content_special, gc_skew_special
# Create reference dir element
def create_reference_directory_element(root, reference_directory):
""" Creates the 'reference directory' element of the xml file and its attributes.
Args:
root: ElementTree Element object containing the 'BRIG' tag and its attributes.
reference_directory (str): full path to the reference directory that contains
the fasta files used to build the rings.
Returns:
ref_file: ElementTree SubElement object containing the 'refFile' tag and its attributes
"""
ref_dir = ET.SubElement(root,
"refDir",
attrib={"location" : reference_directory})
# Obtain the full path for all the files in the directory
ref_dir_list = listdir_fullpath(reference_directory)
for f in ref_dir_list:
ref_file = ET.SubElement(ref_dir,
"refFile",
attrib={"location" : f})
return ref_file
# Create the ring where the annotations are defined
def create_annotation_ring(root, reference_directory, annotation_file, genes_of_interest, contig_order):
""" Creates the ring that will contain the annotations for the reference genome.
Args:
root: ElementTree Element object containing the 'BRIG' tag and its attributes.
reference_directory (str): full path to the reference directory that contains
the fasta files used to build the rings.
annotation_file (str): Full path to the file containing annotations for the reference genome.
genes_of_interest (str): Full path to the file containing a list of specific genes.
contig_order (str): Full path to the tab-delimited file containing the order of the contigs.
"""
# Determine the position of the annotation ring, which will be the position after the last reference genome
ring_position = len(os.listdir(reference_directory)) + 2
# Create the annotation ring element
annotation_ring = ET.SubElement(root, 'ring', attrib=annotation_ring_attributes(str(ring_position)))
# Check for tab-delimited annotation file input
if list(SeqIO.parse(annotation_file, "genbank")) == []:
create_annotation_ring_tsv(annotation_ring, annotation_file)
else:
# Get the records of the Genbank file
records = [r for r in SeqIO.parse(annotation_file, "genbank")]
### Check if a contig order file has been provided
if len(records) > 1: # If more than 1 record exists, then the Genbank file is divided by contigs
create_annotation_ring_gbk_contigs(annotation_ring, annotation_file, records, genes_of_interest, contig_order)
else:
create_annotation_ring_gbk_concat(annotation_ring, annotation_file, genes_of_interest, records)
## Create remaining rings
def create_ring_element(root, reference_directory, colormap):
""" Creates the ring elements of the xml file, containing the position and color of the rings.
Args:
root: ElementTree Element object containing the 'BRIG' tag and its attributes.
reference_directory (str): full path to the reference directory that contains
the fasta files used to build the rings.
colormap (str): name of the colormap (available in matplotlib) to use for the color of the rings
Returns:
ring_number_element: ElementTree SubElement object containing the 'ring' tag and its attributes
ring_sequence_element: ElementTree SubElement object containing the 'sequence' tag and its attributes
"""
ref_dir_list = listdir_fullpath(reference_directory)
# Gets the colormap from matplotlib with as many colors as the number of files
cmap = cm.get_cmap(colormap, len(ref_dir_list))
list_colormap = cmap.colors.tolist()
# Remove the fourth element (transparency) because it is not necessary
colors_to_use = []
for l in list_colormap:
convert = [round(x * 255) for x in l]
convert.pop()
colors_to_use.append(convert)
#reversed_colors_to_use = colors_to_use[::-1]
# Check if the user provided an order for the rings
has_digit = [os.path.basename(x).split("_")[0].isdigit() for x in ref_dir_list]
if True in has_digit:
# Obtain the ring positions
ring_positions = [os.path.basename(x).split("_")[0] for x in ref_dir_list]
# Reverse sort the positions of the rings, because they will be created
# in a descending order of their positions
ring_positions.sort(reverse=True)
ref_dir_list.sort(reverse=True)
for ring in range(len(ref_dir_list)):
# The ring positions start at 2 due to the special rings (GC Content and GC Skew)
ring_position = int(ring_positions[ring]) + 1
# Select a color for the ring
ring_color = ",".join([str(e) for e in colors_to_use[ring]])
# Define the name of the ring
ring_name = os.path.basename(ref_dir_list[ring]).split("_")[1]
# Create the xml elements
ring_number_element = ET.SubElement(root,
'ring',
ring_attributes(ring_color, ring_name, str(ring_position)))
ring_sequence_element = ET.SubElement(ring_number_element,
"sequence",
attrib={"location" : ref_dir_list[ring]})
else:
# Sort files by lowercase
ref_dir_list.sort(key=lambda y: y.lower())
# The number of rings starts at 2 due to the GC Content and GC Skew
ring_number = len(ref_dir_list) + 1
for ring in range(len(ref_dir_list)):
# Select a color for the ring
ring_color = ",".join([str(e) for e in colors_to_use[ring]])
# Define the name of the ring
ring_name = os.path.basename(ref_dir_list[ring]).split("_")[0]
# Create the xml elements
ring_number_element = ET.SubElement(root,
'ring',
ring_attributes(ring_color, ring_name, str(ring_number)))
ring_sequence_element = ET.SubElement(ring_number_element,
"sequence",
attrib={"location" : ref_dir_list[ring]})
ring_number -= 1
return ring_number_element, ring_sequence_element
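# --- Editor's note: illustrative sketch, not part of the original script. It isolates
# the colormap-to-"R,G,B" string conversion used by create_ring_element above; the
# colormap name and ring count are arbitrary, and the exact values depend on the
# installed matplotlib version.
def _ring_colours_demo(colormap="viridis", n_rings=4):
    cmap = cm.get_cmap(colormap, n_rings)
    colours = []
    for rgba in cmap.colors.tolist():
        rgb = [round(channel * 255) for channel in rgba[:3]]   # drop the alpha channel
        colours.append(",".join(str(int(channel)) for channel in rgb))
    return colours          # e.g. ['68,1,84', ...] for viridis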
## Create special rings
def create_special_ring_element(root):
""" Create the 'special' ring element and its attributes.
Args:
root: ElementTree Element object containing the 'BRIG' tag and its attributes.
Returns:
gc_content_location: ElementTree SubElement object containing the 'sequence' tag and its attributes
gc_skew_location: ElementTree SubElement object containing the 'sequence' tag and its attributes
"""
# Create ring attributes
gc_content_ring_attrs = ring_attributes('225,0,0', "GC Content", "0")
gc_skew_ring_attrs = ring_attributes('225,0,0', "GC Skew", "1")
# Add ring element to root
gc_skew_ring = ET.SubElement(root, 'ring', attrib=gc_skew_ring_attrs)
gc_content_ring = ET.SubElement(root, 'ring', attrib=gc_content_ring_attrs)
# Add sequence element to ring
gc_content_location = ET.SubElement(gc_content_ring, 'sequence', attrib={'location' : 'GC Content'})
gc_skew_location = ET.SubElement(gc_skew_ring, 'sequence', attrib={'location' : 'GC Skew'})
return gc_content_location, gc_skew_location
def main(query_file, reference_directory, output_folder, output_xml, image_output_file, title, annotation_file,
genes_of_interest, contig_order, blast_options, legend_position, image_format, height, width, java_memory, colormap):
root = create_root_element(blast_options, legend_position, query_file,
output_folder, image_output_file, title, image_format)
cgview_settings = create_cgview_settings_element(root, height, width)
brig_settings = create_brig_settings_element(root, java_memory)
special = create_special_element(root)
refdir = create_reference_directory_element(root, reference_directory)
if annotation_file:
create_annotation_ring(root, reference_directory, annotation_file, genes_of_interest, contig_order)
rings = create_ring_element(root, reference_directory, colormap)
special_ring = create_special_ring_element(root)
write_xml(root, output_xml)
print("\n File written to {}".format(output_xml))
def parse_arguments():
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-q', '--query', type=str, required=True, dest='query_file',
help='Path to the query/reference FASTA file.')
parser.add_argument('-rfd', '--ref_dir', type=str, required=True, dest='reference_directory',
help='Path to the directory where the FASTA files to compare against the reference are located.')
parser.add_argument('-od', '--out_dir', type=str, required=True, dest='output_folder',
help='Path to the output directory for the results of BRIG.')
parser.add_argument('-of', '--out_xml', type=str, required=True, dest='output_file',
help='Path to the output of this script.')
parser.add_argument('-oi', '--out_img', type=str, required=True, dest='image_output_file',
help='Path to the output file of the resulting image of BRIG.')
parser.add_argument('-t', '--title', type=str, required=True, dest='title',
help='Title of the resulting image from BRIG.')
parser.add_argument('-a', '--annotation', type=str, required=False, dest='annotation_file', default=False,
help='File containing annotations for the reference genome. '
'The annotation file can be a tab-delimited file (.tsv) or a Genbank format file (.gbk, .gb)')
parser.add_argument('--genes', type=str, required=False, dest='genes_of_interest', default=[],
help='File containing a list of specific genes (one gene per line) to search when a Genbank annotation file is provided. ')
parser.add_argument('--contig_order', type=str, required=False, dest='contig_order', default=[],
help='Tab-delimited file containing the order of the contigs when a Genbank (divided by contigs) annotation file is provided. '
'Example: order contig '
'1 Contig8')
parser.add_argument('-b', '--blast_options', type=str, required=False, dest="blast_options", default="-evalue 0.001 -num_threads 6",
help='Options for running BLAST.')
parser.add_argument('-l', '--legend_pos', type=str, required=False, dest="legend_position", default="middle-right",
help='Position of the legend on the resulting image. '
'The options available are upper, center or lower, '
'paired with left, center or right')
parser.add_argument('-if', '--image_format', type=str, required=False, dest="image_format", default="jpg",
help='Format of the resulting image file. '
'The available options are: jpg, png, svg or svgz.')
parser.add_argument('-ht', '--height', type=str, required=False, dest="height", default="3000",
help='Height (in pixels) of the resulting image.')
parser.add_argument('-wd', '--width', type=str, required=False, dest="width", default="3000",
help='Width (in pixels) of the resulting image.')
parser.add_argument('-jm', '--java_memory', type=str, required=False, dest="java_memory", default="1500",
help='Amount of memory (in bytes) that Java is allowed to use for BRIG.')
parser.add_argument('-cm', '--colormap', type=str, required=False, dest="colormap", default="viridis",
help='Colormap from matplotlib to use for the color of the rings. '
'The available options are: viridis, plasma, inferno, magma and cividis. '
'More options for colormaps at: '
'https://matplotlib.org/users/colormaps.html')
args = parser.parse_args()
return [args.query_file, args.reference_directory, args.output_folder, args.output_file,
args.image_output_file, args.title, args.annotation_file, args.genes_of_interest, args.contig_order,
args.blast_options, args.legend_position, args.image_format, args.height, args.width, args.java_memory, args.colormap]
if __name__ == '__main__':
args = parse_arguments()
main(args[0], args[1], args[2], args[3], args[4], args[5], args[6],
args[7], args[8], args[9], args[10], args[11], args[12], args[13],
args[14], args[15])
| gpl-3.0 |
yuanagain/seniorthesis | venv/lib/python2.7/site-packages/matplotlib/tests/test_basic.py | 7 | 1290 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from nose.tools import assert_equal
from matplotlib.testing.decorators import knownfailureif
from pylab import *
def test_simple():
assert_equal(1 + 1, 2)
@knownfailureif(True)
def test_simple_knownfail():
# Test the known fail mechanism.
assert_equal(1 + 1, 3)
def test_override_builtins():
ok_to_override = set([
'__name__',
'__doc__',
'__package__',
'__loader__',
'__spec__',
'any',
'all',
'sum'
])
# We could use six.moves.builtins here, but that seems
# to do a little more than just this.
if six.PY3:
builtins = sys.modules['builtins']
else:
builtins = sys.modules['__builtin__']
overridden = False
for key in globals().keys():
if key in dir(builtins):
if (globals()[key] != getattr(builtins, key) and
key not in ok_to_override):
print("'%s' was overridden in globals()." % key)
overridden = True
assert not overridden
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/pandas/tests/test_algos.py | 9 | 24744 | # -*- coding: utf-8 -*-
from pandas.compat import range
import numpy as np
from numpy.random import RandomState
from pandas.core.api import Series, Categorical, CategoricalIndex
import pandas as pd
from pandas import compat
import pandas.core.algorithms as algos
import pandas.util.testing as tm
import pandas.hashtable as hashtable
class TestMatch(tm.TestCase):
_multiprocess_can_split_ = True
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0])
self.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = pd.Series(np.arange(5),dtype=np.float32)
result = algos.match(s, [2,4])
expected = np.array([-1, -1, 0, -1, 1])
self.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2,4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result,expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1])
self.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result,expected)
class TestFactorize(tm.TestCase):
_multiprocess_can_split_ = True
def test_warn(self):
s = Series([1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
algos.factorize(s, order='A')
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'])
# self.assert_numpy_array_equal(labels, np.array([ 0, 1, 1, 0, 0, 2, 2, 2],dtype=np.int64))
self.assert_numpy_array_equal(uniques, np.array(['a','b','c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
self.assert_numpy_array_equal(labels, np.array([ 0, 1, 1, 0, 0, 2, 2, 2],dtype=np.int64))
self.assert_numpy_array_equal(uniques, np.array(['a','b','c'], dtype=object))
labels, uniques = algos.factorize(list(reversed(range(5))))
self.assert_numpy_array_equal(labels, np.array([0, 1, 2, 3, 4], dtype=np.int64))
self.assert_numpy_array_equal(uniques, np.array([ 4, 3, 2, 1, 0],dtype=np.int64))
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
self.assert_numpy_array_equal(labels, np.array([ 4, 3, 2, 1, 0],dtype=np.int64))
self.assert_numpy_array_equal(uniques, np.array([0, 1, 2, 3, 4], dtype=np.int64))
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
self.assert_numpy_array_equal(labels, np.array([0., 1., 2., 3., 4.], dtype=np.float64))
self.assert_numpy_array_equal(uniques, np.array([ 4, 3, 2, 1, 0],dtype=np.int64))
labels, uniques = algos.factorize(list(reversed(np.arange(5.))), sort=True)
self.assert_numpy_array_equal(labels, np.array([ 4, 3, 2, 1, 0],dtype=np.int64))
self.assert_numpy_array_equal(uniques, np.array([0., 1., 2., 3., 4.], dtype=np.float64))
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
self.assert_numpy_array_equal(labels, np.array([ 0, 0, -1, 1, 2, 3],dtype=np.int64))
self.assert_numpy_array_equal(uniques, np.array(['A', 'B', 3.14, np.inf], dtype=object))
labels, uniques = algos.factorize(x, sort=True)
self.assert_numpy_array_equal(labels, np.array([ 2, 2, -1, 3, 0, 1],dtype=np.int64))
self.assert_numpy_array_equal(uniques, np.array([3.14, np.inf, 'A', 'B'], dtype=object))
def test_datelike(self):
# M8
v1 = pd.Timestamp('20130101 09:00:00.00004')
v2 = pd.Timestamp('20130101')
x = Series([v1,v1,v1,v2,v2,v1])
labels, uniques = algos.factorize(x)
self.assert_numpy_array_equal(labels, np.array([ 0,0,0,1,1,0],dtype=np.int64))
self.assert_numpy_array_equal(uniques, np.array([v1.value,v2.value],dtype='M8[ns]'))
labels, uniques = algos.factorize(x, sort=True)
self.assert_numpy_array_equal(labels, np.array([ 1,1,1,0,0,1],dtype=np.int64))
self.assert_numpy_array_equal(uniques, np.array([v2.value,v1.value],dtype='M8[ns]'))
# period
v1 = pd.Period('201302',freq='M')
v2 = pd.Period('201303',freq='M')
x = Series([v1,v1,v1,v2,v2,v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
self.assert_numpy_array_equal(labels, np.array([ 0,0,0,1,1,0],dtype=np.int64))
self.assert_numpy_array_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x,sort=True)
self.assert_numpy_array_equal(labels, np.array([ 0,0,0,1,1,0],dtype=np.int64))
self.assert_numpy_array_equal(uniques, pd.PeriodIndex([v1, v2]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = hashtable.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
self.assertEqual(len(set(key)), len(set(expected)))
self.assertTrue(np.array_equal(pd.isnull(key), expected == na_sentinel))
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel)
expected = np.array([ 2, -1, 0], dtype='int32')
self.assertEqual(len(set(key)), len(set(expected)))
self.assertTrue(np.array_equal(pd.isnull(key), expected == na_sentinel))
def test_vector_resize(self):
# Test for memory errors after internal vector
# reallocations (pull request #7157)
def _test_vector_resize(htable, uniques, dtype, nvals):
vals = np.array(np.random.randn(1000), dtype=dtype)
# get_labels appends to the vector
htable.get_labels(vals[:nvals], uniques, 0, -1)
# to_array resizes the vector
uniques.to_array()
htable.get_labels(vals, uniques, 0, -1)
test_cases = [
(hashtable.PyObjectHashTable, hashtable.ObjectVector, 'object'),
(hashtable.Float64HashTable, hashtable.Float64Vector, 'float64'),
(hashtable.Int64HashTable, hashtable.Int64Vector, 'int64')]
for (tbl, vect, dtype) in test_cases:
# resizing to empty is a special case
_test_vector_resize(tbl(), vect(), dtype, 0)
_test_vector_resize(tbl(), vect(), dtype, 10)
class TestIndexer(tm.TestCase):
_multiprocess_can_split_ = True
def test_outer_join_indexer(self):
typemap = [('int32', algos.algos.outer_join_indexer_int32),
('int64', algos.algos.outer_join_indexer_int64),
('float32', algos.algos.outer_join_indexer_float32),
('float64', algos.algos.outer_join_indexer_float64),
('object', algos.algos.outer_join_indexer_object)]
for dtype, indexer in typemap:
left = np.arange(3, dtype = dtype)
right = np.arange(2,5, dtype = dtype)
empty = np.array([], dtype = dtype)
result, lindexer, rindexer = indexer(left, right)
tm.assertIsInstance(result, np.ndarray)
tm.assertIsInstance(lindexer, np.ndarray)
tm.assertIsInstance(rindexer, np.ndarray)
tm.assert_numpy_array_equal(result, np.arange(5, dtype = dtype))
tm.assert_numpy_array_equal(lindexer, np.array([0, 1, 2, -1, -1]))
tm.assert_numpy_array_equal(rindexer, np.array([-1, -1, 0, 1, 2]))
result, lindexer, rindexer = indexer(empty, right)
tm.assert_numpy_array_equal(result, right)
tm.assert_numpy_array_equal(lindexer, np.array([-1, -1, -1]))
tm.assert_numpy_array_equal(rindexer, np.array([0, 1, 2]))
result, lindexer, rindexer = indexer(left, empty)
tm.assert_numpy_array_equal(result, left)
tm.assert_numpy_array_equal(lindexer, np.array([0, 1, 2]))
tm.assert_numpy_array_equal(rindexer, np.array([-1, -1, -1]))
class TestUnique(tm.TestCase):
_multiprocess_can_split_ = True
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
tm.assertIsInstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
tm.assertIsInstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5),
np.tile(np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np.array(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'], dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
self.assertEqual(result.dtype, expected.dtype)
s = pd.Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
self.assertEqual(result.dtype, expected.dtype)
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
self.assertEqual(result.dtype, expected.dtype)
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
tm.assert_numpy_array_equal(result, expected)
self.assertEqual(result.dtype, expected.dtype)
s = pd.Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
self.assertEqual(result.dtype, expected.dtype)
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
self.assertEqual(result.dtype, expected.dtype)
class TestIsin(tm.TestCase):
_multiprocess_can_split_ = True
def test_invalid(self):
self.assertRaises(TypeError, lambda : algos.isin(1,1))
self.assertRaises(TypeError, lambda : algos.isin(1,[1]))
self.assertRaises(TypeError, lambda : algos.isin([1],1))
def test_basic(self):
result = algos.isin([1,2],[1])
expected = np.array([True,False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(np.array([1,2]),[1])
expected = np.array([True,False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(pd.Series([1,2]),[1])
expected = np.array([True,False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(pd.Series([1,2]),pd.Series([1]))
expected = np.array([True,False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a','b'],['a'])
expected = np.array([True,False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(pd.Series(['a','b']),pd.Series(['a']))
expected = np.array([True,False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a','b'],[1])
expected = np.array([False,False])
tm.assert_numpy_array_equal(result, expected)
arr = pd.date_range('20130101',periods=3).values
result = algos.isin(arr,[arr[0]])
expected = np.array([True,False,False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr,arr[0:2])
expected = np.array([True,True,False])
tm.assert_numpy_array_equal(result, expected)
arr = pd.timedelta_range('1 day',periods=3).values
result = algos.isin(arr,[arr[0]])
expected = np.array([True,False,False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = pd.date_range('20000101',periods=2000000,freq='s').values
result = algos.isin(s,s[0:2])
expected = np.zeros(len(s),dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
class TestValueCounts(tm.TestCase):
_multiprocess_can_split_ = True
def test_value_counts(self):
np.random.seed(1234)
from pandas.tools.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
tm.assertIsInstance(factor, Categorical)
result = algos.value_counts(factor)
cats = ['(-1.194, -0.535]',
'(-0.535, 0.121]',
'(0.121, 0.777]',
'(0.777, 1.433]'
]
expected_index = CategoricalIndex(cats, cats, ordered=True)
expected = Series([1, 1, 1, 1],
index=expected_index)
tm.assert_series_equal(result.sort_index(), expected.sort_index())
def test_value_counts_bins(self):
s = [1, 2, 3, 4]
result = algos.value_counts(s, bins=1)
self.assertEqual(result.tolist(), [4])
self.assertEqual(result.index[0], 0.997)
result = algos.value_counts(s, bins=2, sort=False)
self.assertEqual(result.tolist(), [2, 2])
self.assertEqual(result.index[0], 0.997)
self.assertEqual(result.index[1], 2.5)
def test_value_counts_dtypes(self):
result = algos.value_counts([1, 1.])
self.assertEqual(len(result), 1)
result = algos.value_counts([1, 1.], bins=1)
self.assertEqual(len(result), 1)
result = algos.value_counts(Series([1, 1., '1'])) # object
self.assertEqual(len(result), 2)
self.assertRaises(TypeError, lambda s: algos.value_counts(s, bins=1), ['1', 1])
def test_value_counts_nat(self):
td = Series([np.timedelta64(10000), pd.NaT], dtype='timedelta64[ns]')
dt = pd.to_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.value_counts(s)
vc_with_na = algos.value_counts(s, dropna=False)
self.assertEqual(len(vc), 1)
self.assertEqual(len(vc_with_na), 2)
exp_dt = pd.Series({pd.Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_series_equal(algos.value_counts(dt), exp_dt)
# TODO same for (timedelta)
def test_categorical(self):
s = Series(pd.Categorical(list('aaabbc')))
result = s.value_counts()
expected = pd.Series([3, 2, 1], index=pd.CategoricalIndex(['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.value_counts()
expected.index = expected.index.as_ordered()
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Series(pd.Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.value_counts()
expected = pd.Series([4, 3, 2],
index=pd.CategoricalIndex(['a', 'b', 'c'],
categories=['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = pd.Series([4, 3, 2, 1], index=pd.CategoricalIndex(
['a', 'b', 'c', np.nan]))
tm.assert_series_equal(result, expected, check_index_type=True)
# out of order
s = Series(pd.Categorical(list('aaaaabbbcc'),
ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.value_counts()
expected = pd.Series([4, 3, 2],
index=pd.CategoricalIndex(['a', 'b', 'c'],
categories=['b', 'a', 'c'],
ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = pd.Series([4, 3, 2, 1], index=pd.CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Series(pd.Categorical(list('bbbaac'), categories=list('abcd'),
ordered=True))
result = s.value_counts()
expected = Series([3, 2, 1, 0], index=pd.Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_dropna(self):
# https://github.com/pydata/pandas/issues/9443#issuecomment-73719328
tm.assert_series_equal(
pd.Series([True, True, False]).value_counts(dropna=True),
pd.Series([2, 1], index=[True, False]))
tm.assert_series_equal(
pd.Series([True, True, False]).value_counts(dropna=False),
pd.Series([2, 1], index=[True, False]))
tm.assert_series_equal(
pd.Series([True, True, False, None]).value_counts(dropna=True),
pd.Series([2, 1], index=[True, False]))
tm.assert_series_equal(
pd.Series([True, True, False, None]).value_counts(dropna=False),
pd.Series([2, 1, 1], index=[True, False, np.nan]))
tm.assert_series_equal(
pd.Series([10.3, 5., 5.]).value_counts(dropna=True),
pd.Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
pd.Series([10.3, 5., 5.]).value_counts(dropna=False),
pd.Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
pd.Series([10.3, 5., 5., None]).value_counts(dropna=True),
pd.Series([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
tm.assert_series_equal(
pd.Series([10.3, 5., 5., None]).value_counts(dropna=False),
pd.Series([2, 1, 1], index=[5., 10.3, np.nan]))
class GroupVarTestMixin(object):
def test_group_var_generic_1d(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 1))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(15, 1).astype(self.dtype)
labels = np.tile(np.arange(5), (3, )).astype('int64')
expected_out = (np.squeeze(values)
.reshape((5, 3), order='F')
.std(axis=1, ddof=1) ** 2)[:, np.newaxis]
expected_counts = counts + 3
self.algo(out, counts, values, labels)
np.testing.assert_allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_1d_flat_labels(self):
prng = RandomState(1234)
out = (np.nan * np.ones((1, 1))).astype(self.dtype)
counts = np.zeros(1, dtype='int64')
values = 10 * prng.rand(5, 1).astype(self.dtype)
labels = np.zeros(5, dtype='int64')
expected_out = np.array([[values.std(ddof=1) ** 2]])
expected_counts = counts + 5
self.algo(out, counts, values, labels)
np.testing.assert_allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_all_finite(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 2))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(10, 2).astype(self.dtype)
labels = np.tile(np.arange(5), (2, )).astype('int64')
expected_out = np.std(
values.reshape(2, 5, 2), ddof=1, axis=0) ** 2
expected_counts = counts + 2
self.algo(out, counts, values, labels)
np.testing.assert_allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_generic_2d_some_nan(self):
prng = RandomState(1234)
out = (np.nan * np.ones((5, 2))).astype(self.dtype)
counts = np.zeros(5, dtype='int64')
values = 10 * prng.rand(10, 2).astype(self.dtype)
values[:, 1] = np.nan
labels = np.tile(np.arange(5), (2, )).astype('int64')
expected_out = np.vstack([
values[:, 0].reshape(5, 2, order='F').std(ddof=1, axis=1) ** 2,
np.nan * np.ones(5)
]).T
expected_counts = counts + 2
self.algo(out, counts, values, labels)
np.testing.assert_allclose(out, expected_out, self.rtol)
tm.assert_numpy_array_equal(counts, expected_counts)
def test_group_var_constant(self):
# Regression test from GH 10448.
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0],dtype='int64')
values = 0.832845131556193 * np.ones((3, 1), dtype=self.dtype)
labels = np.zeros(3, dtype='int64')
self.algo(out, counts, values, labels)
self.assertEqual(counts[0], 3)
self.assertTrue(out[0, 0] >= 0) # Python 2.6 has no assertGreaterEqual
tm.assert_almost_equal(out[0, 0], 0.0)
class TestGroupVarFloat64(tm.TestCase, GroupVarTestMixin):
__test__ = True
_multiprocess_can_split_ = True
algo = algos.algos.group_var_float64
dtype = np.float64
rtol = 1e-5
def test_group_var_large_inputs(self):
prng = RandomState(1234)
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0],dtype='int64')
values = (prng.rand(10 ** 6) + 10 ** 12).astype(self.dtype)
values.shape = (10 ** 6, 1)
labels = np.zeros(10 ** 6, dtype='int64')
self.algo(out, counts, values, labels)
self.assertEqual(counts[0], 10 ** 6)
tm.assert_almost_equal(out[0, 0], 1.0 / 12, check_less_precise=True)
class TestGroupVarFloat32(tm.TestCase, GroupVarTestMixin):
__test__ = True
_multiprocess_can_split_ = True
algo = algos.algos.group_var_float32
dtype = np.float32
rtol = 1e-2
def test_quantile():
s = Series(np.random.randn(100))
result = algos.quantile(s, [0, .25, .5, .75, 1.])
expected = algos.quantile(s.values, [0, .25, .5, .75, 1.])
tm.assert_almost_equal(result, expected)
def test_unique_label_indices():
from pandas.hashtable import unique_label_indices
a = np.random.randint(1, 1 << 10, 1 << 15).astype('i8')
left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1]
tm.assert_numpy_array_equal(left, right)
a[np.random.choice(len(a), 10)] = -1
    left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1][1:]
tm.assert_numpy_array_equal(left, right)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-2.0 |
hoytak/SFrame | oss_src/unity/python/sframe/test/test_dataframe.py | 5 | 1775 | '''
Copyright (C) 2016 Turi
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
import unittest
import pandas
import array
from .. import SFrame
from pandas.util.testing import assert_frame_equal
from sys import version_info
class DataFrameTest(unittest.TestCase):
def test_empty(self):
expected = pandas.DataFrame()
assert_frame_equal(SFrame(expected).to_dataframe(), expected)
expected['int'] = []
expected['float'] = []
expected['str'] = []
assert_frame_equal(SFrame(expected).to_dataframe(), expected)
def test_simple_dataframe(self):
expected = pandas.DataFrame()
expected['int'] = [i for i in range(10)]
expected['float'] = [float(i) for i in range(10)]
expected['str'] = [str(i) for i in range(10)]
if version_info.major == 2:
expected['unicode'] = [unicode(i) for i in range(10)]
expected['array'] = [array.array('d', [i]) for i in range(10)]
expected['ls'] = [[str(i)] for i in range(10)]
assert_frame_equal(SFrame(expected).to_dataframe(), expected)
def test_sparse_dataframe(self):
expected = pandas.DataFrame()
expected['sparse_int'] = [i if i % 2 == 0 else None for i in range(10)]
expected['sparse_float'] = [float(i) if i % 2 == 1 else None for i in range(10)]
expected['sparse_str'] = [str(i) if i % 3 == 0 else None for i in range(10)]
expected['sparse_array'] = [array.array('d', [i]) if i % 5 == 0 else None for i in range(10)]
expected['sparse_list'] = [[str(i)] if i % 7 == 0 else None for i in range(10)]
assert_frame_equal(SFrame(expected).to_dataframe(), expected)
| bsd-3-clause |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Geneva_noRot_cont/Geneva_noRot_cont_age5/UV2.py | 33 | 7365 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for other people's data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
	ax.add_patch(patch3)
	ax.add_patch(patch2)
	ax.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
			concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
#change desired lines here!
line = [18, #1549
19, #1640
20, #1665
21, #1671
23, #1750
24, #1860
25, #1888
26, #1907
27, #2297
28, #2321
29, #2471
30, #2326
31, #2335
32, #2665
33, #2798
34] #2803
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("UV Lines Continued", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('UV_Lines_cntd.pdf')
plt.clf()
| gpl-2.0 |
sumspr/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
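# Editorial sketch (not part of the original module): a minimal use of
# ``consensus_score``. Comparing a one-bicluster set with itself should give a
# perfect score of 1.0. The indicator arrays are illustrative assumptions, and
# the helper is never called on import, so module behaviour is unchanged.
def _consensus_score_usage_sketch():
    rows = np.array([[True, True, False]])
    cols = np.array([[True, False, True]])
    return consensus_score((rows, cols), (rows, cols))  # expected: 1.0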
| bsd-3-clause |
mlperf/training_results_v0.7 | NVIDIA/benchmarks/minigo/implementations/tensorflow/minigo/oneoffs/training_curve.py | 8 | 5964 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Used to plot the accuracy of the policy and value networks in
predicting professional game moves and results over the course
of training. Check FLAGS for default values for what models to
load and what sgf files to parse.
Usage:
python training_curve.py
Sample 3 positions from each game
python training_curve.py --num_positions=3
Only grab games after 2005 (default is 2000)
python training_curve.py --min_year=2005
"""
import sys
sys.path.insert(0, '.')
import os.path
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from absl import app, flags
from tqdm import tqdm
import coords
from rl_loop import fsdb
import oneoff_utils
flags.DEFINE_string("sgf_dir", None, "sgf database")
flags.DEFINE_string("plot_dir", "data", "Where to save the plots.")
flags.DEFINE_integer("min_year", "2000",
"Only take sgf games with date >= min_year")
flags.DEFINE_string("komi", "7.5", "Only take sgf games with given komi")
flags.DEFINE_integer("idx_start", 150, "Only take models after given idx")
flags.DEFINE_integer("num_positions", 1,
"How many positions from each game to sample from.")
flags.DEFINE_integer("eval_every", 5,
"Eval every k models to generate the curve")
flags.mark_flag_as_required('sgf_dir')
FLAGS = flags.FLAGS
def batch_run_many(player, positions, batch_size=100):
"""Used to avoid a memory oveflow issue when running the network
on too many positions. TODO: This should be a member function of
player.network?"""
prob_list = []
value_list = []
for idx in range(0, len(positions), batch_size):
probs, values = player.network.run_many(positions[idx:idx + batch_size])
prob_list.append(probs)
value_list.append(values)
return np.concatenate(prob_list, axis=0), np.concatenate(value_list, axis=0)
def eval_player(player, positions, moves, results):
probs, values = batch_run_many(player, positions)
policy_moves = [coords.from_flat(c) for c in np.argmax(probs, axis=1)]
top_move_agree = [moves[idx] == policy_moves[idx]
for idx in range(len(moves))]
square_err = (values - results) ** 2 / 4
return top_move_agree, square_err
def sample_positions_from_games(sgf_files, num_positions=1):
pos_data = []
move_data = []
result_data = []
move_idxs = []
fail_count = 0
for path in tqdm(sgf_files, desc="loading sgfs", unit="games"):
try:
positions, moves, results = oneoff_utils.parse_sgf_to_examples(path)
except KeyboardInterrupt:
raise
except Exception as e:
print("Parse exception:", e)
fail_count += 1
continue
# add entire game
if num_positions == -1:
pos_data.extend(positions)
move_data.extend(moves)
move_idxs.extend(range(len(positions)))
result_data.extend(results)
else:
for idx in np.random.choice(len(positions), num_positions):
pos_data.append(positions[idx])
move_data.append(moves[idx])
result_data.append(results[idx])
move_idxs.append(idx)
print("Sampled {} positions, failed to parse {} files".format(
len(pos_data), fail_count))
return pos_data, move_data, result_data, move_idxs
def get_training_curve_data(
model_dir, pos_data, move_data, result_data, idx_start, eval_every):
model_paths = oneoff_utils.get_model_paths(model_dir)
df = pd.DataFrame()
player = None
print("Evaluating models {}-{}, eval_every={}".format(
idx_start, len(model_paths), eval_every))
for idx in tqdm(range(idx_start, len(model_paths), eval_every)):
if player:
oneoff_utils.restore_params(model_paths[idx], player)
else:
player = oneoff_utils.load_player(model_paths[idx])
correct, squared_errors = eval_player(
player=player, positions=pos_data,
moves=move_data, results=result_data)
avg_acc = np.mean(correct)
avg_mse = np.mean(squared_errors)
print("Model: {}, acc: {:.4f}, mse: {:.4f}".format(
model_paths[idx], avg_acc, avg_mse))
df = df.append({"num": idx, "acc": avg_acc,
"mse": avg_mse}, ignore_index=True)
return df
def save_plots(data_dir, df):
plt.plot(df["num"], df["acc"])
plt.xlabel("Model idx")
plt.ylabel("Accuracy")
plt.title("Accuracy in Predicting Professional Moves")
plot_path = os.path.join(data_dir, "move_acc.pdf")
plt.savefig(plot_path)
plt.figure()
plt.plot(df["num"], df["mse"])
plt.xlabel("Model idx")
plt.ylabel("MSE/4")
plt.title("MSE in predicting outcome")
plot_path = os.path.join(data_dir, "value_mse.pdf")
plt.savefig(plot_path)
def main(unusedargv):
sgf_files = oneoff_utils.find_and_filter_sgf_files(
FLAGS.sgf_dir, FLAGS.min_year, FLAGS.komi)
pos_data, move_data, result_data, move_idxs = sample_positions_from_games(
sgf_files=sgf_files, num_positions=FLAGS.num_positions)
df = get_training_curve_data(fsdb.models_dir(), pos_data, move_data,
result_data, FLAGS.idx_start, FLAGS.eval_every)
save_plots(FLAGS.plot_dir, df)
if __name__ == "__main__":
app.run(main)
| apache-2.0 |
RachitKansal/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 244 | 2496 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (the number of elements
in each class). This kind of normalization can be
useful in the case of class imbalance, giving a more
visual interpretation of which classes are being misclassified.
Here the results are not as good as they could be because our
choice of the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
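# Editorial sketch (not part of the original example): the module docstring
# above notes that C is normally chosen with a grid search. The helper below
# is a minimal, hedged sketch of how that could look with the same-era API
# (sklearn.grid_search); the candidate C values are illustrative assumptions.
# It is never called, so the example's output is unchanged.
def _sketch_grid_search_for_C():
    from sklearn.grid_search import GridSearchCV
    param_grid = {'C': [0.01, 0.1, 1, 10, 100]}
    search = GridSearchCV(svm.SVC(kernel='linear'), param_grid, cv=5)
    search.fit(X_train, y_train)
    return search.best_params_['C']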
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
numb3r33/StumbpleUponChallenge | src/data/make_dataset.py | 1 | 1475 | import pandas as pd
import numpy as np
import json
from unidecode import unidecode
def extract_domain(url):
# extract domains
domain = url.lower().split('/')[2]
domain_parts = domain.split('.')
# e.g. co.uk
if domain_parts[-2] not in ['com', 'co']:
return '.'.join(domain_parts[-2:])
else:
return '.'.join(domain_parts[-3:])
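# Editorial sketch (not part of the original module): illustrative check of
# extract_domain on two made-up URLs. Never called on import, so module
# behaviour is unchanged.
def _extract_domain_sketch():
    assert extract_domain('http://news.example.com/a/b') == 'example.com'
    assert extract_domain('http://shop.example.co.uk/x') == 'example.co.uk'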
def load_csv(filename):
return pd.read_table(filename)
def parse_data(df):
data = []
columns = df.columns
for key, row in df.iterrows():
item = {}
for column in columns:
item[column] = row[column]
# parse url
item['real_url'] = row['url'].lower()
item['domain'] = extract_domain(row['url'])
item['tld'] = item['domain'].split('.')[-1]
# parse boilerplate
boilerplate = json.loads(row['boilerplate'])
for f in ['title', 'url', 'body']:
item[f] = boilerplate[f] if f in boilerplate else u''
item[f] = unidecode(item[f]) if item[f] else ''
if 'label' in row:
item['label'] = row['label']
else:
item['label'] = np.nan
data.append(item)
return data
def get_train():
train = load_csv('../data/raw/train.tsv')
return (parse_data(train))
def get_test():
test = load_csv('../data/raw/test.tsv')
return (parse_data(test))
| mit |
stonebig/bokeh | bokeh/util/hex.py | 2 | 8263 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Functions useful for dealing with hexagonal tilings.
For more information on the concepts employed here, see this informative page
https://www.redblobgames.com/grids/hexagons/
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
import numpy as np
# Bokeh imports
from .dependencies import import_required
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'axial_to_cartesian',
'cartesian_to_axial',
'hexbin',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def axial_to_cartesian(q, r, size, orientation, aspect_scale=1):
''' Map axial *(q,r)* coordinates to cartesian *(x,y)* coordinates of
    tile centers.
This function can be useful for positioning other Bokeh glyphs with
cartesian coordinates in relation to a hex tiling.
This function was adapted from:
https://www.redblobgames.com/grids/hexagons/#hex-to-pixel
Args:
q (array[float]) :
A NumPy array of q-coordinates for binning
r (array[float]) :
A NumPy array of r-coordinates for binning
size (float) :
The size of the hexagonal tiling.
The size is defined as the distance from the center of a hexagon
to the top corner for "pointytop" orientation, or from the center
to a side corner for "flattop" orientation.
orientation (str) :
Whether the hex tile orientation should be "pointytop" or
"flattop".
aspect_scale (float, optional) :
Scale the hexagons in the "cross" dimension.
For "pointytop" orientations, hexagons are scaled in the horizontal
direction. For "flattop", they are scaled in vertical direction.
When working with a plot with ``aspect_scale != 1``, it may be
useful to set this value to match the plot.
Returns:
(array[int], array[int])
'''
if orientation == "pointytop":
x = size * np.sqrt(3) * (q + r/2.0) / aspect_scale
y = -size * 3/2.0 * r
else:
x = size * 3/2.0 * q
y = -size * np.sqrt(3) * (r + q/2.0) * aspect_scale
return (x, y)
def cartesian_to_axial(x, y, size, orientation, aspect_scale=1):
    ''' Map Cartesian *(x,y)* points to axial *(q,r)* coordinates of enclosing
tiles.
This function was adapted from:
https://www.redblobgames.com/grids/hexagons/#pixel-to-hex
Args:
x (array[float]) :
A NumPy array of x-coordinates to convert
y (array[float]) :
A NumPy array of y-coordinates to convert
size (float) :
The size of the hexagonal tiling.
The size is defined as the distance from the center of a hexagon
to the top corner for "pointytop" orientation, or from the center
to a side corner for "flattop" orientation.
orientation (str) :
Whether the hex tile orientation should be "pointytop" or
"flattop".
aspect_scale (float, optional) :
Scale the hexagons in the "cross" dimension.
For "pointytop" orientations, hexagons are scaled in the horizontal
direction. For "flattop", they are scaled in vertical direction.
When working with a plot with ``aspect_scale != 1``, it may be
useful to set this value to match the plot.
Returns:
(array[int], array[int])
'''
HEX_FLAT = [2.0/3.0, 0.0, -1.0/3.0, np.sqrt(3.0)/3.0]
HEX_POINTY = [np.sqrt(3.0)/3.0, -1.0/3.0, 0.0, 2.0/3.0]
coords = HEX_FLAT if orientation == 'flattop' else HEX_POINTY
x = x / size * (aspect_scale if orientation == "pointytop" else 1)
y = -y / size / (aspect_scale if orientation == "flattop" else 1)
q = coords[0] * x + coords[1] * y
r = coords[2] * x + coords[3] * y
return _round_hex(q, r)
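# Editorial sketch (not part of the original module): a round trip between
# axial and cartesian coordinates should recover the same tile indices. The
# sample coordinates are illustrative assumptions; the helper is never called,
# so the module's behaviour is unchanged.
def _axial_round_trip_sketch():
    q = np.array([0, 1, 2])
    r = np.array([0, -1, 1])
    x, y = axial_to_cartesian(q, r, size=1.0, orientation="pointytop")
    q2, r2 = cartesian_to_axial(x, y, size=1.0, orientation="pointytop")
    return np.array_equal(q, q2) and np.array_equal(r, r2)  # expected: True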
def hexbin(x, y, size, orientation="pointytop", aspect_scale=1):
''' Perform an equal-weight binning of data points into hexagonal tiles.
For more sophisticated use cases, e.g. weighted binning or scaling
individual tiles proportional to some other quantity, consider using
HoloViews.
Args:
x (array[float]) :
A NumPy array of x-coordinates for binning
y (array[float]) :
A NumPy array of y-coordinates for binning
size (float) :
The size of the hexagonal tiling.
The size is defined as the distance from the center of a hexagon
to the top corner for "pointytop" orientation, or from the center
to a side corner for "flattop" orientation.
orientation (str, optional) :
Whether the hex tile orientation should be "pointytop" or
"flattop". (default: "pointytop")
aspect_scale (float, optional) :
Match a plot's aspect ratio scaling.
When working with a plot with ``aspect_scale != 1``, this
parameter can be set to match the plot, in order to draw
regular hexagons (instead of "stretched" ones).
This is roughly equivalent to binning in "screen space", and
it may be better to use axis-aligned rectangular bins when
plot aspect scales are not one.
Returns:
DataFrame
The resulting DataFrame will have columns *q* and *r* that specify
hexagon tile locations in axial coordinates, and a column *counts* that
provides the count for each tile.
.. warning::
Hex binning only functions on linear scales, i.e. not on log plots.
'''
pd = import_required('pandas','hexbin requires pandas to be installed')
q, r = cartesian_to_axial(x, y, size, orientation, aspect_scale=aspect_scale)
df = pd.DataFrame(dict(r=r, q=q))
return df.groupby(['q', 'r']).size().reset_index(name='counts')
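# Editorial sketch (not part of the original module): a minimal use of
# ``hexbin`` on made-up data, assuming pandas is installed. Never called on
# import, so the module's behaviour is unchanged.
def _hexbin_usage_sketch():
    x = np.random.standard_normal(500)
    y = np.random.standard_normal(500)
    bins = hexbin(x, y, size=0.5)  # DataFrame with columns q, r, counts
    return bins.sort_values("counts", ascending=False).head()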
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _round_hex(q, r):
''' Round floating point axial hex coordinates to integer *(q,r)*
coordinates.
This code was adapted from:
https://www.redblobgames.com/grids/hexagons/#rounding
Args:
q (array[float]) :
NumPy array of Floating point axial *q* coordinates to round
r (array[float]) :
            NumPy array of Floating point axial *r* coordinates to round
Returns:
(array[int], array[int])
'''
x = q
z = r
y = -x-z
rx = np.round(x)
ry = np.round(y)
rz = np.round(z)
dx = np.abs(rx - x)
dy = np.abs(ry - y)
dz = np.abs(rz - z)
cond = (dx > dy) & (dx > dz)
q = np.where(cond , -(ry + rz), rx)
r = np.where(~cond & ~(dy > dz), -(rx + ry), rz)
return q.astype(int), r.astype(int)
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/build/lib.linux-i686-2.7/matplotlib/transforms.py | 2 | 88425 | """
matplotlib includes a framework for arbitrary geometric
transformations that is used determine the final position of all
elements drawn on the canvas.
Transforms are composed into trees of :class:`TransformNode` objects
whose actual value depends on their children. When the contents of
children change, their parents are automatically invalidated. The
next time an invalidated transform is accessed, it is recomputed to
reflect those changes. This invalidation/caching approach prevents
unnecessary recomputations of transforms, and contributes to better
interactive performance.
For example, here is a graph of the transform tree used to plot data
to the graph:
.. image:: ../_static/transforms.png
The framework can be used for both affine and non-affine
transformations. However, for speed, we want use the backend
renderers to perform affine transformations whenever possible.
Therefore, it is possible to perform just the affine or non-affine
part of a transformation on a set of data. The affine is always
assumed to occur after the non-affine. For any transform::
full transform == non-affine part + affine part
The backends are not expected to handle non-affine transformations
themselves.
"""
from __future__ import print_function, division
import numpy as np
from numpy import ma
from matplotlib._path import (affine_transform, count_bboxes_overlapping_bbox,
update_path_extents)
from numpy.linalg import inv
from weakref import WeakValueDictionary
import warnings
try:
set
except NameError:
from sets import Set as set
from path import Path
DEBUG = False
MaskedArray = ma.MaskedArray
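# Editorial sketch (not part of the original module): the docstring above
# describes how a full transform splits into a non-affine and an affine part,
# and how Bbox objects participate in the transform tree. The helper below is
# a minimal illustration using classes defined later in this module (Bbox,
# Affine2D); the numbers are illustrative assumptions and the function is
# never called, so importing the module is unaffected.
def _transform_sketch():
    bbox = Bbox.from_extents(0.0, 0.0, 2.0, 1.0)
    doubled = bbox.transformed(Affine2D().scale(2.0))  # purely affine part
    return doubled.width, doubled.height  # expected: (4.0, 2.0)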
class TransformNode(object):
"""
:class:`TransformNode` is the base class for anything that
participates in the transform tree and needs to invalidate its
parents or be invalidated. This includes classes that are not
really transforms, such as bounding boxes, since some transforms
depend on bounding boxes to compute their values.
"""
_gid = 0
# Invalidation may affect only the affine part. If the
# invalidation was "affine-only", the _invalid member is set to
# INVALID_AFFINE_ONLY
INVALID_NON_AFFINE = 1
INVALID_AFFINE = 2
INVALID = INVALID_NON_AFFINE | INVALID_AFFINE
# Some metadata about the transform, used to determine whether an
# invalidation is affine-only
is_affine = False
is_bbox = False
pass_through = False
"""
If pass_through is True, all ancestors will always be
invalidated, even if 'self' is already invalid.
"""
def __init__(self, shorthand_name=None):
"""
Creates a new :class:`TransformNode`.
**shorthand_name** - a string representing the "name" of this
transform. The name carries no significance
other than to improve the readability of
``str(transform)`` when DEBUG=True.
"""
# Parents are stored in a WeakValueDictionary, so that if the
# parents are deleted, references from the children won't keep
# them alive.
self._parents = WeakValueDictionary()
# TransformNodes start out as invalid until their values are
# computed for the first time.
self._invalid = 1
self._shorthand_name = shorthand_name or ''
if DEBUG:
def __str__(self):
            # either just return the name of this TransformNode, or its repr
return self._shorthand_name or repr(self)
def __getstate__(self):
d = self.__dict__.copy()
# turn the weakkey dictionary into a normal dictionary
d['_parents'] = dict(self._parents.iteritems())
return d
def __setstate__(self, data_dict):
self.__dict__ = data_dict
# turn the normal dictionary back into a WeakValueDictionary
self._parents = WeakValueDictionary(self._parents)
def __copy__(self, *args):
raise NotImplementedError(
"TransformNode instances can not be copied. " +
"Consider using frozen() instead.")
__deepcopy__ = __copy__
def invalidate(self):
"""
Invalidate this :class:`TransformNode` and triggers an
invalidation of its ancestors. Should be called any
time the transform changes.
"""
value = self.INVALID
if self.is_affine:
value = self.INVALID_AFFINE
return self._invalidate_internal(value, invalidating_node=self)
def _invalidate_internal(self, value, invalidating_node):
"""
Called by :meth:`invalidate` and subsequently ascends the transform
stack calling each TransformNode's _invalidate_internal method.
"""
# determine if this call will be an extension to the invalidation
# status. If not, then a shortcut means that we needn't invoke an
# invalidation up the transform stack as it will already have been
# invalidated.
# N.B This makes the invalidation sticky, once a transform has been
# invalidated as NON_AFFINE, then it will always be invalidated as
# NON_AFFINE even when triggered with a AFFINE_ONLY invalidation.
# In most cases this is not a problem (i.e. for interactive panning and
# zooming) and the only side effect will be on performance.
status_changed = self._invalid < value
if self.pass_through or status_changed:
self._invalid = value
for parent in self._parents.values():
parent._invalidate_internal(value=value,
invalidating_node=self)
def set_children(self, *children):
"""
Set the children of the transform, to let the invalidation
system know which transforms can invalidate this transform.
Should be called from the constructor of any transforms that
depend on other transforms.
"""
for child in children:
child._parents[id(self)] = self
if DEBUG:
_set_children = set_children
def set_children(self, *children):
self._set_children(*children)
self._children = children
set_children.__doc__ = _set_children.__doc__
def frozen(self):
"""
Returns a frozen copy of this transform node. The frozen copy
will not update when its children change. Useful for storing
a previously known state of a transform where
``copy.deepcopy()`` might normally be used.
"""
return self
if DEBUG:
def write_graphviz(self, fobj, highlight=[]):
"""
For debugging purposes.
Writes the transform tree rooted at 'self' to a graphviz "dot"
format file. This file can be run through the "dot" utility
to produce a graph of the transform tree.
Affine transforms are marked in blue. Bounding boxes are
marked in yellow.
*fobj*: A Python file-like object
Once the "dot" file has been created, it can be turned into a
png easily with::
$> dot -Tpng -o $OUTPUT_FILE $DOT_FILE
"""
seen = set()
def recurse(root):
if root in seen:
return
seen.add(root)
props = {}
label = root.__class__.__name__
if root._invalid:
label = '[%s]' % label
if root in highlight:
props['style'] = 'bold'
props['shape'] = 'box'
props['label'] = '"%s"' % label
props = ' '.join(['%s=%s' % (key, val)
for key, val
in props.iteritems()])
fobj.write('%s [%s];\n' %
(hash(root), props))
if hasattr(root, '_children'):
for child in root._children:
name = '?'
for key, val in root.__dict__.iteritems():
if val is child:
name = key
break
fobj.write('"%s" -> "%s" [label="%s", fontsize=10];\n'
% (hash(root),
hash(child),
name))
recurse(child)
fobj.write("digraph G {\n")
recurse(self)
fobj.write("}\n")
class BboxBase(TransformNode):
"""
This is the base class of all bounding boxes, and provides
read-only access to its data. A mutable bounding box is provided
by the :class:`Bbox` class.
The canonical representation is as two points, with no
restrictions on their ordering. Convenience properties are
provided to get the left, bottom, right and top edges and width
and height, but these are not stored explicitly.
"""
is_bbox = True
is_affine = True
#* Redundant: Removed for performance
#
# def __init__(self):
# TransformNode.__init__(self)
if DEBUG:
def _check(points):
if ma.isMaskedArray(points):
warnings.warn("Bbox bounds are a masked array.")
points = np.asarray(points)
if (points[1, 0] - points[0, 0] == 0 or
points[1, 1] - points[0, 1] == 0):
warnings.warn("Singular Bbox.")
_check = staticmethod(_check)
def frozen(self):
return Bbox(self.get_points().copy())
frozen.__doc__ = TransformNode.__doc__
def __array__(self, *args, **kwargs):
return self.get_points()
def is_unit(self):
"""
Returns True if the :class:`Bbox` is the unit bounding box
from (0, 0) to (1, 1).
"""
return list(self.get_points().flatten()) == [0., 0., 1., 1.]
def _get_x0(self):
return self.get_points()[0, 0]
x0 = property(_get_x0, None, None, """
(property) :attr:`x0` is the first of the pair of *x* coordinates that
define the bounding box. :attr:`x0` is not guaranteed to be
less than :attr:`x1`. If you require that, use :attr:`xmin`.""")
def _get_y0(self):
return self.get_points()[0, 1]
y0 = property(_get_y0, None, None, """
(property) :attr:`y0` is the first of the pair of *y* coordinates that
define the bounding box. :attr:`y0` is not guaranteed to be
less than :attr:`y1`. If you require that, use :attr:`ymin`.""")
def _get_x1(self):
return self.get_points()[1, 0]
x1 = property(_get_x1, None, None, """
(property) :attr:`x1` is the second of the pair of *x* coordinates
that define the bounding box. :attr:`x1` is not guaranteed to be
greater than :attr:`x0`. If you require that, use :attr:`xmax`.""")
def _get_y1(self):
return self.get_points()[1, 1]
y1 = property(_get_y1, None, None, """
(property) :attr:`y1` is the second of the pair of *y* coordinates
that define the bounding box. :attr:`y1` is not guaranteed to be
greater than :attr:`y0`. If you require that, use :attr:`ymax`.""")
def _get_p0(self):
return self.get_points()[0]
p0 = property(_get_p0, None, None, """
(property) :attr:`p0` is the first pair of (*x*, *y*) coordinates
that define the bounding box. It is not guaranteed to be the
bottom-left corner. For that, use :attr:`min`.""")
def _get_p1(self):
return self.get_points()[1]
p1 = property(_get_p1, None, None, """
(property) :attr:`p1` is the second pair of (*x*, *y*) coordinates
that define the bounding box. It is not guaranteed to be the
top-right corner. For that, use :attr:`max`.""")
def _get_xmin(self):
return min(self.get_points()[:, 0])
xmin = property(_get_xmin, None, None, """
(property) :attr:`xmin` is the left edge of the bounding box.""")
def _get_ymin(self):
return min(self.get_points()[:, 1])
ymin = property(_get_ymin, None, None, """
(property) :attr:`ymin` is the bottom edge of the bounding box.""")
def _get_xmax(self):
return max(self.get_points()[:, 0])
xmax = property(_get_xmax, None, None, """
(property) :attr:`xmax` is the right edge of the bounding box.""")
def _get_ymax(self):
return max(self.get_points()[:, 1])
ymax = property(_get_ymax, None, None, """
(property) :attr:`ymax` is the top edge of the bounding box.""")
def _get_min(self):
return [min(self.get_points()[:, 0]),
min(self.get_points()[:, 1])]
min = property(_get_min, None, None, """
(property) :attr:`min` is the bottom-left corner of the bounding
box.""")
def _get_max(self):
return [max(self.get_points()[:, 0]),
max(self.get_points()[:, 1])]
max = property(_get_max, None, None, """
(property) :attr:`max` is the top-right corner of the bounding box.""")
def _get_intervalx(self):
return self.get_points()[:, 0]
intervalx = property(_get_intervalx, None, None, """
(property) :attr:`intervalx` is the pair of *x* coordinates that define
the bounding box. It is not guaranteed to be sorted from left to
right.""")
def _get_intervaly(self):
return self.get_points()[:, 1]
intervaly = property(_get_intervaly, None, None, """
(property) :attr:`intervaly` is the pair of *y* coordinates that define
the bounding box. It is not guaranteed to be sorted from bottom to
top.""")
def _get_width(self):
points = self.get_points()
return points[1, 0] - points[0, 0]
width = property(_get_width, None, None, """
(property) The width of the bounding box. It may be negative if
:attr:`x1` < :attr:`x0`.""")
def _get_height(self):
points = self.get_points()
return points[1, 1] - points[0, 1]
height = property(_get_height, None, None, """
(property) The height of the bounding box. It may be negative if
:attr:`y1` < :attr:`y0`.""")
def _get_size(self):
points = self.get_points()
return points[1] - points[0]
size = property(_get_size, None, None, """
(property) The width and height of the bounding box. May be negative,
in the same way as :attr:`width` and :attr:`height`.""")
def _get_bounds(self):
x0, y0, x1, y1 = self.get_points().flatten()
return (x0, y0, x1 - x0, y1 - y0)
bounds = property(_get_bounds, None, None, """
(property) Returns (:attr:`x0`, :attr:`y0`, :attr:`width`,
:attr:`height`).""")
def _get_extents(self):
return self.get_points().flatten().copy()
extents = property(_get_extents, None, None, """
(property) Returns (:attr:`x0`, :attr:`y0`, :attr:`x1`,
:attr:`y1`).""")
def get_points(self):
return NotImplementedError()
def containsx(self, x):
"""
Returns True if *x* is between or equal to :attr:`x0` and
:attr:`x1`.
"""
x0, x1 = self.intervalx
return ((x0 < x1
and (x >= x0 and x <= x1))
or (x >= x1 and x <= x0))
def containsy(self, y):
"""
Returns True if *y* is between or equal to :attr:`y0` and
:attr:`y1`.
"""
y0, y1 = self.intervaly
return ((y0 < y1
and (y >= y0 and y <= y1))
or (y >= y1 and y <= y0))
def contains(self, x, y):
"""
Returns *True* if (*x*, *y*) is a coordinate inside the
bounding box or on its edge.
"""
return self.containsx(x) and self.containsy(y)
def overlaps(self, other):
"""
Returns True if this bounding box overlaps with the given
bounding box *other*.
"""
ax1, ay1, ax2, ay2 = self._get_extents()
bx1, by1, bx2, by2 = other._get_extents()
if ax2 < ax1:
ax2, ax1 = ax1, ax2
if ay2 < ay1:
ay2, ay1 = ay1, ay2
if bx2 < bx1:
bx2, bx1 = bx1, bx2
if by2 < by1:
by2, by1 = by1, by2
return not ((bx2 < ax1) or
(by2 < ay1) or
(bx1 > ax2) or
(by1 > ay2))
def fully_containsx(self, x):
"""
Returns True if *x* is between but not equal to :attr:`x0` and
:attr:`x1`.
"""
x0, x1 = self.intervalx
return ((x0 < x1
and (x > x0 and x < x1))
or (x > x1 and x < x0))
def fully_containsy(self, y):
"""
Returns True if *y* is between but not equal to :attr:`y0` and
:attr:`y1`.
"""
y0, y1 = self.intervaly
return ((y0 < y1
and (y > y0 and y < y1))
or (y > y1 and y < y0))
def fully_contains(self, x, y):
"""
Returns True if (*x*, *y*) is a coordinate inside the bounding
box, but not on its edge.
"""
return self.fully_containsx(x) \
and self.fully_containsy(y)
def fully_overlaps(self, other):
"""
Returns True if this bounding box overlaps with the given
bounding box *other*, but not on its edge alone.
"""
ax1, ay1, ax2, ay2 = self._get_extents()
bx1, by1, bx2, by2 = other._get_extents()
if ax2 < ax1:
ax2, ax1 = ax1, ax2
if ay2 < ay1:
ay2, ay1 = ay1, ay2
if bx2 < bx1:
bx2, bx1 = bx1, bx2
if by2 < by1:
by2, by1 = by1, by2
return not ((bx2 <= ax1) or
(by2 <= ay1) or
(bx1 >= ax2) or
(by1 >= ay2))
def transformed(self, transform):
"""
Return a new :class:`Bbox` object, statically transformed by
the given transform.
"""
return Bbox(transform.transform(self.get_points()))
def inverse_transformed(self, transform):
"""
Return a new :class:`Bbox` object, statically transformed by
the inverse of the given transform.
"""
return Bbox(transform.inverted().transform(self.get_points()))
coefs = {'C': (0.5, 0.5),
'SW': (0, 0),
'S': (0.5, 0),
'SE': (1.0, 0),
'E': (1.0, 0.5),
'NE': (1.0, 1.0),
'N': (0.5, 1.0),
'NW': (0, 1.0),
'W': (0, 0.5)}
def anchored(self, c, container=None):
"""
Return a copy of the :class:`Bbox`, shifted to position *c*
within a container.
*c*: may be either:
* a sequence (*cx*, *cy*) where *cx* and *cy* range from 0
to 1, where 0 is left or bottom and 1 is right or top
* a string:
- 'C' for centered
- 'S' for bottom-center
- 'SE' for bottom-left
- 'E' for left
- etc.
Optional argument *container* is the box within which the
:class:`Bbox` is positioned; it defaults to the initial
:class:`Bbox`.
"""
if container is None:
container = self
l, b, w, h = container.bounds
if isinstance(c, basestring):
cx, cy = self.coefs[c]
else:
cx, cy = c
L, B, W, H = self.bounds
return Bbox(self._points +
[(l + cx * (w - W)) - L,
(b + cy * (h - H)) - B])
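    # Editorial sketch (not part of the original class, never called by the
    # library): it illustrates the anchor codes documented above by placing a
    # 2x2 box at the top-right ('NE') corner of a 10x10 container; the sizes
    # are illustrative assumptions.
    def _anchored_usage_sketch(self):
        container = Bbox.from_bounds(0, 0, 10, 10)
        return Bbox.from_bounds(0, 0, 2, 2).anchored('NE', container)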
def shrunk(self, mx, my):
"""
Return a copy of the :class:`Bbox`, shrunk by the factor *mx*
in the *x* direction and the factor *my* in the *y* direction.
The lower left corner of the box remains unchanged. Normally
*mx* and *my* will be less than 1, but this is not enforced.
"""
w, h = self.size
return Bbox([self._points[0],
self._points[0] + [mx * w, my * h]])
def shrunk_to_aspect(self, box_aspect, container=None, fig_aspect=1.0):
"""
Return a copy of the :class:`Bbox`, shrunk so that it is as
large as it can be while having the desired aspect ratio,
*box_aspect*. If the box coordinates are relative---that
is, fractions of a larger box such as a figure---then the
physical aspect ratio of that figure is specified with
*fig_aspect*, so that *box_aspect* can also be given as a
ratio of the absolute dimensions, not the relative dimensions.
"""
assert box_aspect > 0 and fig_aspect > 0
if container is None:
container = self
w, h = container.size
H = w * box_aspect / fig_aspect
if H <= h:
W = w
else:
W = h * fig_aspect / box_aspect
H = h
return Bbox([self._points[0],
self._points[0] + (W, H)])
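    # Editorial sketch (not part of the original class, never called by the
    # library): shrinking a 4x2 box to the largest square (box_aspect=1) that
    # fits inside it yields a 2x2 box anchored at the lower-left corner; the
    # sizes are illustrative assumptions.
    def _shrunk_to_aspect_sketch(self):
        return Bbox.from_bounds(0, 0, 4, 2).shrunk_to_aspect(1.0)  # 2x2 Bbox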
def splitx(self, *args):
"""
e.g., ``bbox.splitx(f1, f2, ...)``
Returns a list of new :class:`Bbox` objects formed by
splitting the original one with vertical lines at fractional
positions *f1*, *f2*, ...
"""
boxes = []
xf = [0] + list(args) + [1]
x0, y0, x1, y1 = self._get_extents()
w = x1 - x0
for xf0, xf1 in zip(xf[:-1], xf[1:]):
boxes.append(Bbox([[x0 + xf0 * w, y0], [x0 + xf1 * w, y1]]))
return boxes
def splity(self, *args):
"""
        e.g., ``bbox.splity(f1, f2, ...)``
Returns a list of new :class:`Bbox` objects formed by
splitting the original one with horizontal lines at fractional
positions *f1*, *f2*, ...
"""
boxes = []
yf = [0] + list(args) + [1]
x0, y0, x1, y1 = self._get_extents()
h = y1 - y0
for yf0, yf1 in zip(yf[:-1], yf[1:]):
boxes.append(Bbox([[x0, y0 + yf0 * h], [x1, y0 + yf1 * h]]))
return boxes
def count_contains(self, vertices):
"""
Count the number of vertices contained in the :class:`Bbox`.
*vertices* is a Nx2 Numpy array.
"""
if len(vertices) == 0:
return 0
vertices = np.asarray(vertices)
x0, y0, x1, y1 = self._get_extents()
dx0 = np.sign(vertices[:, 0] - x0)
dy0 = np.sign(vertices[:, 1] - y0)
dx1 = np.sign(vertices[:, 0] - x1)
dy1 = np.sign(vertices[:, 1] - y1)
inside = (abs(dx0 + dx1) + abs(dy0 + dy1)) <= 2
return np.sum(inside)
def count_overlaps(self, bboxes):
"""
Count the number of bounding boxes that overlap this one.
bboxes is a sequence of :class:`BboxBase` objects
"""
return count_bboxes_overlapping_bbox(self, bboxes)
def expanded(self, sw, sh):
"""
Return a new :class:`Bbox` which is this :class:`Bbox`
expanded around its center by the given factors *sw* and
*sh*.
"""
width = self.width
height = self.height
deltaw = (sw * width - width) / 2.0
deltah = (sh * height - height) / 2.0
a = np.array([[-deltaw, -deltah], [deltaw, deltah]])
return Bbox(self._points + a)
def padded(self, p):
"""
Return a new :class:`Bbox` that is padded on all four sides by
the given value.
"""
points = self.get_points()
return Bbox(points + [[-p, -p], [p, p]])
def translated(self, tx, ty):
"""
Return a copy of the :class:`Bbox`, statically translated by
*tx* and *ty*.
"""
return Bbox(self._points + (tx, ty))
def corners(self):
"""
Return an array of points which are the four corners of this
rectangle. For example, if this :class:`Bbox` is defined by
the points (*a*, *b*) and (*c*, *d*), :meth:`corners` returns
(*a*, *b*), (*a*, *d*), (*c*, *b*) and (*c*, *d*).
"""
l, b, r, t = self.get_points().flatten()
return np.array([[l, b], [l, t], [r, b], [r, t]])
def rotated(self, radians):
"""
Return a new bounding box that bounds a rotated version of
this bounding box by the given radians. The new bounding box
is still aligned with the axes, of course.
"""
corners = self.corners()
corners_rotated = Affine2D().rotate(radians).transform(corners)
bbox = Bbox.unit()
bbox.update_from_data_xy(corners_rotated, ignore=True)
return bbox
@staticmethod
def union(bboxes):
"""
Return a :class:`Bbox` that contains all of the given bboxes.
"""
assert(len(bboxes))
if len(bboxes) == 1:
return bboxes[0]
x0 = np.inf
y0 = np.inf
x1 = -np.inf
y1 = -np.inf
for bbox in bboxes:
points = bbox.get_points()
xs = points[:, 0]
ys = points[:, 1]
x0 = min(x0, np.min(xs))
y0 = min(y0, np.min(ys))
x1 = max(x1, np.max(xs))
y1 = max(y1, np.max(ys))
return Bbox.from_extents(x0, y0, x1, y1)
class Bbox(BboxBase):
"""
A mutable bounding box.
"""
def __init__(self, points, **kwargs):
"""
*points*: a 2x2 numpy array of the form [[x0, y0], [x1, y1]]
If you need to create a :class:`Bbox` object from another form
of data, consider the static methods :meth:`unit`,
:meth:`from_bounds` and :meth:`from_extents`.
"""
BboxBase.__init__(self, **kwargs)
self._points = np.asarray(points, np.float_)
self._minpos = np.array([0.0000001, 0.0000001])
self._ignore = True
# it is helpful in some contexts to know if the bbox is a
# default or has been mutated; we store the orig points to
# support the mutated methods
self._points_orig = self._points.copy()
if DEBUG:
___init__ = __init__
def __init__(self, points, **kwargs):
self._check(points)
self.___init__(points, **kwargs)
def invalidate(self):
self._check(self._points)
TransformNode.invalidate(self)
_unit_values = np.array([[0.0, 0.0], [1.0, 1.0]], np.float_)
@staticmethod
def unit():
"""
(staticmethod) Create a new unit :class:`Bbox` from (0, 0) to
(1, 1).
"""
return Bbox(Bbox._unit_values.copy())
@staticmethod
def from_bounds(x0, y0, width, height):
"""
(staticmethod) Create a new :class:`Bbox` from *x0*, *y0*,
*width* and *height*.
*width* and *height* may be negative.
"""
return Bbox.from_extents(x0, y0, x0 + width, y0 + height)
@staticmethod
def from_extents(*args):
"""
(staticmethod) Create a new Bbox from *left*, *bottom*,
*right* and *top*.
The *y*-axis increases upwards.
"""
points = np.array(args, dtype=np.float_).reshape(2, 2)
return Bbox(points)
def __repr__(self):
return 'Bbox(%r)' % repr(self._points)
def ignore(self, value):
"""
Set whether the existing bounds of the box should be ignored
by subsequent calls to :meth:`update_from_data` or
:meth:`update_from_data_xy`.
*value*:
- When True, subsequent calls to :meth:`update_from_data`
will ignore the existing bounds of the :class:`Bbox`.
- When False, subsequent calls to :meth:`update_from_data`
will include the existing bounds of the :class:`Bbox`.
"""
self._ignore = value
def update_from_data(self, x, y, ignore=None):
"""
Update the bounds of the :class:`Bbox` based on the passed in
data. After updating, the bounds will have positive *width*
and *height*; *x0* and *y0* will be the minimal values.
*x*: a numpy array of *x*-values
*y*: a numpy array of *y*-values
*ignore*:
- when True, ignore the existing bounds of the :class:`Bbox`.
- when False, include the existing bounds of the :class:`Bbox`.
- when None, use the last value passed to :meth:`ignore`.
"""
warnings.warn(
"update_from_data requires a memory copy -- please replace with "
"update_from_data_xy")
xy = np.hstack((x.reshape((len(x), 1)), y.reshape((len(y), 1))))
return self.update_from_data_xy(xy, ignore)
def update_from_path(self, path, ignore=None, updatex=True, updatey=True):
"""
Update the bounds of the :class:`Bbox` based on the passed in
data. After updating, the bounds will have positive *width*
and *height*; *x0* and *y0* will be the minimal values.
*path*: a :class:`~matplotlib.path.Path` instance
*ignore*:
- when True, ignore the existing bounds of the :class:`Bbox`.
- when False, include the existing bounds of the :class:`Bbox`.
- when None, use the last value passed to :meth:`ignore`.
*updatex*: when True, update the x values
*updatey*: when True, update the y values
"""
if ignore is None:
ignore = self._ignore
if path.vertices.size == 0:
return
points, minpos, changed = update_path_extents(
path, None, self._points, self._minpos, ignore)
if changed:
self.invalidate()
if updatex:
self._points[:, 0] = points[:, 0]
self._minpos[0] = minpos[0]
if updatey:
self._points[:, 1] = points[:, 1]
self._minpos[1] = minpos[1]
def update_from_data_xy(self, xy, ignore=None, updatex=True, updatey=True):
"""
Update the bounds of the :class:`Bbox` based on the passed in
data. After updating, the bounds will have positive *width*
and *height*; *x0* and *y0* will be the minimal values.
*xy*: a numpy array of 2D points
*ignore*:
- when True, ignore the existing bounds of the :class:`Bbox`.
- when False, include the existing bounds of the :class:`Bbox`.
- when None, use the last value passed to :meth:`ignore`.
*updatex*: when True, update the x values
*updatey*: when True, update the y values
"""
if len(xy) == 0:
return
path = Path(xy)
self.update_from_path(path, ignore=ignore,
updatex=updatex, updatey=updatey)
def _set_x0(self, val):
self._points[0, 0] = val
self.invalidate()
x0 = property(BboxBase._get_x0, _set_x0)
def _set_y0(self, val):
self._points[0, 1] = val
self.invalidate()
y0 = property(BboxBase._get_y0, _set_y0)
def _set_x1(self, val):
self._points[1, 0] = val
self.invalidate()
x1 = property(BboxBase._get_x1, _set_x1)
def _set_y1(self, val):
self._points[1, 1] = val
self.invalidate()
y1 = property(BboxBase._get_y1, _set_y1)
def _set_p0(self, val):
self._points[0] = val
self.invalidate()
p0 = property(BboxBase._get_p0, _set_p0)
def _set_p1(self, val):
self._points[1] = val
self.invalidate()
p1 = property(BboxBase._get_p1, _set_p1)
def _set_intervalx(self, interval):
self._points[:, 0] = interval
self.invalidate()
intervalx = property(BboxBase._get_intervalx, _set_intervalx)
def _set_intervaly(self, interval):
self._points[:, 1] = interval
self.invalidate()
intervaly = property(BboxBase._get_intervaly, _set_intervaly)
def _set_bounds(self, bounds):
l, b, w, h = bounds
points = np.array([[l, b], [l + w, b + h]], np.float_)
if np.any(self._points != points):
self._points = points
self.invalidate()
bounds = property(BboxBase._get_bounds, _set_bounds)
def _get_minpos(self):
return self._minpos
minpos = property(_get_minpos)
def _get_minposx(self):
return self._minpos[0]
minposx = property(_get_minposx)
def _get_minposy(self):
return self._minpos[1]
minposy = property(_get_minposy)
def get_points(self):
"""
Get the points of the bounding box directly as a numpy array
of the form: [[x0, y0], [x1, y1]].
"""
self._invalid = 0
return self._points
def set_points(self, points):
"""
Set the points of the bounding box directly from a numpy array
of the form: [[x0, y0], [x1, y1]]. No error checking is
performed, as this method is mainly for internal use.
"""
if np.any(self._points != points):
self._points = points
self.invalidate()
def set(self, other):
"""
Set this bounding box from the "frozen" bounds of another
:class:`Bbox`.
"""
if np.any(self._points != other.get_points()):
self._points = other.get_points()
self.invalidate()
def mutated(self):
'return whether the bbox has changed since init'
return self.mutatedx() or self.mutatedy()
def mutatedx(self):
'return whether the x-limits have changed since init'
return (self._points[0, 0] != self._points_orig[0, 0] or
self._points[1, 0] != self._points_orig[1, 0])
def mutatedy(self):
'return whether the y-limits have changed since init'
return (self._points[0, 1] != self._points_orig[0, 1] or
self._points[1, 1] != self._points_orig[1, 1])
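# Illustrative usage sketch (hypothetical values; relies only on names defined
# in this module, e.g. np and Bbox):
#     bb = Bbox.from_extents(0.0, 0.0, 1.0, 1.0)
#     Bbox.union([bb, Bbox.from_extents(0.5, 0.5, 2.0, 2.0)]).bounds
#     # -> (0.0, 0.0, 2.0, 2.0)
#     bb.update_from_data_xy(np.array([[0.0, 0.0], [3.0, 4.0]]), ignore=True)
#     bb.bounds   # -> (0.0, 0.0, 3.0, 4.0); bb.mutated() is now True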
class TransformedBbox(BboxBase):
"""
A :class:`Bbox` that is automatically transformed by a given
transform. When either the child bounding box or transform
changes, the bounds of this bbox will update accordingly.
"""
def __init__(self, bbox, transform, **kwargs):
"""
*bbox*: a child :class:`Bbox`
*transform*: a 2D :class:`Transform`
"""
assert bbox.is_bbox
assert isinstance(transform, Transform)
assert transform.input_dims == 2
assert transform.output_dims == 2
BboxBase.__init__(self, **kwargs)
self._bbox = bbox
self._transform = transform
self.set_children(bbox, transform)
self._points = None
def __repr__(self):
return "TransformedBbox(%r, %r)" % (self._bbox, self._transform)
def get_points(self):
if self._invalid:
points = self._transform.transform(self._bbox.get_points())
points = np.ma.filled(points, 0.0)
self._points = points
self._invalid = 0
return self._points
get_points.__doc__ = Bbox.get_points.__doc__
if DEBUG:
_get_points = get_points
def get_points(self):
points = self._get_points()
self._check(points)
return points
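# Illustrative usage sketch (hypothetical values): a TransformedBbox follows
# its child box through the given transform (Affine2D is defined further down):
#     tbox = TransformedBbox(Bbox.from_extents(0.0, 0.0, 1.0, 1.0),
#                            Affine2D().scale(10.0, 20.0))
#     tbox.bounds   # -> (0.0, 0.0, 10.0, 20.0), recomputed if either child changes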
class Transform(TransformNode):
"""
The base class of all :class:`TransformNode` instances that
actually perform a transformation.
All non-affine transformations should be subclasses of this class.
New affine transformations should be subclasses of
:class:`Affine2D`.
Subclasses of this class should override the following members (at
minimum):
- :attr:`input_dims`
- :attr:`output_dims`
- :meth:`transform`
- :attr:`is_separable`
- :attr:`has_inverse`
- :meth:`inverted` (if :attr:`has_inverse` is True)
If the transform needs to do something non-standard with
:class:`matplotlib.path.Path` objects, such as adding curves
where there were once line segments, it should override:
- :meth:`transform_path`
"""
input_dims = None
"""
The number of input dimensions of this transform.
Must be overridden (with integers) in the subclass.
"""
output_dims = None
"""
The number of output dimensions of this transform.
Must be overridden (with integers) in the subclass.
"""
has_inverse = False
"""True if this transform has a corresponding inverse transform."""
is_separable = False
"""True if this transform is separable in the x- and y- dimensions."""
def __add__(self, other):
"""
Composes two transforms together such that *self* is followed
by *other*.
"""
if isinstance(other, Transform):
return composite_transform_factory(self, other)
raise TypeError(
"Can not add Transform to object of type '%s'" % type(other))
def __radd__(self, other):
"""
Composes two transforms together such that *self* is followed
by *other*.
"""
if isinstance(other, Transform):
return composite_transform_factory(other, self)
raise TypeError(
"Can not add Transform to object of type '%s'" % type(other))
def __eq__(self, other):
# equality is based on transform object id. Hence:
# Transform() != Transform().
# Some classes, such as TransformWrapper & AffineBase, will override.
return self is other
def _iter_break_from_left_to_right(self):
"""
Returns an iterator breaking down this transform stack from left to
right recursively. If self == ((A, N), A) then the result will be an
iterator which yields I : ((A, N), A), followed by A : (N, A),
followed by (A, N) : (A), but not ((A, N), A) : I.
This is equivalent to flattening the stack then yielding
``flat_stack[:i], flat_stack[i:]`` where i=0..(n-1).
"""
yield IdentityTransform(), self
@property
def depth(self):
"""
Returns the number of transforms which have been chained
together to form this Transform instance.
.. note::
For the special case of a Composite transform, the maximum depth
of the two is returned.
"""
return 1
def contains_branch(self, other):
"""
Return whether the given transform is a sub-tree of this transform.
This routine uses transform equality to identify sub-trees, therefore
in many situations it is object id which will be used.
For the case where the given transform represents the whole
of this transform, returns True.
"""
if self.depth < other.depth:
return False
# check that a subtree is equal to other (starting from self)
for _, sub_tree in self._iter_break_from_left_to_right():
if sub_tree == other:
return True
return False
def contains_branch_seperately(self, other_transform):
"""
Returns whether the given branch is a sub-tree of this transform on
each separate dimension.
A common use for this method is to identify if a transform is a blended
transform containing an axes' data transform. e.g.::
x_isdata, y_isdata = trans.contains_branch_seperately(ax.transData)
"""
if self.output_dims != 2:
raise ValueError('contains_branch_seperately only supports '
'transforms with 2 output dimensions')
# for a non-blended transform each separate dimension is the same, so
# just return the appropriate shape.
return [self.contains_branch(other_transform)] * 2
def __sub__(self, other):
"""
Returns a transform stack which goes all the way down self's transform
stack, and then ascends back up other's stack. If it can, this is
optimised::
# normally
A - B == a + b.inverted()
# sometimes, when A contains the tree B there is no need to
# descend all the way down to the base of A (via B), instead we
# can just stop at B.
(A + B) - (B)^-1 == A
# similarly, when B contains tree A, we can avoid descending A at
# all, basically:
A - (A + B) == ((B + A) - A).inverted() or B^-1
For clarity, the result of ``(A + B) - B + B == (A + B)``.
"""
# we only know how to do this operation if other is a Transform.
if not isinstance(other, Transform):
return NotImplemented
for remainder, sub_tree in self._iter_break_from_left_to_right():
if sub_tree == other:
return remainder
for remainder, sub_tree in other._iter_break_from_left_to_right():
if sub_tree == self:
if not remainder.has_inverse:
raise ValueError("The shortcut cannot be computed since "
"other's transform includes a non-invertable component.")
return remainder.inverted()
# if we have got this far, then there was no shortcut possible
if other.has_inverse:
return self + other.inverted()
else:
raise ValueError('It is not possible to compute transA - transB '
'since transB cannot be inverted and there is no '
'shortcut possible.')
def __array__(self, *args, **kwargs):
"""
Array interface to get at this Transform's affine matrix.
"""
return self.get_affine().get_matrix()
def transform(self, values):
"""
Performs the transformation on the given array of values.
Accepts a numpy array of shape (N x :attr:`input_dims`) and
returns a numpy array of shape (N x :attr:`output_dims`).
"""
return self.transform_affine(self.transform_non_affine(values))
def transform_affine(self, values):
"""
Performs only the affine part of this transformation on the
given array of values.
``transform(values)`` is always equivalent to
``transform_affine(transform_non_affine(values))``.
In non-affine transformations, this is generally a no-op. In
affine transformations, this is equivalent to
``transform(values)``.
Accepts a numpy array of shape (N x :attr:`input_dims`) and
returns a numpy array of shape (N x :attr:`output_dims`).
"""
return self.get_affine().transform(values)
def transform_non_affine(self, values):
"""
Performs only the non-affine part of the transformation.
``transform(values)`` is always equivalent to
``transform_affine(transform_non_affine(values))``.
In non-affine transformations, this is generally equivalent to
``transform(values)``. In affine transformations, this is
always a no-op.
Accepts a numpy array of shape (N x :attr:`input_dims`) and
returns a numpy array of shape (N x :attr:`output_dims`).
"""
return values
def get_affine(self):
"""
Get the affine part of this transform.
"""
return IdentityTransform()
def get_matrix(self):
"""
Get the Affine transformation array for the affine part
of this transform.
"""
return self.get_affine().get_matrix()
def transform_point(self, point):
"""
A convenience function that returns the transformed copy of a
single point.
The point is given as a sequence of length :attr:`input_dims`.
The transformed point is returned as a sequence of length
:attr:`output_dims`.
"""
assert len(point) == self.input_dims
return self.transform(np.asarray([point]))[0]
def transform_path(self, path):
"""
Returns a transformed path.
*path*: a :class:`~matplotlib.path.Path` instance.
In some cases, this transform may insert curves into the path
that began as line segments.
"""
return self.transform_path_affine(self.transform_path_non_affine(path))
def transform_path_affine(self, path):
"""
Returns a path, transformed only by the affine part of
this transform.
*path*: a :class:`~matplotlib.path.Path` instance.
``transform_path(path)`` is equivalent to
``transform_path_affine(transform_path_non_affine(values))``.
"""
return self.get_affine().transform_path_affine(path)
def transform_path_non_affine(self, path):
"""
Returns a path, transformed only by the non-affine
part of this transform.
*path*: a :class:`~matplotlib.path.Path` instance.
``transform_path(path)`` is equivalent to
``transform_path_affine(transform_path_non_affine(values))``.
"""
return Path(self.transform_non_affine(path.vertices), path.codes,
path._interpolation_steps)
def transform_angles(self, angles, pts, radians=False, pushoff=1e-5):
"""
Performs transformation on a set of angles anchored at
specific locations.
The *angles* must be a column vector (i.e., numpy array).
The *pts* must be a two-column numpy array of x,y positions
(angle transforms currently only work in 2D). This array must
have the same number of rows as *angles*.
*radians* indicates whether or not input angles are given in
radians (True) or degrees (False; the default).
*pushoff* is the distance to move away from *pts* for
determining transformed angles (see discussion of method
below).
The transformed angles are returned in an array with the same
size as *angles*.
The generic version of this method uses a very generic
algorithm that transforms *pts*, as well as locations very
close to *pts*, to find the angle in the transformed system.
"""
# Must be 2D
if self.input_dims != 2 or self.output_dims != 2:
raise NotImplementedError('Only defined in 2D')
# pts must be array with 2 columns for x,y
assert pts.shape[1] == 2
# angles must be a column vector and have same number of
# rows as pts
assert np.prod(angles.shape) == angles.shape[0] == pts.shape[0]
# Convert to radians if desired
if not radians:
angles = angles / 180.0 * np.pi
# Move a short distance away
pts2 = pts + pushoff * np.c_[np.cos(angles), np.sin(angles)]
# Transform both sets of points
tpts = self.transform(pts)
tpts2 = self.transform(pts2)
# Calculate transformed angles
d = tpts2 - tpts
a = np.arctan2(d[:, 1], d[:, 0])
# Convert back to degrees if desired
if not radians:
a = a * 180.0 / np.pi
return a
def inverted(self):
"""
Return the corresponding inverse transformation.
The return value of this method should be treated as
temporary. An update to *self* does not cause a corresponding
update to its inverted copy.
``x === self.inverted().transform(self.transform(x))``
"""
raise NotImplementedError()
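# Illustrative usage sketch (hypothetical values): transforms compose left to
# right with '+', and invert with inverted(); Affine2D (defined below) is used
# here only as a concrete example:
#     t = Affine2D().scale(2.0) + Affine2D().translate(1.0, 1.0)
#     t.transform(np.array([[1.0, 1.0]]))              # scale then shift -> (3, 3)
#     t.inverted().transform(np.array([[3.0, 3.0]]))   # back to (1, 1)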
class TransformWrapper(Transform):
"""
A helper class that holds a single child transform and acts
equivalently to it.
This is useful if a node of the transform tree must be replaced at
run time with a transform of a different type. This class allows
that replacement to correctly trigger invalidation.
Note that :class:`TransformWrapper` instances must have the same
input and output dimensions during their entire lifetime, so the
child transform may only be replaced with another child transform
of the same dimensions.
"""
pass_through = True
def __init__(self, child):
"""
*child*: A class:`Transform` instance. This child may later
be replaced with :meth:`set`.
"""
assert isinstance(child, Transform)
Transform.__init__(self)
self.input_dims = child.input_dims
self.output_dims = child.output_dims
self._set(child)
self._invalid = 0
def __eq__(self, other):
return self._child.__eq__(other)
if DEBUG:
def __str__(self):
return str(self._child)
def __getstate__(self):
# only store the child
return {'child': self._child}
def __setstate__(self, state):
# re-initialise the TransformWrapper with the state's child
self.__init__(state['child'])
def __repr__(self):
return "TransformWrapper(%r)" % self._child
def frozen(self):
return self._child.frozen()
frozen.__doc__ = Transform.frozen.__doc__
def _set(self, child):
self._child = child
self.set_children(child)
self.transform = child.transform
self.transform_affine = child.transform_affine
self.transform_non_affine = child.transform_non_affine
self.transform_path = child.transform_path
self.transform_path_affine = child.transform_path_affine
self.transform_path_non_affine = child.transform_path_non_affine
self.get_affine = child.get_affine
self.inverted = child.inverted
self.get_matrix = child.get_matrix
# note we do not wrap other properties here since the transform's
# child can be changed with WrappedTransform.set and so checking
# is_affine and other such properties may be dangerous.
def set(self, child):
"""
Replace the current child of this transform with another one.
The new child must have the same number of input and output
dimensions as the current child.
"""
assert child.input_dims == self.input_dims
assert child.output_dims == self.output_dims
self._set(child)
self._invalid = 0
self.invalidate()
self._invalid = 0
def _get_is_affine(self):
return self._child.is_affine
is_affine = property(_get_is_affine)
def _get_is_separable(self):
return self._child.is_separable
is_separable = property(_get_is_separable)
def _get_has_inverse(self):
return self._child.has_inverse
has_inverse = property(_get_has_inverse)
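# Illustrative usage sketch (hypothetical values): swapping the child of a
# TransformWrapper re-routes every delegated method without rebuilding the tree:
#     node = TransformWrapper(Affine2D().scale(2.0))
#     node.transform(np.array([[1.0, 1.0]]))   # -> (2, 2)
#     node.set(Affine2D().translate(5.0, 0.0))
#     node.transform(np.array([[1.0, 1.0]]))   # -> (6, 1)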
class AffineBase(Transform):
"""
The base class of all affine transformations of any number of
dimensions.
"""
is_affine = True
def __init__(self, *args, **kwargs):
Transform.__init__(self, *args, **kwargs)
self._inverted = None
def __array__(self, *args, **kwargs):
# optimises the access of the transform matrix vs the superclass
return self.get_matrix()
@staticmethod
def _concat(a, b):
"""
Concatenates two transformation matrices (represented as numpy
arrays) together.
"""
return np.dot(b, a)
def __eq__(self, other):
if other.is_affine:
return np.all(self.get_matrix() == other.get_matrix())
return NotImplemented
def transform(self, values):
return self.transform_affine(values)
transform.__doc__ = Transform.transform.__doc__
def transform_affine(self, values):
raise NotImplementedError('Affine subclasses should override this '
'method.')
transform_affine.__doc__ = Transform.transform_affine.__doc__
def transform_non_affine(self, points):
return points
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
return self.transform_path_affine(path)
transform_path.__doc__ = Transform.transform_path.__doc__
def transform_path_affine(self, path):
return Path(self.transform_affine(path.vertices),
path.codes, path._interpolation_steps)
transform_path_affine.__doc__ = Transform.transform_path_affine.__doc__
def transform_path_non_affine(self, path):
return path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def get_affine(self):
return self
get_affine.__doc__ = Transform.get_affine.__doc__
class Affine2DBase(AffineBase):
"""
The base class of all 2D affine transformations.
2D affine transformations are performed using a 3x3 numpy array::
a c e
b d f
0 0 1
This class provides the read-only interface. For a mutable 2D
affine transformation, use :class:`Affine2D`.
Subclasses of this class will generally only need to override a
constructor and :meth:`get_matrix` that generates a custom 3x3 matrix.
"""
has_inverse = True
input_dims = 2
output_dims = 2
def frozen(self):
return Affine2D(self.get_matrix().copy())
frozen.__doc__ = AffineBase.frozen.__doc__
def _get_is_separable(self):
mtx = self.get_matrix()
return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0
is_separable = property(_get_is_separable)
def to_values(self):
"""
Return the values of the matrix as a sequence (a,b,c,d,e,f)
"""
mtx = self.get_matrix()
return tuple(mtx[:2].swapaxes(0, 1).flatten())
@staticmethod
def matrix_from_values(a, b, c, d, e, f):
"""
(staticmethod) Create a new transformation matrix as a 3x3
numpy array of the form::
a c e
b d f
0 0 1
"""
return np.array([[a, c, e], [b, d, f], [0.0, 0.0, 1.0]], np.float_)
def transform_affine(self, points):
mtx = self.get_matrix()
if isinstance(points, MaskedArray):
tpoints = affine_transform(points.data, mtx)
return ma.MaskedArray(tpoints, mask=ma.getmask(points))
return affine_transform(points, mtx)
def transform_point(self, point):
mtx = self.get_matrix()
return affine_transform(point, mtx)
transform_point.__doc__ = AffineBase.transform_point.__doc__
if DEBUG:
_transform_affine = transform_affine
def transform_affine(self, points):
# The major speed trap here is just converting the
# points to an array in the first place. If we can use
# more arrays upstream, that should help here.
if (not ma.isMaskedArray(points) and
not isinstance(points, np.ndarray)):
warnings.warn(
('A non-numpy array of type %s was passed in for ' +
'transformation. Please correct this.')
% type(points))
return self._transform_affine(points)
transform_affine.__doc__ = AffineBase.transform_affine.__doc__
def inverted(self):
if self._inverted is None or self._invalid:
mtx = self.get_matrix()
shorthand_name = None
if self._shorthand_name:
shorthand_name = '(%s)-1' % self._shorthand_name
self._inverted = Affine2D(inv(mtx), shorthand_name=shorthand_name)
self._invalid = 0
return self._inverted
inverted.__doc__ = AffineBase.inverted.__doc__
class Affine2D(Affine2DBase):
"""
A mutable 2D affine transformation.
"""
def __init__(self, matrix=None, **kwargs):
"""
Initialize an Affine transform from a 3x3 numpy float array::
a c e
b d f
0 0 1
If *matrix* is None, initialize with the identity transform.
"""
Affine2DBase.__init__(self, **kwargs)
if matrix is None:
matrix = np.identity(3)
elif DEBUG:
matrix = np.asarray(matrix, np.float_)
assert matrix.shape == (3, 3)
self._mtx = matrix
self._invalid = 0
def __repr__(self):
return "Affine2D(%s)" % repr(self._mtx)
# def __cmp__(self, other):
# # XXX redundant. this only tells us eq.
# if (isinstance(other, Affine2D) and
# (self.get_matrix() == other.get_matrix()).all()):
# return 0
# return -1
@staticmethod
def from_values(a, b, c, d, e, f):
"""
(staticmethod) Create a new Affine2D instance from the given
values::
a c e
b d f
0 0 1
.
"""
return Affine2D(
np.array([a, c, e, b, d, f, 0.0, 0.0, 1.0], np.float_)
.reshape((3, 3)))
def get_matrix(self):
"""
Get the underlying transformation matrix as a 3x3 numpy array::
a c e
b d f
0 0 1
.
"""
self._invalid = 0
return self._mtx
def set_matrix(self, mtx):
"""
Set the underlying transformation matrix from a 3x3 numpy array::
a c e
b d f
0 0 1
.
"""
self._mtx = mtx
self.invalidate()
def set(self, other):
"""
Set this transformation from the frozen copy of another
:class:`Affine2DBase` object.
"""
assert isinstance(other, Affine2DBase)
self._mtx = other.get_matrix()
self.invalidate()
@staticmethod
def identity():
"""
(staticmethod) Return a new :class:`Affine2D` object that is
the identity transform.
Unless this transform will be mutated later on, consider using
the faster :class:`IdentityTransform` class instead.
"""
return Affine2D(np.identity(3))
def clear(self):
"""
Reset the underlying matrix to the identity transform.
"""
self._mtx = np.identity(3)
self.invalidate()
return self
def rotate(self, theta):
"""
Add a rotation (in radians) to this transform in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
a = np.cos(theta)
b = np.sin(theta)
rotate_mtx = np.array(
[[a, -b, 0.0], [b, a, 0.0], [0.0, 0.0, 1.0]],
np.float_)
self._mtx = np.dot(rotate_mtx, self._mtx)
self.invalidate()
return self
def rotate_deg(self, degrees):
"""
Add a rotation (in degrees) to this transform in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
return self.rotate(degrees * np.pi / 180.)
def rotate_around(self, x, y, theta):
"""
Add a rotation (in radians) around the point (x, y) in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
return self.translate(-x, -y).rotate(theta).translate(x, y)
def rotate_deg_around(self, x, y, degrees):
"""
Add a rotation (in degrees) around the point (x, y) in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
return self.translate(-x, -y).rotate_deg(degrees).translate(x, y)
def translate(self, tx, ty):
"""
Adds a translation in place.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
translate_mtx = np.array(
[[1.0, 0.0, tx], [0.0, 1.0, ty], [0.0, 0.0, 1.0]],
np.float_)
self._mtx = np.dot(translate_mtx, self._mtx)
self.invalidate()
return self
def scale(self, sx, sy=None):
"""
Adds a scale in place.
If *sy* is None, the same scale is applied in both the *x*- and
*y*-directions.
Returns *self*, so this method can easily be chained with more
calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
and :meth:`scale`.
"""
if sy is None:
sy = sx
scale_mtx = np.array(
[[sx, 0.0, 0.0], [0.0, sy, 0.0], [0.0, 0.0, 1.0]],
np.float_)
self._mtx = np.dot(scale_mtx, self._mtx)
self.invalidate()
return self
def _get_is_separable(self):
mtx = self.get_matrix()
return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0
is_separable = property(_get_is_separable)
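# Illustrative usage sketch (hypothetical values): the mutators return self, so
# operations chain and apply left to right:
#     t = Affine2D().rotate_deg(90.0).translate(1.0, 0.0)
#     t.transform(np.array([[1.0, 0.0]]))   # rotate to ~(0, 1), then shift -> ~(1, 1)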
class IdentityTransform(Affine2DBase):
"""
A special class that does one thing, the identity transform, in a
fast way.
"""
_mtx = np.identity(3)
def frozen(self):
return self
frozen.__doc__ = Affine2DBase.frozen.__doc__
def __repr__(self):
return "IdentityTransform()"
def get_matrix(self):
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def transform(self, points):
return points
transform.__doc__ = Affine2DBase.transform.__doc__
transform_affine = transform
transform_affine.__doc__ = Affine2DBase.transform_affine.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Affine2DBase.transform_non_affine.__doc__
def transform_path(self, path):
return path
transform_path.__doc__ = Affine2DBase.transform_path.__doc__
transform_path_affine = transform_path
transform_path_affine.__doc__ = Affine2DBase.transform_path_affine.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Affine2DBase.transform_path_non_affine.__doc__
def get_affine(self):
return self
get_affine.__doc__ = Affine2DBase.get_affine.__doc__
inverted = get_affine
inverted.__doc__ = Affine2DBase.inverted.__doc__
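# Illustrative usage sketch (hypothetical values): the identity passes data
# through untouched, which lets composites short-circuit around it:
#     IdentityTransform().transform(np.array([[3.0, 4.0]]))   # unchanged -> (3, 4)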
class BlendedGenericTransform(Transform):
"""
A "blended" transform uses one transform for the *x*-direction, and
another transform for the *y*-direction.
This "generic" version can handle any given child transform in the
*x*- and *y*-directions.
"""
input_dims = 2
output_dims = 2
is_separable = True
pass_through = True
def __init__(self, x_transform, y_transform, **kwargs):
"""
Create a new "blended" transform using *x_transform* to
transform the *x*-axis and *y_transform* to transform the
*y*-axis.
You will generally not call this constructor directly but use
the :func:`blended_transform_factory` function instead, which
can determine automatically which kind of blended transform to
create.
"""
# Here we ask: "Does it blend?"
Transform.__init__(self, **kwargs)
self._x = x_transform
self._y = y_transform
self.set_children(x_transform, y_transform)
self._affine = None
def __eq__(self, other):
# Note, this is an exact copy of BlendedAffine2D.__eq__
if isinstance(other, (BlendedAffine2D, BlendedGenericTransform)):
return (self._x == other._x) and (self._y == other._y)
elif self._x == self._y:
return self._x == other
else:
return NotImplemented
def contains_branch_seperately(self, transform):
# Note, this is an exact copy of BlendedAffine2D.contains_branch_seperately
return self._x.contains_branch(transform), self._y.contains_branch(transform)
@property
def depth(self):
return max([self._x.depth, self._y.depth])
def contains_branch(self, other):
# a blended transform cannot possibly contain a branch from two different transforms.
return False
def _get_is_affine(self):
return self._x.is_affine and self._y.is_affine
is_affine = property(_get_is_affine)
def _get_has_inverse(self):
return self._x.has_inverse and self._y.has_inverse
has_inverse = property(_get_has_inverse)
def frozen(self):
return blended_transform_factory(self._x.frozen(), self._y.frozen())
frozen.__doc__ = Transform.frozen.__doc__
def __repr__(self):
return "BlendedGenericTransform(%s,%s)" % (self._x, self._y)
def transform_non_affine(self, points):
if self._x.is_affine and self._y.is_affine:
return points
x = self._x
y = self._y
if x == y and x.input_dims == 2:
return x.transform_non_affine(points)
if x.input_dims == 2:
x_points = x.transform_non_affine(points)[:, 0:1]
else:
x_points = x.transform_non_affine(points[:, 0])
x_points = x_points.reshape((len(x_points), 1))
if y.input_dims == 2:
y_points = y.transform_non_affine(points)[:, 1:]
else:
y_points = y.transform_non_affine(points[:, 1])
y_points = y_points.reshape((len(y_points), 1))
if isinstance(x_points, MaskedArray) or isinstance(y_points, MaskedArray):
return ma.concatenate((x_points, y_points), 1)
else:
return np.concatenate((x_points, y_points), 1)
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def inverted(self):
return BlendedGenericTransform(self._x.inverted(), self._y.inverted())
inverted.__doc__ = Transform.inverted.__doc__
def get_affine(self):
if self._invalid or self._affine is None:
if self._x == self._y:
self._affine = self._x.get_affine()
else:
x_mtx = self._x.get_affine().get_matrix()
y_mtx = self._y.get_affine().get_matrix()
# This works because we already know the transforms are
# separable, though normally one would want to set b and
# c to zero.
mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]))
self._affine = Affine2D(mtx)
self._invalid = 0
return self._affine
get_affine.__doc__ = Transform.get_affine.__doc__
class BlendedAffine2D(Affine2DBase):
"""
A "blended" transform uses one transform for the *x*-direction, and
another transform for the *y*-direction.
This version is an optimization for the case where both child
transforms are of type :class:`Affine2DBase`.
"""
is_separable = True
def __init__(self, x_transform, y_transform, **kwargs):
"""
Create a new "blended" transform using *x_transform* to
transform the *x*-axis and *y_transform* to transform the
*y*-axis.
Both *x_transform* and *y_transform* must be 2D affine
transforms.
You will generally not call this constructor directly but use
the :func:`blended_transform_factory` function instead, which
can determine automatically which kind of blended transform to
create.
"""
assert x_transform.is_affine
assert y_transform.is_affine
assert x_transform.is_separable
assert y_transform.is_separable
Transform.__init__(self, **kwargs)
self._x = x_transform
self._y = y_transform
self.set_children(x_transform, y_transform)
Affine2DBase.__init__(self)
self._mtx = None
def __eq__(self, other):
# Note, this is an exact copy of BlendedGenericTransform.__eq__
if isinstance(other, (BlendedAffine2D, BlendedGenericTransform)):
return (self._x == other._x) and (self._y == other._y)
elif self._x == self._y:
return self._x == other
else:
return NotImplemented
def contains_branch_seperately(self, transform):
# Note, this is an exact copy of BlendedTransform.contains_branch_seperately
return self._x.contains_branch(transform), self._y.contains_branch(transform)
def __repr__(self):
return "BlendedAffine2D(%s,%s)" % (self._x, self._y)
def get_matrix(self):
if self._invalid:
if self._x == self._y:
self._mtx = self._x.get_matrix()
else:
x_mtx = self._x.get_matrix()
y_mtx = self._y.get_matrix()
# This works because we already know the transforms are
# separable, though normally one would want to set b and
# c to zero.
self._mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]))
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def blended_transform_factory(x_transform, y_transform):
"""
Create a new "blended" transform using *x_transform* to transform
the *x*-axis and *y_transform* to transform the *y*-axis.
A faster version of the blended transform is returned for the case
where both child transforms are affine.
"""
if (isinstance(x_transform, Affine2DBase)
and isinstance(y_transform, Affine2DBase)):
return BlendedAffine2D(x_transform, y_transform)
return BlendedGenericTransform(x_transform, y_transform)
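# Illustrative usage sketch (hypothetical values): blending takes x from one
# transform and y from another; with two affines the fast BlendedAffine2D is
# returned:
#     blend = blended_transform_factory(Affine2D().scale(2.0, 1.0),
#                                       Affine2D().scale(1.0, 3.0))
#     blend.transform(np.array([[1.0, 1.0]]))   # x scaled by 2, y by 3 -> (2, 3)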
class CompositeGenericTransform(Transform):
"""
A composite transform formed by applying transform *a* then
transform *b*.
This "generic" version can handle any two arbitrary
transformations.
"""
pass_through = True
def __init__(self, a, b, **kwargs):
"""
Create a new composite transform that is the result of
applying transform *a* then transform *b*.
You will generally not call this constructor directly but use
the :func:`composite_transform_factory` function instead,
which can automatically choose the best kind of composite
transform instance to create.
"""
assert a.output_dims == b.input_dims
self.input_dims = a.input_dims
self.output_dims = b.output_dims
Transform.__init__(self, **kwargs)
self._a = a
self._b = b
self.set_children(a, b)
is_affine = property(lambda self: self._a.is_affine and self._b.is_affine)
def frozen(self):
self._invalid = 0
frozen = composite_transform_factory(self._a.frozen(), self._b.frozen())
if not isinstance(frozen, CompositeGenericTransform):
return frozen.frozen()
return frozen
frozen.__doc__ = Transform.frozen.__doc__
def _invalidate_internal(self, value, invalidating_node):
# In some cases for a composite transform, an invalidating call to AFFINE_ONLY needs
# to be extended to invalidate the NON_AFFINE part too. These cases are when the right
# hand transform is non-affine and either:
# (a) the left hand transform is non affine
# (b) it is the left hand node which has triggered the invalidation
if value == Transform.INVALID_AFFINE \
and not self._b.is_affine \
and (not self._a.is_affine or invalidating_node is self._a):
value = Transform.INVALID
Transform._invalidate_internal(self, value=value,
invalidating_node=invalidating_node)
def __eq__(self, other):
if isinstance(other, (CompositeGenericTransform, CompositeAffine2D)):
return self is other or (self._a == other._a and self._b == other._b)
else:
return False
def _iter_break_from_left_to_right(self):
for lh_compliment, rh_compliment in self._a._iter_break_from_left_to_right():
yield lh_compliment, rh_compliment + self._b
for lh_compliment, rh_compliment in self._b._iter_break_from_left_to_right():
yield self._a + lh_compliment, rh_compliment
@property
def depth(self):
return self._a.depth + self._b.depth
def _get_is_affine(self):
return self._a.is_affine and self._b.is_affine
is_affine = property(_get_is_affine)
def _get_is_separable(self):
return self._a.is_separable and self._b.is_separable
is_separable = property(_get_is_separable)
if DEBUG:
def __str__(self):
return '(%s, %s)' % (self._a, self._b)
def __repr__(self):
return "CompositeGenericTransform(%r, %r)" % (self._a, self._b)
def transform_affine(self, points):
return self.get_affine().transform(points)
transform_affine.__doc__ = Transform.transform_affine.__doc__
def transform_non_affine(self, points):
if self._a.is_affine and self._b.is_affine:
return points
elif not self._a.is_affine and self._b.is_affine:
return self._a.transform_non_affine(points)
else:
return self._b.transform_non_affine(
self._a.transform(points))
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path_non_affine(self, path):
if self._a.is_affine and self._b.is_affine:
return path
elif not self._a.is_affine and self._b.is_affine:
return self._a.transform_path_non_affine(path)
else:
return self._b.transform_path_non_affine(
self._a.transform_path(path))
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def get_affine(self):
if not self._b.is_affine:
return self._b.get_affine()
else:
return Affine2D(np.dot(self._b.get_affine().get_matrix(),
self._a.get_affine().get_matrix()))
get_affine.__doc__ = Transform.get_affine.__doc__
def inverted(self):
return CompositeGenericTransform(self._b.inverted(), self._a.inverted())
inverted.__doc__ = Transform.inverted.__doc__
def _get_has_inverse(self):
return self._a.has_inverse and self._b.has_inverse
has_inverse = property(_get_has_inverse)
class CompositeAffine2D(Affine2DBase):
"""
A composite transform formed by applying transform *a* then transform *b*.
This version is an optimization that handles the case where both *a*
and *b* are 2D affines.
"""
def __init__(self, a, b, **kwargs):
"""
Create a new composite transform that is the result of
applying transform *a* then transform *b*.
Both *a* and *b* must be instances of :class:`Affine2DBase`.
You will generally not call this constructor directly but use
the :func:`composite_transform_factory` function instead,
which can automatically choose the best kind of composite
transform instance to create.
"""
assert a.output_dims == b.input_dims
self.input_dims = a.input_dims
self.output_dims = b.output_dims
assert a.is_affine
assert b.is_affine
Affine2DBase.__init__(self, **kwargs)
self._a = a
self._b = b
self.set_children(a, b)
self._mtx = None
if DEBUG:
def __str__(self):
return '(%s, %s)' % (self._a, self._b)
@property
def depth(self):
return self._a.depth + self._b.depth
def _iter_break_from_left_to_right(self):
for lh_compliment, rh_compliment in self._a._iter_break_from_left_to_right():
yield lh_compliment, rh_compliment + self._b
for lh_compliment, rh_compliment in self._b._iter_break_from_left_to_right():
yield self._a + lh_compliment, rh_compliment
def __repr__(self):
return "CompositeAffine2D(%r, %r)" % (self._a, self._b)
def get_matrix(self):
if self._invalid:
self._mtx = np.dot(
self._b.get_matrix(),
self._a.get_matrix())
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def composite_transform_factory(a, b):
"""
Create a new composite transform that is the result of applying
transform a then transform b.
Shortcut versions of the blended transform are provided for the
case where both child transforms are affine, or one or the other
is the identity transform.
Composite transforms may also be created using the '+' operator,
e.g.::
c = a + b
"""
# check to see if any of a or b are IdentityTransforms. We use
# isinstance here to guarantee that the transforms will *always*
# be IdentityTransforms. Since TransformWrappers are mutable,
# use of equality here would be wrong.
if isinstance(a, IdentityTransform):
return b
elif isinstance(b, IdentityTransform):
return a
elif isinstance(a, Affine2D) and isinstance(b, Affine2D):
return CompositeAffine2D(a, b)
return CompositeGenericTransform(a, b)
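# Illustrative usage sketch (hypothetical values): the factory (also reached
# via 'a + b') drops identities and picks the affine-only composite when it can:
#     s = Affine2D().scale(3.0)
#     composite_transform_factory(s, IdentityTransform()) is s          # True
#     (s + Affine2D().translate(1.0, 2.0)).transform(np.array([[1.0, 1.0]]))
#     # scale then shift -> (4, 5)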
class BboxTransform(Affine2DBase):
"""
:class:`BboxTransform` linearly transforms points from one
:class:`Bbox` to another :class:`Bbox`.
"""
is_separable = True
def __init__(self, boxin, boxout, **kwargs):
"""
Create a new :class:`BboxTransform` that linearly transforms
points from *boxin* to *boxout*.
"""
assert boxin.is_bbox
assert boxout.is_bbox
Affine2DBase.__init__(self, **kwargs)
self._boxin = boxin
self._boxout = boxout
self.set_children(boxin, boxout)
self._mtx = None
self._inverted = None
def __repr__(self):
return "BboxTransform(%r, %r)" % (self._boxin, self._boxout)
def get_matrix(self):
if self._invalid:
inl, inb, inw, inh = self._boxin.bounds
outl, outb, outw, outh = self._boxout.bounds
x_scale = outw / inw
y_scale = outh / inh
if DEBUG and (x_scale == 0 or y_scale == 0):
raise ValueError("Transforming from or to a singular bounding box.")
self._mtx = np.array([[x_scale, 0.0 , (-inl*x_scale+outl)],
[0.0 , y_scale, (-inb*y_scale+outb)],
[0.0 , 0.0 , 1.0 ]],
np.float_)
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
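# Illustrative usage sketch (hypothetical values): mapping one box onto another,
# e.g. data limits onto a pixel viewport:
#     data_box = Bbox.from_bounds(0.0, 0.0, 10.0, 10.0)
#     pixel_box = Bbox.from_bounds(0.0, 0.0, 640.0, 480.0)
#     BboxTransform(data_box, pixel_box).transform(np.array([[5.0, 5.0]]))
#     # centre of the data box -> (320, 240)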
class BboxTransformTo(Affine2DBase):
"""
:class:`BboxTransformTo` is a transformation that linearly
transforms points from the unit bounding box to a given
:class:`Bbox`.
"""
is_separable = True
def __init__(self, boxout, **kwargs):
"""
Create a new :class:`BboxTransformTo` that linearly transforms
points from the unit bounding box to *boxout*.
"""
assert boxout.is_bbox
Affine2DBase.__init__(self, **kwargs)
self._boxout = boxout
self.set_children(boxout)
self._mtx = None
self._inverted = None
def __repr__(self):
return "BboxTransformTo(%r)" % (self._boxout)
def get_matrix(self):
if self._invalid:
outl, outb, outw, outh = self._boxout.bounds
if DEBUG and (outw == 0 or outh == 0):
raise ValueError("Transforming to a singular bounding box.")
self._mtx = np.array([[outw, 0.0, outl],
[ 0.0, outh, outb],
[ 0.0, 0.0, 1.0]],
np.float_)
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
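# Illustrative usage sketch (hypothetical values): the unit square is mapped
# onto the target box:
#     BboxTransformTo(Bbox.from_bounds(10.0, 20.0, 100.0, 200.0)).transform(
#         np.array([[0.5, 0.5]]))   # -> (60, 120)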
class BboxTransformToMaxOnly(BboxTransformTo):
"""
:class:`BboxTransformToMaxOnly` is a transformation that linearly
transforms points from the unit bounding box to a given
:class:`Bbox` with a fixed upper left of (0, 0).
"""
def __repr__(self):
return "BboxTransformToMaxOnly(%r)" % (self._boxout)
def get_matrix(self):
if self._invalid:
xmax, ymax = self._boxout.max
if DEBUG and (xmax == 0 or ymax == 0):
raise ValueError("Transforming to a singular bounding box.")
self._mtx = np.array([[xmax, 0.0, 0.0],
[ 0.0, ymax, 0.0],
[ 0.0, 0.0, 1.0]],
np.float_)
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class BboxTransformFrom(Affine2DBase):
"""
:class:`BboxTransformFrom` linearly transforms points from a given
:class:`Bbox` to the unit bounding box.
"""
is_separable = True
def __init__(self, boxin, **kwargs):
assert boxin.is_bbox
Affine2DBase.__init__(self, **kwargs)
self._boxin = boxin
self.set_children(boxin)
self._mtx = None
self._inverted = None
def __repr__(self):
return "BboxTransformFrom(%r)" % (self._boxin)
def get_matrix(self):
if self._invalid:
inl, inb, inw, inh = self._boxin.bounds
if DEBUG and (inw == 0 or inh == 0):
raise ValueError("Transforming from a singular bounding box.")
x_scale = 1.0 / inw
y_scale = 1.0 / inh
self._mtx = np.array([[x_scale, 0.0 , (-inl*x_scale)],
[0.0 , y_scale, (-inb*y_scale)],
[0.0 , 0.0 , 1.0 ]],
np.float_)
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class ScaledTranslation(Affine2DBase):
"""
A transformation that translates by *xt* and *yt*, after *xt* and *yt*
have been transformed by the given transform *scale_trans*.
"""
def __init__(self, xt, yt, scale_trans, **kwargs):
Affine2DBase.__init__(self, **kwargs)
self._t = (xt, yt)
self._scale_trans = scale_trans
self.set_children(scale_trans)
self._mtx = None
self._inverted = None
def __repr__(self):
return "ScaledTranslation(%r)" % (self._t,)
def get_matrix(self):
if self._invalid:
xt, yt = self._scale_trans.transform_point(self._t)
self._mtx = np.array([[1.0, 0.0, xt],
[0.0, 1.0, yt],
[0.0, 0.0, 1.0]],
np.float_)
self._invalid = 0
self._inverted = None
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
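# Illustrative usage sketch (hypothetical values): an offset expressed in one
# coordinate system (here inches, via a 72 dots-per-inch scale) applied in
# another:
#     shift = ScaledTranslation(1.0, 0.5, Affine2D().scale(72.0))
#     shift.transform(np.array([[0.0, 0.0]]))   # origin -> (72, 36) dots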
class TransformedPath(TransformNode):
"""
A :class:`TransformedPath` caches a non-affine transformed copy of
the :class:`~matplotlib.path.Path`. This cached copy is
automatically updated when the non-affine part of the transform
changes.
.. note::
Paths are considered immutable by this class. Any update to the
path's vertices/codes will not trigger a transform recomputation.
"""
def __init__(self, path, transform):
"""
Create a new :class:`TransformedPath` from the given
:class:`~matplotlib.path.Path` and :class:`Transform`.
"""
assert isinstance(transform, Transform)
TransformNode.__init__(self)
self._path = path
self._transform = transform
self.set_children(transform)
self._transformed_path = None
self._transformed_points = None
def _revalidate(self):
# only recompute if the invalidation includes the non_affine part of the transform
if ((self._invalid & self.INVALID_NON_AFFINE == self.INVALID_NON_AFFINE)
or self._transformed_path is None):
self._transformed_path = \
self._transform.transform_path_non_affine(self._path)
self._transformed_points = \
Path(self._transform.transform_non_affine(self._path.vertices),
None, self._path._interpolation_steps)
self._invalid = 0
def get_transformed_points_and_affine(self):
"""
Return a copy of the child path, with the non-affine part of
the transform already applied, along with the affine part of
the transform necessary to complete the transformation. Unlike
:meth:`get_transformed_path_and_affine`, no interpolation will
be performed.
"""
self._revalidate()
return self._transformed_points, self.get_affine()
def get_transformed_path_and_affine(self):
"""
Return a copy of the child path, with the non-affine part of
the transform already applied, along with the affine part of
the transform necessary to complete the transformation.
"""
self._revalidate()
return self._transformed_path, self.get_affine()
def get_fully_transformed_path(self):
"""
Return a fully-transformed copy of the child path.
"""
self._revalidate()
return self._transform.transform_path_affine(self._transformed_path)
def get_affine(self):
return self._transform.get_affine()
def nonsingular(vmin, vmax, expander=0.001, tiny=1e-15, increasing=True):
'''
Modify the endpoints of a range as needed to avoid singularities.
*vmin*, *vmax*
the initial endpoints.
*tiny*
threshold for the ratio of the interval to the maximum absolute
value of its endpoints. If the interval is smaller than
this, it will be expanded. This value should be around
1e-15 or larger; otherwise the interval will be approaching
the double precision resolution limit.
*expander*
fractional amount by which *vmin* and *vmax* are expanded if
the original interval is too small, based on *tiny*.
*increasing*: [True | False]
If True (default), swap *vmin*, *vmax* if *vmin* > *vmax*
Returns *vmin*, *vmax*, expanded and/or swapped if necessary.
If either input is inf or NaN, or if both inputs are 0,
returns -*expander*, *expander*.
'''
if (not np.isfinite(vmin)) or (not np.isfinite(vmax)):
return -expander, expander
swapped = False
if vmax < vmin:
vmin, vmax = vmax, vmin
swapped = True
if vmax - vmin <= max(abs(vmin), abs(vmax)) * tiny:
if vmax == 0 and vmin == 0:
vmin = -expander
vmax = expander
else:
vmin -= expander*abs(vmin)
vmax += expander*abs(vmax)
if swapped and not increasing:
vmin, vmax = vmax, vmin
return vmin, vmax
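# Illustrative usage sketch (hypothetical values):
#     nonsingular(0.0, 0.0)    # degenerate interval expanded -> (-0.001, 0.001)
#     nonsingular(10.0, 1.0)   # reversed endpoints swapped back -> (1.0, 10.0)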
def interval_contains(interval, val):
a, b = interval
return (
((a < b) and (a <= val and b >= val))
or (b <= val and a >= val))
def interval_contains_open(interval, val):
a, b = interval
return (
((a < b) and (a < val and b > val))
or (b < val and a > val))
def offset_copy(trans, fig=None, x=0.0, y=0.0, units='inches'):
'''
Return a new transform with an added offset.
args:
trans is any transform
kwargs:
fig is the current figure; it can be None if units are 'dots'
x, y give the offset
units is 'inches', 'points' or 'dots'
'''
if units == 'dots':
return trans + Affine2D().translate(x, y)
if fig is None:
raise ValueError('For units of inches or points a fig kwarg is needed')
if units == 'points':
x /= 72.0
y /= 72.0
elif not units == 'inches':
raise ValueError('units must be dots, points, or inches')
return trans + ScaledTranslation(x, y, fig.dpi_scale_trans)
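# Illustrative usage sketch (hypothetical values): with units='dots' no figure
# is required and the offset is a plain translation appended to the transform:
#     shifted = offset_copy(Affine2D().scale(2.0), x=3.0, y=4.0, units='dots')
#     shifted.transform(np.array([[1.0, 1.0]]))   # scale then offset -> (5, 6)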
| mit |
schets/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 14 | 6123 | import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
# Test NNDSVD behaviour on negative input
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
# Test that NNDSVD does not return negative values
data = np.abs(random_state.randn(10, 10))
for var in (None, 'a', 'ar'):
W, H = nmf._initialize_nmf(data, 10, variant=var, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
# the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
# Test model fit behaviour on negative input
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
# Test that the fit is not too far away
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
# Test that NMF.transform returns close values
# (transform uses scipy.optimize.nnls for now)
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF()
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
etkirsch/scikit-learn | sklearn/decomposition/tests/test_pca.py | 199 | 10949 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
# PCA on dense arrays
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
# mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
# Check that PCA and RandomizedPCA agree on the explained variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 3)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, axis=0))
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by RandomizedPCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by RandomizedPCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
    # Test that RandomizedPCA is invertible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
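    # (added comment) h is p times the negative differential entropy of a
    # N(0, 0.1**2) variable, i.e. the expected per-sample log-likelihood of the
    # isotropic Gaussian the data were drawn from, used here as a reference value.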
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
# Test that probabilistic PCA correctly separated different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
| bsd-3-clause |
shyamalschandra/scikit-learn | sklearn/preprocessing/__init__.py | 268 | 1319 | """
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from ._function_transformer import FunctionTransformer
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
__all__ = [
'Binarizer',
'FunctionTransformer',
'Imputer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'label_binarize',
]
| bsd-3-clause |
jblackburne/scikit-learn | doc/sphinxext/sphinx_gallery/notebook.py | 9 | 3565 | # -*- coding: utf-8 -*-
r"""
============================
Parser for Jupyter notebooks
============================
Class that holds the Ipython notebook information
"""
# Author: Óscar Nájera
# License: 3-clause BSD
from __future__ import division, absolute_import, print_function
import json
import os
import re
import sys
def ipy_notebook_skeleton():
"""Returns a dictionary with the elements of a Jupyter notebook"""
py_version = sys.version_info
notebook_skeleton = {
"cells": [],
"metadata": {
"kernelspec": {
"display_name": "Python " + str(py_version[0]),
"language": "python",
"name": "python" + str(py_version[0])
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": py_version[0]
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython" + str(py_version[0]),
"version": '{0}.{1}.{2}'.format(*sys.version_info[:3])
}
},
"nbformat": 4,
"nbformat_minor": 0
}
return notebook_skeleton
def rst2md(text):
"""Converts the RST text from the examples docstrigs and comments
into markdown text for the IPython notebooks"""
top_heading = re.compile(r'^=+$\s^([\w\s-]+)^=+$', flags=re.M)
text = re.sub(top_heading, r'# \1', text)
math_eq = re.compile(r'^\.\. math::((?:.+)?(?:\n+^ .+)*)', flags=re.M)
text = re.sub(math_eq,
lambda match: r'$${0}$$'.format(match.group(1).strip()),
text)
inline_math = re.compile(r':math:`(.+)`')
text = re.sub(inline_math, r'$\1$', text)
return text
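# Quick illustration (added for clarity, not part of the original source) of
# what rst2md produces, based on the regexes above; exact whitespace handling
# may differ slightly:
#
#   rst2md(":math:`x^2`")           -> "$x^2$"
#   rst2md(".. math::\n    a + b")  -> "$$a + b$$"
#   an over/underlined RST title such as "=====\nMy Title\n====="
#   is rewritten to the markdown heading "# My Title".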
class Notebook(object):
"""Ipython notebook object
Constructs the file cell-by-cell and writes it at the end"""
def __init__(self, file_name, target_dir):
"""Declare the skeleton of the notebook
Parameters
----------
file_name : str
original script file name, .py extension will be renamed
target_dir: str
directory where notebook file is to be saved
"""
self.file_name = file_name.replace('.py', '.ipynb')
self.write_file = os.path.join(target_dir, self.file_name)
self.work_notebook = ipy_notebook_skeleton()
self.add_code_cell("%matplotlib inline")
def add_code_cell(self, code):
"""Add a code cell to the notebook
Parameters
----------
code : str
Cell content
"""
code_cell = {
"cell_type": "code",
"execution_count": None,
"metadata": {"collapsed": False},
"outputs": [],
"source": [code.strip()]
}
self.work_notebook["cells"].append(code_cell)
def add_markdown_cell(self, text):
"""Add a markdown cell to the notebook
Parameters
----------
code : str
Cell content
"""
markdown_cell = {
"cell_type": "markdown",
"metadata": {},
"source": [rst2md(text)]
}
self.work_notebook["cells"].append(markdown_cell)
def save_file(self):
"""Saves the notebook to a file"""
with open(self.write_file, 'w') as out_nb:
json.dump(self.work_notebook, out_nb, indent=2)
| bsd-3-clause |
sonalranjit/SECS | SECS_trace.py | 2 | 1609 | __author__ = 'sonal'
import numpy as np
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
import os
from math import *
def polar_plot(grid, title):
#z = grid[:,8]
u = grid[:,8]
v = grid[:,9]
plt.figure(figsize=(18,18))
ax = plt.gca()
#m = Basemap(projection='npaeqd',boundinglat=20,lon_0=-100.,resolution='l')
m = Basemap(width=8000000, height=8000000, resolution='l', projection='lcc',\
lat_0=60,lon_0=-100.)
m.drawcoastlines()
m.drawparallels(np.arange(-80.,81,20.),labels=[1,0,0,0],fontsize=10)
m.drawmeridians(np.arange(-180.,181.,20.),labels=[0,0,0,1],fontsize=10)
x,y =m(grid[:,7],grid[:,6])
sc = m.scatter(x,y,s=abs(u),c=u,marker=',',cmap=cm.jet,alpha=0.9,edgecolors='none')
plt.title(title)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cb1 = plt.colorbar(sc,cax=cax)
cb1.set_label("mA/m",fontsize=18)
plt.savefig('GOCE_asc_EICSu_krigged_201104.png',bbox_inches='tight',pad_inches=0.2)
#plt.show()
def asc_desc(data):
asc = []
desc = []
lat = data[:,6]
for i in range(0,len(data)-1):
if lat[i+1] >= lat[i]:
asc.append(i)
else:
desc.append(i)
return asc, desc
SECS_data = np.loadtxt('EICS_201103_krigged.txt')
asc_idx, desc_idx= asc_desc(SECS_data)
asc_track = SECS_data[asc_idx,:]
desc_track = SECS_data[desc_idx,:]
polar_plot(asc_track,'GOCE Ascending EICS u component Krigged April, 2011') | gpl-2.0 |
rjonnal/zernike | __init__.py | 1 | 20006 | """This module contains functions for Zernike calculations. Mainly the private
function _zgen, a generator function for Zernike polynomials. The public
functions make use of _zgen to create height or slope maps in a unit
pupil, corresponding to individual Zernike terms.
Author: Ravi S. Jonnal / Werner Lab, UC Davis
Revision: 2.0 / 28 June 2014
"""
import numpy as np
from matplotlib import pyplot as plt
import sys
from time import sleep
import os
USE_CACHE_FILE = False
def fact(num):
"""Implementation of factorial function.
"""
# Check that the number is an integer.
assert(num%1==0)
# Check that $num\geq 0$.
assert(num>=0)
# Compute $num!$ recursively.
if num==0 or num==1:
return 1
else:
return num * fact(num-1)
def choose(a,b):
"""Binomial coefficient, implemented using
this module's factorial function.
See [here](http://www.encyclopediaofmath.org/index.php/Newton_binomial) for detail.
"""
assert(a>=b)
return fact(a)/(fact(b)*fact(a-b))
def splitEquation(eqStr,width,bookend):
if len(eqStr)<=width or len(eqStr)==0:
return eqStr
else:
spaceIndices = []
idx = 0
while idx>-1:
idx = eqStr.find(' ',idx+1)
spaceIndices.append(idx)
spaceIndices = spaceIndices[:-1]
idxList = [x for x in spaceIndices if x<width]
if len(idxList)==0:
return eqStr
else:
idx = idxList[-1]
head = eqStr[:idx]
innards = ' ' + bookend + '\n' + bookend
tail = splitEquation(eqStr[idx:],width,bookend)
test =head + innards + tail
return test
class Zernike:
def __init__(self):
if USE_CACHE_FILE:
cachedir = './cache/'
self._cachefn = os.path.join(cachedir,'zernike_cache.txt')
if not os.path.exists(cachedir):
os.makedirs(cachedir)
try:
self._termMatrix = np.loadtxt(self._cachefn).astype(np.int32)
except Exception as e:
print 'No term cache file. Creating.'
self._termMatrix = np.array([])
np.savetxt(self._cachefn,self._termMatrix)
# Make a dictionary of precomputed coefficients, using the cache file.
# This dictionary will be used to look up values when they exist in
# the dictionary, and will recompute them otherwise.
self._termDict = {}
if USE_CACHE_FILE:
for row in self._termMatrix:
n,m,kindIndex,s,j,k = row[:6]
t1,t2,t3,c,tXexp,tYexp = row[6:]
self._termDict[(n,m,kindIndex,s,j,k)] = (t1,t2,t3,c,tXexp,tYexp)
# The functions in this class can be asked for phase height,
# or partial x or partial y derivatives. 'Kind' refers to
# which of these is requested. Numerical encodings for 'kind'
# permit some arithmetical simplicity and generality
# (utilizing a number associated with the kind in a single
# equation, rather than having different sets of equations
# for each kind case).
self._kindDictionary = {}
self._kindDictionary['h'] = 0
self._kindDictionary['dx'] = 1
self._kindDictionary['dy'] = 2
def j2nm(self,j):
n = np.ceil((-3+np.sqrt(9+8*j))/2)
m = 2*j-n*(n+2)
return np.int(n),np.int(m)
def nm2j(self,n,m):
return np.int(n*(n+1)/2.0+(n+m)/2.0)
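    # Illustrative mapping between the single index j and (n, m), as computed
    # by the two helpers above (this appears to match the usual ANSI/OSA
    # single-index ordering):
    #   j=0 -> (0, 0), j=1 -> (1, -1), j=2 -> (1, 1),
    #   j=3 -> (2, -2), j=4 -> (2, 0), j=5 -> (2, 2)
    # and nm2j inverts j2nm, e.g. nm2j(2, 0) == 4.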
def _zeqn(self,n,m,kind='h',forceRecompute=False):
"""Return parameters sufficient for specifying a Zernike term
of desired order and azimuthal frequency.
        Given an order (or degree) n and azimuthal frequency m, and x-
and y- rectangular (Cartesian) coordinates, produce parameters
necessary for constructing the appropriate Zernike
representation.
An individual polynomial has the format:
$$ Z_n^m = \sqrt{c} \Sigma^j\Sigma^k [a_{jk}X^jY^k] $$
This function returns a tuple ($c$,cdict). $c$ is the square
of the normalizing coefficient $\sqrt{c}$, and cdict contains
key-value pairs (($j$,$k$),$a$), mapping the $X$ and $Y$
exponents ($j$ and $k$, respectively) onto polynomial term
coefficients ($a$). The resulting structure can be used to
compute the wavefront height or slope for arbitrary pupil
coordinates, or to generate string representations of the
polynomials.
Zernike terms are only defined when n and m have the same
parity (both odd or both even).
Please see Schwiegerling lecture notes in
/doc/supporting_docs/ for eqn. references.
Args:
n (int): The Zernike order or degree.
m (int): The azimuthal frequency.
kind (str): 'h', 'dx', or 'dy', for height, partial x
derivative (slope) or partial y derivative,
respectively.
Returns:
params (tuple): (c,cdict), with c being the normalizing
coefficient c and cdict being the map of exponent pairs
onto inner coefficients.
"""
absm = np.abs(m)
kindIndex = self._kindDictionary[kind.lower()]
if USE_CACHE_FILE:
# open cache file in append mode:
self._cacheHandle = file(self._cachefn,'a')
# check that n and m are both even or both odd
if (float(n-absm))%2.0:
errString = 'zernike._zgen error: ' + \
'parity of n and m are different; n = %d, m = %d'%(n,m)
sys.exit(errString)
# check that n is non-negative:
if n<0:
errString = 'zernike._zgen error: ' + \
'n must be non-negative; n = %d'%n
sys.exit(errString)
# $|m|$ must be less than or equal to $n$.
if abs(m)>n:
errString = 'zernike._zgen error: ' + \
'|m| must be less than or equal to n, but n=%d and m=%d.'%(n,m)
sys.exit(errString)
# These are the squares of the outer coefficients. It's useful
# to keep them this way for _convertToString, since we'd
# prefer to print the $\sqrt{}$ rather than a truncated irrational
# number.
if m==0:
outerCoef = n+1
else:
outerCoef = 2*(n+1)
srange = range((n-absm)/2+1)
cdict = {}
for s in srange:
jrange = range(((n-absm)/2)-s+1)
for j in jrange:
# Subtract 1 from absm to determine range,
# only when m<0.
if m<0:
krange = range((absm-1)/2+1)
else:
krange = range(absm/2+1)
for k in krange:
# If m==0, k must also be 0;
# see eqn. 13c, 19c, and 20c, each of which
# only sum over s and j, not k.
if m==0:
assert(k==0)
# For m==0 cases, n/2 is used in coef denominator. Make
# sure that n is even, or else n/2 is not well-defined
# because n is an integer.
if m==0:
assert n%2==0
# Check to see if calculations are cached.
# If so, use cached values; if not, recalculate.
cached = self._termDict.has_key((n,m,kindIndex,s,j,k))
if cached and not forceRecompute:
t1,t2,t3,c,tXexp,tYexp = self._termDict[(n,m,kindIndex,s,j,k)]
else:
# The coefficient for each term in this
# polynomial has the format: $$\frac{t1n}{t1d1
# t1d2 t1d3} t2 t3$$. These six terms are
# computed here.
t1n = ((-1)**(s+k))*fact(n-s)
t1d1 = fact(s)
t1d2 = fact((n + absm)/2-s)
t1d3 = fact((n - absm)/2-s)
t1 = t1n/(t1d1*t1d2*t1d3)
t2 = choose((n - absm)/2 - s, j)
t3 = choose(absm, 2*k + (m<0))
if kind.lower()=='h':
# The (implied) coefficient of the $X^a Y^b$
# term at the end of eqns. 13a-c.
c = 1
tXexp = n - 2*(s+j+k) - (m<0)
tYexp = 2*(j+k) + (m<0)
elif kind.lower()=='dx':
# The coefficient of the $X^a Y^b$ term at
# the end of eqns. 19a-c.
c = (n - 2*(s+j+k) - (m<0))
                            # Could calculate explicitly:
# $tXexp = X^{(n - 2*(s+j+k)- 1 - (m<0))}$
#
# However, piggy-backing on previous
# calculation of c speeds things up.
tXexp = c - 1
tYexp = 2*(j+k) + (m<0)
elif kind.lower()=='dy':
# The coefficient of the $X^a Y^b$ term at
# the end of eqns. 20a-c.
c = 2*(j+k) + (m<0)
tXexp = n - 2*(s+j+k) - (m<0)
tYexp = c - 1
else:
errString = 'zernike._zgen error: ' + \
'invalid kind \'%s\'; should be \'h\', \'dx\', or \'dy\'.'%kind
sys.exit(errString)
if not cached and USE_CACHE_FILE:
self._cacheHandle.write('%d\t'*12%(n,m,kindIndex,s,j,k,t1,t2,t3,c,tXexp,tYexp)+'\n')
ct123 = c*t1*t2*t3
# The key for the polynomial dictionary is the pair of X,Y
# coefficients.
termKey = (tXexp,tYexp)
# Leave this term out of the dictionary if its coefficient
# is 0.
if ct123:
# If we already have this term, add to its coefficient.
if cdict.has_key(termKey):
cdict[termKey] = cdict[termKey] + ct123
# If not, add it to the dictionary.
else:
cdict[termKey] = ct123
# Remove zeros to speed up computations later.
cdict = {key: value for key, value in cdict.items() if value}
return (outerCoef,cdict)
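    # Worked example (added for clarity, not part of the original module):
    # _zeqn(2, 0, 'h') should evaluate to (3, {(2, 0): 2, (0, 2): 2, (0, 0): -1}),
    # i.e. Z_2^0 = sqrt(3) [2X^2 + 2Y^2 - 1], the defocus term; feeding that
    # tuple to _convertToString below should yield the string
    # '\sqrt{3} [2X^{2} +2Y^{2} -1]'.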
def _convertToString(self,params):
"""Return a string representation of a Zernike polynomial.
This function takes a tuple, consisting of a squared
normalizing coefficient and dictionary of inner coefficients
and exponents, provided by _zeqn, and returns a string
representation of the polynomial, with LaTeX- style markup.
Example: a params of (10, {(3,4): 7, (2,5): -1}) would produce a
two-term polynomial '\sqrt{10} [7 X^3 Y^4 - X^2 Y^5]', which could be used in LaTeX,
pandoc, markdown, MathJax, or Word with MathType, to produce:
$$ \sqrt{10} [7 X^3 Y^4 - X^2 Y^5] $$
Args:
params (tuple): A pair consisting of an outer coefficient
$c$ and a dictionary mapping tuples (xexp,yexp) of
exponents onto the corresponding term coefficients.
Returns:
string: A string representation of the polynomial.
"""
c = params[0]
cdict = params[1]
keys = sorted(cdict.keys(), key=lambda tup: (tup[0]+tup[1],tup[0]))[::-1]
outstr = ''
firstKey = True
for key in keys:
coef = cdict[key]
if coef>0:
sign = '+'
else:
sign = '-'
coef = abs(coef)
if coef<0 or not firstKey:
outstr = outstr + '%s'%sign
if coef>1 or (key[0]==0 and key[1]==0):
outstr = outstr + '%d'%coef
if key[0]:
outstr = outstr + 'X^{%d}'%key[0]
if key[1]:
outstr = outstr + 'Y^{%d}'%key[1]
firstKey = False
outstr = outstr + ' '
outstr = outstr.strip()
if np.sqrt(float(c))%1.0<.00001:
cstr = '%d'%(np.sqrt(c))
else:
cstr = '\sqrt{%d}'%(c)
if len(outstr):
outstr = '%s [%s]'%(cstr,outstr)
else:
outstr = '%s'%(cstr)
return outstr
def _convertToSurface(self,params,X,Y,mask=None):
"""Return a phase map specified by a Zernike polynomial.
This function takes a tuple, consisting of a squared
normalizing coefficient and dictionary of inner coefficients
and exponents, provided by _zeqn, and x- and y- rectangular
(Cartesian) coordinates, and produces a phase map.
This function works by evaluating the polynomial expressed by
params at each coordinate specified by X and Y.
Args:
params (tuple): A pair consisting of an outer coefficient
$c$ and a dictionary mapping tuples (xexp,yexp) of
exponents onto the corresponding term coefficients.
X (float): A scalar, vector, or matrix of X coordinates in unit pupil.
Y (float): A scalar, vector, or matrix of Y coordinates in unit pupil.
kind (str): 'h', 'dx', or 'dy', for height, partial x derivative (slope)
or partial y derivative, respectively.
Returns:
float: height, dx, or dy; returned structure same size as X and Y.
"""
# Check that shapes of X and Y are equal (not necessarily square).
if not (X.shape[0]==Y.shape[0] and \
X.shape[1]==Y.shape[1]):
errString = 'zernike.getSurface error: ' + \
'X and Y must have the same shape, but X is %d x %d'%(X.shape[0],X.shape[1]) + \
'and Y is %d x %d'%(Y.shape[0],Y.shape[1])
sys.exit(errString)
if mask is None:
mask = np.ones(X.shape)
        normalizer = np.sqrt(params[0])
        matrix_out = np.zeros(X.shape)
        for item in params[1].items():
            matrix_out = matrix_out + item[1] * X**(item[0][0]) * Y**(item[0][1])
        matrix_out = matrix_out * normalizer
matrix_out = matrix_out * mask
return matrix_out
def getSurface(self,n,m,X,Y,kind='h',mask=None):
"""Return a phase map specified by a Zernike order and azimuthal frequency.
        Given an order (or degree) n and azimuthal frequency m, and x- and y-
rectangular (Cartesian) coordinates, produce a phase map of either height,
partial x derivative, or partial y derivative.
Zernike terms are only defined when n and m have the same parity (both odd
or both even).
The input X and Y values should be located inside a unit pupil, such that
$$\sqrt{X^2 + Y^2}\leq 1$$
Please see Schwiegerling lecture notes in /doc/supporting_docs/ for eqn.
references.
This function works by calling Zernike._zeqn to calculate the coefficients
and exponents of the polynomial, and then using the supplied X and Y
coordinates to produce the height map (or partial derivative).
Args:
n (int): The Zernike order or degree.
m (int): The azimuthal frequency.
X (float): A scalar, vector, or matrix of X coordinates in unit pupil.
Y (float): A scalar, vector, or matrix of Y coordinates in unit pupil.
kind (str): 'h', 'dx', or 'dy', for height, partial x derivative (slope)
or partial y derivative, respectively.
Returns:
float: height, dx, or dy; returned structure same size as X and Y.
"""
# Check that shapes of X and Y are equal (not necessarily square).
if not np.all(X.shape==Y.shape):
errString = 'zernike.getSurface error: ' + \
'X and Y must have the same shape, but X is %d x %d'%(X.shape[0],X.shape[1]) + \
'and Y is %d x %d'%(Y.shape[0],Y.shape[1])
sys.exit(errString)
if mask is None:
mask = np.ones(X.shape)
params = self._zeqn(n,m,kind)
normalizer = np.sqrt(params[0])
matrix_out = np.zeros(X.shape)
for item in params[1].items():
matrix_out = matrix_out + item[1] * X**(item[0][0]) * Y**(item[0][1])
matrix_out = matrix_out * normalizer
matrix_out = matrix_out * mask
return matrix_out
def getEquationString(self,n,m,kind='h',doubleDollar=False):
"""Return LaTeX-encoded of the Zernike polynomial specified by
order n, frequency m.
Args:
n (int): The Zernike order or degree.
m (int): The azimuthal frequency.
kind (str): 'h', 'dx', or 'dy', for height, partial x
derivative (slope) or partial y derivative,
respectively.
doubleDollar (bool): determines how to bookend the
polynomial string; True causes bookending with '$$', to
produce "display" math mode, whereas False would produce
a string suitable for inline use.
Returns:
str: a LaTeX representation of the Zernike polynomial
specified by n, m, and Kind.
"""
params = self._zeqn(n,m,kind)
rightString = self._convertToString(params)
if kind.lower()=='h':
leftString = 'Z^{%d}_{%d}'%(m,n)
elif kind.lower()=='dx':
leftString = '\\frac{\delta Z^{%d}_{%d}}{\delta x}'%(m,n)
elif kind.lower()=='dy':
leftString = '\\frac{\delta Z^{%d}_{%d}}{\delta y}'%(m,n)
else:
sys.exit('zernike.getEquationString: invalid kind %s'%kind)
if doubleDollar:
bookend = '$$'
else:
bookend = '$'
return '%s %s = %s %s'%(bookend,leftString,rightString,bookend)
def plotPolynomial(self,n,m,kind='h'):
"""Plot a polynomial surface specified by order n, frequency m, and kind.
Args:
n (int): The Zernike order or degree.
m (int): The azimuthal frequency.
kind (str): 'h', 'dx', or 'dy', for height, partial x
derivative (slope) or partial y derivative,
respectively.
Calling function/script required to provide a plotting context (e.g. pyplot.figure).
"""
from mpl_toolkits.mplot3d import Axes3D
N = 64
mask = np.zeros((N,N))
xx,yy = np.meshgrid(np.linspace(-1,1,N),np.linspace(-1,1,N))
d = np.sqrt(xx**2 + yy**2)
mask[np.where(d<1)] = 1
surface = self.getSurface(n,m,xx,yy,kind,mask)
surface = surface * mask
#plt.figure()
ax = plt.axes([0,.2,1,.8],projection='3d')
surf = ax.plot_wireframe(xx,yy,surface,rstride=1,cstride=1,color='k')
ax.view_init(elev=70., azim=40)
eqstr = self.getEquationString(n,m,kind)
eqstr = splitEquation(eqstr,160,'$')
print 'plotting %s'%eqstr
plt.axes([0,0,1,.2])
plt.xticks([])
plt.yticks([])
plt.box('off')
fontsize = 12
plt.text(0.5,0.5,eqstr,ha='center',va='center',fontsize=fontsize)
| gpl-2.0 |
palashahuja/pgmpy | pgmpy/estimators/MLE.py | 2 | 3259 | from pgmpy.estimators import BaseEstimator
from pgmpy.factors import TabularCPD
from pgmpy.models import BayesianModel
import numpy as np
class MaximumLikelihoodEstimator(BaseEstimator):
"""
Class used to compute parameters for a model using Maximum Likelihood Estimate.
Parameters
----------
model: pgmpy.models.BayesianModel or pgmpy.models.MarkovModel or pgmpy.models.NoisyOrModel
model for which parameter estimation is to be done
data: pandas DataFrame object
        dataframe object with column names the same as the variable names of the network
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.estimators import MaximumLikelihoodEstimator
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
... columns=['A', 'B', 'C', 'D', 'E'])
>>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
>>> estimator = MaximumLikelihoodEstimator(model, values)
"""
def __init__(self, model, data):
if not isinstance(model, BayesianModel):
raise NotImplementedError("Maximum Likelihood Estimate is only implemented of BayesianModel")
super().__init__(model, data)
def get_parameters(self):
"""
Method used to get parameters.
Returns
-------
parameters: list
List containing all the parameters. For Bayesian Model it would be list of CPDs'
for Markov Model it would be a list of factors
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.estimators import MaximumLikelihoodEstimator
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
... columns=['A', 'B', 'C', 'D', 'E'])
>>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
>>> estimator = MaximumLikelihoodEstimator(model, values)
>>> estimator.get_parameters()
"""
parameters = []
for node in self.model.nodes():
parents = self.model.get_parents(node)
if not parents:
state_counts = self.data.ix[:, node].value_counts()
cpd = TabularCPD(node, self.node_card[node],
state_counts.values[:, np.newaxis])
cpd.normalize()
parameters.append(cpd)
else:
parent_card = np.array([self.node_card[parent] for parent in parents])
var_card = self.node_card[node]
state_counts = self.data.groupby([node] + self.model.predecessors(node)).count()
values = state_counts.iloc[:, 0].reshape(var_card,
np.product(parent_card))
cpd = TabularCPD(node, var_card, values,
evidence=parents,
evidence_card=parent_card.astype('int'))
cpd.normalize()
parameters.append(cpd)
return parameters
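# Note (added for clarity, not part of the original module): the CPDs returned
# above are plain maximum likelihood estimates, i.e. normalized frequency
# counts from the data. For a node X with parent configuration j, the estimate
# is simply
#     P(X = k | parents = j) = N_jk / sum_k' N_jk'
# which is what the TabularCPD(...).normalize() calls compute.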
| mit |
B3AU/waveTree | examples/ensemble/plot_bias_variance.py | 6 | 7330 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due to the variability in the data.
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves in the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <g.louppe@gmail.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
x = x.ravel()
return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
X = np.random.rand(n_samples) * 10 - 5
X = np.sort(X)
if n_repeat == 1:
y = f(X) + np.random.normal(0.0, noise, n_samples)
else:
y = np.zeros((n_samples, n_repeat))
for i in range(n_repeat):
y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)
X = X.reshape((n_samples, 1))
return X, y
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
    for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
y_error = np.zeros(n_test)
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
plt.plot(X_test, np.mean(y_predict, axis=1), "c",
label="$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
plt.plot(X_test, y_var, "g", label="$variance(x)$"),
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.show()
| bsd-3-clause |
alorenzo175/pvlib-python | pvlib/test/test_forecast.py | 1 | 5733 | from datetime import datetime, timedelta
from pytz import timezone
import warnings
import pandas as pd
import pytest
from numpy.testing import assert_allclose
from conftest import requires_siphon, has_siphon, skip_windows
pytestmark = pytest.mark.skipif(not has_siphon, reason='requires siphon')
if has_siphon:
with warnings.catch_warnings():
# don't emit import warning
warnings.simplefilter("ignore")
from pvlib.forecast import GFS, HRRR_ESRL, HRRR, NAM, NDFD, RAP
# setup times and location to be tested. Tucson, AZ
_latitude = 32.2
_longitude = -110.9
_tz = 'US/Arizona'
_start = pd.Timestamp.now(tz=_tz)
_end = _start + pd.Timedelta(days=1)
_modelclasses = [
GFS, NAM, HRRR, NDFD, RAP,
pytest.param(
HRRR_ESRL, marks=[
skip_windows,
pytest.mark.xfail(reason="HRRR_ESRL is unreliable"),
pytest.mark.timeout(timeout=60),
pytest.mark.filterwarnings('ignore:.*experimental')])]
_working_models = []
_variables = ['temp_air', 'wind_speed', 'total_clouds', 'low_clouds',
'mid_clouds', 'high_clouds', 'dni', 'dhi', 'ghi']
_nonnan_variables = ['temp_air', 'wind_speed', 'total_clouds', 'dni',
'dhi', 'ghi']
else:
_modelclasses = []
# make a model object for each model class
# get the data for that model and store it in an
# attribute for further testing
@requires_siphon
@pytest.fixture(scope='module', params=_modelclasses)
def model(request):
amodel = request.param()
try:
raw_data = amodel.get_data(_latitude, _longitude, _start, _end)
except Exception as e:
warnings.warn('Exception getting data for {}.\n'
'latitude, longitude, start, end = {} {} {} {}\n{}'
.format(amodel, _latitude, _longitude, _start, _end, e))
raw_data = pd.DataFrame() # raw_data.empty will be used later
amodel.raw_data = raw_data
return amodel
@requires_siphon
def test_process_data(model):
for how in ['liujordan', 'clearsky_scaling']:
if model.raw_data.empty:
warnings.warn('Could not test {} process_data with how={} '
'because raw_data was empty'.format(model, how))
continue
data = model.process_data(model.raw_data, how=how)
for variable in _nonnan_variables:
try:
assert not data[variable].isnull().values.any()
except AssertionError:
warnings.warn('{}, {}, data contained null values'
.format(model, variable))
@requires_siphon
def test_bad_kwarg_get_data():
# For more information on why you would want to pass an unknown keyword
# argument, see Github issue #745.
amodel = NAM()
data = amodel.get_data(_latitude, _longitude, _start, _end,
bad_kwarg=False)
assert not data.empty
@requires_siphon
def test_bad_kwarg_get_processed_data():
# For more information on why you would want to pass an unknown keyword
# argument, see Github issue #745.
amodel = NAM()
data = amodel.get_processed_data(_latitude, _longitude, _start, _end,
bad_kwarg=False)
assert not data.empty
@requires_siphon
def test_how_kwarg_get_processed_data():
amodel = NAM()
data = amodel.get_processed_data(_latitude, _longitude, _start, _end,
how='clearsky_scaling')
assert not data.empty
@requires_siphon
def test_vert_level():
amodel = NAM()
vert_level = 5000
amodel.get_processed_data(_latitude, _longitude, _start, _end,
vert_level=vert_level)
@requires_siphon
def test_datetime():
amodel = NAM()
start = datetime.now()
end = start + timedelta(days=1)
amodel.get_processed_data(_latitude, _longitude, start, end)
@requires_siphon
def test_queryvariables():
amodel = GFS()
new_variables = ['u-component_of_wind_height_above_ground']
data = amodel.get_data(_latitude, _longitude, _start, _end,
query_variables=new_variables)
data['u-component_of_wind_height_above_ground']
@requires_siphon
def test_latest():
GFS(set_type='latest')
@requires_siphon
def test_full():
GFS(set_type='full')
@requires_siphon
def test_temp_convert():
amodel = GFS()
data = pd.DataFrame({'temp_air': [273.15]})
data['temp_air'] = amodel.kelvin_to_celsius(data['temp_air'])
assert_allclose(data['temp_air'].values, 0.0)
# @requires_siphon
# def test_bounding_box():
# amodel = GFS()
# latitude = [31.2,32.2]
# longitude = [-111.9,-110.9]
# new_variables = {'temperature':'Temperature_surface'}
# data = amodel.get_query_data(latitude, longitude, _start, _end,
# variables=new_variables)
@requires_siphon
def test_set_location():
amodel = GFS()
latitude, longitude = 32.2, -110.9
time = datetime.now(timezone('UTC'))
amodel.set_location(time, latitude, longitude)
def test_cloud_cover_to_transmittance_linear():
amodel = GFS()
assert_allclose(amodel.cloud_cover_to_transmittance_linear(0), 0.75)
assert_allclose(amodel.cloud_cover_to_transmittance_linear(100), 0.0)
assert_allclose(amodel.cloud_cover_to_transmittance_linear(0, 0.5), 0.5)
def test_cloud_cover_to_ghi_linear():
amodel = GFS()
ghi_clear = 1000
offset = 25
out = amodel.cloud_cover_to_ghi_linear(0, ghi_clear, offset=offset)
assert_allclose(out, 1000)
out = amodel.cloud_cover_to_ghi_linear(100, ghi_clear, offset=offset)
assert_allclose(out, 250)
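# Note (added, not part of the original tests): the expected values above are
# consistent with a simple linear mixing model of the form
#     ghi = ghi_clear * (offset + (100 - offset) * (1 - cloud_cover / 100.)) / 100.
# (cloud_cover and offset in percent), assuming that is indeed how
# cloud_cover_to_ghi_linear is implemented in pvlib.forecast.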
| bsd-3-clause |
abele/bokeh | examples/plotting/file/boxplot.py | 43 | 2269 | import numpy as np
import pandas as pd
from bokeh.plotting import figure, show, output_file
# Generate some synthetic time series for six different categories
cats = list("abcdef")
yy = np.random.randn(2000)
g = np.random.choice(cats, 2000)
for i, l in enumerate(cats):
yy[g == l] += i // 2
df = pd.DataFrame(dict(score=yy, group=g))
# Find the quartiles and IQR for each category
groups = df.groupby('group')
q1 = groups.quantile(q=0.25)
q2 = groups.quantile(q=0.5)
q3 = groups.quantile(q=0.75)
iqr = q3 - q1
upper = q3 + 1.5*iqr
lower = q1 - 1.5*iqr
# find the outliers for each category
def outliers(group):
cat = group.name
return group[(group.score > upper.loc[cat][0]) | (group.score < lower.loc[cat][0])]['score']
out = groups.apply(outliers).dropna()
# Prepare outlier data for plotting, we need coordinate for every outlier.
outx = []
outy = []
for cat in cats:
# only add outliers if they exist
if not out.loc[cat].empty:
for value in out[cat]:
outx.append(cat)
outy.append(value)
output_file("boxplot.html")
p = figure(tools="save", background_fill="#EFE8E2", title="", x_range=cats)
# If no outliers, shrink lengths of stems to be no longer than the minimums or maximums
qmin = groups.quantile(q=0.00)
qmax = groups.quantile(q=1.00)
upper.score = [min([x,y]) for (x,y) in zip(list(qmax.iloc[:,0]),upper.score) ]
lower.score = [max([x,y]) for (x,y) in zip(list(qmin.iloc[:,0]),lower.score) ]
# stems
p.segment(cats, upper.score, cats, q3.score, line_width=2, line_color="black")
p.segment(cats, lower.score, cats, q1.score, line_width=2, line_color="black")
# boxes
p.rect(cats, (q3.score+q2.score)/2, 0.7, q3.score-q2.score,
fill_color="#E08E79", line_width=2, line_color="black")
p.rect(cats, (q2.score+q1.score)/2, 0.7, q2.score-q1.score,
fill_color="#3B8686", line_width=2, line_color="black")
# whiskers (almost-0 height rects simpler than segments)
p.rect(cats, lower.score, 0.2, 0.01, line_color="black")
p.rect(cats, upper.score, 0.2, 0.01, line_color="black")
# outliers
p.circle(outx, outy, size=6, color="#F38630", fill_alpha=0.6)
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = "white"
p.grid.grid_line_width = 2
p.xaxis.major_label_text_font_size="12pt"
show(p)
| bsd-3-clause |
RuthAngus/LSST-max | code/GP_periodogram.py | 1 | 1066 | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from GProtation import make_plot, lnprob, neglnlike
import emcee
import time
import george
from george.kernels import ExpSquaredKernel, ExpSine2Kernel
import scipy.optimize as spo
def GP_periodogram(x, y, yerr, p_init, plims, N):
"""
    This function takes a light curve and attempts to produce a GP periodogram.
It returns the value of the highest peak.
The kernel hyperparameters are optimised over a grid of periods.
This is also a "profile likelihood".
x, y, yerr: the light curve.
p_init: the initial guess for the period.
plims: the (log) boundaries for the grid.
N: the number of grid points.
"""
# create the grid
    periods = np.linspace(np.exp(plims[0]), np.exp(plims[1]), N)
# initial hyperparameters
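    # Sketch of the intended grid search (added as a hedged outline, since the
    # original function body is incomplete): for each trial period in
    # `periods`, one would typically fix the periodic hyperparameter of a
    # quasi-periodic kernel (e.g. ExpSquaredKernel * ExpSine2Kernel from
    # george), optimise the remaining hyperparameters by minimising
    # `neglnlike` (imported above) with scipy.optimize, record the maximised
    # likelihood, and finally return the period whose optimised likelihood is
    # highest.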
if __name__ == "__main__":
# fake data
    x = np.linspace(0, 10, 100)
p = 2
err = .1
y = np.sin(2*np.pi*(1./p)*x) + np.random.randn(100)*err
yerr = np.ones_like(y) * err
    p_init, plims = 2, np.log([.1, 5])
GP_periodogram(x, y, yerr, p_init, plims, 10)
| mit |
locksmithone/qcnsim | tag/20140102/doc/validations/weibull/weibullGenerator.py | 3 | 3655 | import numpy
#import scipy
import matplotlib.pyplot
import math
def weibullGenerator(scale, shape, start, end, step):
'''
Generates Weibull sample lists per the parameters.
Returns two lists of X and Y values distributed per Weibull.
'''
weibullSamplesY = []
weibullSamplesX = []
for i in numpy.arange(start, end, step):
weibullSamplesY.append((shape/scale)*((i/scale)**(shape-1.0))*(math.exp(-(i/scale)**shape)))
weibullSamplesX.append(i)
return weibullSamplesX, weibullSamplesY
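# For reference, the loop above evaluates the Weibull probability density
#     f(x; scale, shape) = (shape/scale) * (x/scale)**(shape - 1)
#                          * exp(-(x/scale)**shape)
# on a regular grid from `start` to `end` with spacing `step`.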
def readValuesFromFile(filename):
'''
Reads values from a file and returns a list of floats.
'''
yValues = [] # Y values to be read from file.
fileHandle = open(filename, 'r') # Opens file for reading.
#yValues= list(fileHandle) # Read all values into yValues.
for line in fileHandle:
yValues.append(float(line.rstrip()))
fileHandle.close()
return yValues
# Now construct a map of parameters per Weibull samples.
# Key is filename with samples, value is list of lists:
# list1 is weibullGenerator parameters to generate a Weibull graph from matplotlib,
# list2 is set of parameters to plot the samples from the filename.
weibullParameterMap = {
'weibull_Scale1.0_Shape5.0.csv': [[1.0,5.0,0.0,3.0,.01], [0.0,3.0,0.0,2.0]],
'weibull_Scale1.0_Shape1.0.csv': [[1.0,1.0,0.0,3.0,.01], [0.0,3.0,0.0,1.0]],
'weibull_Scale1.0_Shape2.0.csv': [[1.0,2.0,0.0,3.0,.01], [0.0,3.0,0.0,0.9]],
'weibull_Scale1.0_Shape0.5.csv': [[1.0,0.5,0.0,3.0,.01], [0.0,3.0,0.0,5.0]]
}
# Iterate through dictionary and generate graphs.
for filename, parameters in weibullParameterMap.items():
weibullSamplesX, weibullSamplesY = weibullGenerator(parameters[0][0], parameters[0][1],
parameters[0][2], parameters[0][3],
parameters[0][4])
print("Parameters: ", parameters)
matplotlib.pyplot.figure()
matplotlib.pyplot.plot(weibullSamplesX, weibullSamplesY)
matplotlib.pyplot.grid(True)
matplotlib.pyplot.xlabel("x values")
matplotlib.pyplot.ylabel("Probability")
matplotlib.pyplot.title('Weibull pdf: ' + str(parameters[0]))
matplotlib.pyplot.savefig(filename + '_pdf.png')
matplotlib.pyplot.show()
#matplotlib.pyplot.close()
ySamples = readValuesFromFile(filename)
matplotlib.pyplot.figure()
matplotlib.pyplot.hist(ySamples, bins=300, range=(parameters[1][0], parameters[1][1]),
normed=True, color='r')
matplotlib.pyplot.axis(parameters[1])
matplotlib.pyplot.grid(True)
matplotlib.pyplot.xlabel("x values")
matplotlib.pyplot.ylabel("Probability")
matplotlib.pyplot.title('Samples from ' + filename)
matplotlib.pyplot.savefig(filename + '_sample.png')
matplotlib.pyplot.show()
#matplotlib.pyplot.close()
print("*** Done! ***")
##print (weibullGenerator(1.0,5.0,0.0,3.0,.001))
#weibullSamplesX, weibullSamplesY = weibullGenerator(1.0,5.0,0.0,3.0,.001)
#matplotlib.pyplot.figure()
##matplotlib.pyplot.hist(weibullSamplesY, 100)
#matplotlib.pyplot.plot(weibullSamplesX, weibullSamplesY)
#matplotlib.pyplot.grid(True)
#matplotlib.pyplot.show()
##matplotlib.pyplot.close()
#print("Done Figure 1.")
#ySamples = readValuesFromFile('weibull_Scale1.0_Shape5.0.csv')
#print("Done reading.")
##print (ySamples)
#matplotlib.pyplot.figure()
#matplotlib.pyplot.hist(ySamples, 500)
#matplotlib.pyplot.axis([0.0,3.0,0.0,650])
#matplotlib.pyplot.grid(True)
#matplotlib.pyplot.show()
##matplotlib.pyplot.close()
#print("Done Figure 2.")
| lgpl-2.1 |
toastedcornflakes/scikit-learn | sklearn/linear_model/tests/test_base.py | 83 | 15089 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from itertools import product
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import _preprocess_data
from sklearn.linear_model.base import sparse_center_data, center_data
from sklearn.linear_model.base import _rescale_data
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
rng = np.random.RandomState(0)
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
reg = LinearRegression()
reg.fit(X, Y)
assert_array_almost_equal(reg.coef_, [1])
assert_array_almost_equal(reg.intercept_, [0])
assert_array_almost_equal(reg.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
reg = LinearRegression()
reg.fit(X, Y)
assert_array_almost_equal(reg.coef_, [0])
assert_array_almost_equal(reg.intercept_, [0])
assert_array_almost_equal(reg.predict(X), [0])
def test_linear_regression_sample_weights():
# TODO: loop over sparse data as well
rng = np.random.RandomState(0)
# It would not work with under-determined systems
for n_samples, n_features in ((6, 5), ):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
for intercept in (True, False):
# LinearRegression with explicit sample_weight
reg = LinearRegression(fit_intercept=intercept)
reg.fit(X, y, sample_weight=sample_weight)
coefs1 = reg.coef_
inter1 = reg.intercept_
assert_equal(reg.coef_.shape, (X.shape[1], )) # sanity checks
assert_greater(reg.score(X, y), 0.5)
# Closed form of the weighted least square
# theta = (X^T W X)^(-1) * X^T W y
W = np.diag(sample_weight)
if intercept is False:
X_aug = X
else:
dummy_column = np.ones(shape=(n_samples, 1))
X_aug = np.concatenate((dummy_column, X), axis=1)
coefs2 = linalg.solve(X_aug.T.dot(W).dot(X_aug),
X_aug.T.dot(W).dot(y))
if intercept is False:
assert_array_almost_equal(coefs1, coefs2)
else:
assert_array_almost_equal(coefs1, coefs2[1:])
assert_almost_equal(inter1, coefs2[0])
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
reg = LinearRegression()
# make sure the "OK" sample weights actually work
reg.fit(X, y, sample_weights_OK)
reg.fit(X, y, sample_weights_OK_1)
reg.fit(X, y, sample_weights_OK_2)
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
# Test that linear regression also works with sparse data
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.predict(X) - y.ravel(), 0)
def test_linear_regression_multiple_outcome(random_state=0):
# Test multiple-outcome linear regressions
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
reg = LinearRegression(fit_intercept=True)
reg.fit((X), Y)
assert_equal(reg.coef_.shape, (2, n_features))
Y_pred = reg.predict(X)
reg.fit(X, y)
y_pred = reg.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
# Test multiple-outcome linear regressions with sparse data
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_preprocess_data():
n_samples = 200
n_features = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
expected_X_norm = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=False, normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_norm)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_preprocess_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [X, sparse.csc_matrix(X)]
for X in args:
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_preprocess_data_weighted():
n_samples = 200
n_features = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
expected_X_norm = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_norm)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_preprocess_data_with_return_mean():
n_samples = 200
n_features = 2
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
expected_X_norm = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=False, normalize=False,
return_mean=True)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False,
return_mean=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True,
return_mean=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt.A, XA / expected_X_norm)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_preprocess_data():
# Test output format of _preprocess_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = _preprocess_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
def test_rescale_data():
n_samples = 200
n_features = 2
sample_weight = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)
rescaled_X2 = X * np.sqrt(sample_weight)[:, np.newaxis]
rescaled_y2 = y * np.sqrt(sample_weight)
assert_array_almost_equal(rescaled_X, rescaled_X2)
assert_array_almost_equal(rescaled_y, rescaled_y2)
@ignore_warnings # all deprecation warnings
def test_deprecation_center_data():
n_samples = 200
n_features = 2
w = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
param_grid = product([True, False], [True, False], [True, False],
[None, w])
for (fit_intercept, normalize, copy, sample_weight) in param_grid:
XX = X.copy() # such that we can try copy=False as well
X1, y1, X1_mean, X1_var, y1_mean = \
center_data(XX, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight)
XX = X.copy()
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(XX, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight)
assert_array_almost_equal(X1, X2)
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
# Sparse cases
X = sparse.csr_matrix(X)
for (fit_intercept, normalize, copy, sample_weight) in param_grid:
X1, y1, X1_mean, X1_var, y1_mean = \
center_data(X, y, fit_intercept=fit_intercept, normalize=normalize,
copy=copy, sample_weight=sample_weight)
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(X, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight, return_mean=False)
assert_array_almost_equal(X1.toarray(), X2.toarray())
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
for (fit_intercept, normalize) in product([True, False], [True, False]):
X1, y1, X1_mean, X1_var, y1_mean = \
sparse_center_data(X, y, fit_intercept=fit_intercept,
normalize=normalize)
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(X, y, fit_intercept=fit_intercept,
normalize=normalize, return_mean=True)
assert_array_almost_equal(X1.toarray(), X2.toarray())
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
| bsd-3-clause |
ccd-utexas/OLD-MAID | ProEMOnline.py | 2 | 57911 | # -*- coding: utf-8 -*-
"""
This script sets an initial layout for the ProEMOnline software. It uses the
PyQtGraph dockarea system and was designed from the dockarea.py example.
Keaton wrote this.
"""
#Import everything you'll need
from __future__ import absolute_import, division
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
import pickle #for saving layouts
from functools import partial
from glob import glob
from scipy import stats
from scipy.optimize import curve_fit
from scipy.fftpack import fft,fftfreq
import pandas as pd
import os
import subprocess
import csv
import sys
import time
import datetime as dt
import dateutil.parser
from astropy.io import fits
from scipy.interpolate import interp1d
import scipy.ndimage.filters as filters
from astropy.stats import biweight_location, biweight_midvariance
from photutils import daofind
from photutils import CircularAperture, CircularAnnulus, aperture_photometry
from pyqtgraph.dockarea import *
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
# Local modules.
import read_spe
#Return a string of the current time
def timestring():
date = dt.datetime.now()
return date.strftime('%Y%m%d_%Hh%Mm%Ss')
#Function to save a screenshot
def saveScreenshot():
ssfilename=os.path.splitext(spefile)[0]+'_'+timestring()+'.png'
log("Writing screenshot to file "+ssfilename,2)
p=QtGui.QPixmap.grabWidget(area)
writeout = p.save(ssfilename, 'png')
if not writeout: log("Saving screenshot failed!",3)
#### BEGIN PROGRAM ####
#The organization and behavior of the program are as follows:
#This program operates in four stages.
#Stage 0 - Program Initialized, waiting to open SPE file.
#Stage 1 - SPE file open, stars are being selected
#Stage 2 - Online data reduction and aperture photometry/plotting is being done.
#Stage 3 - End of data acquisition detected. Final data written to file. Timestamps verified. Log saved. Weather/time log data saved.
# -> revert back to Stage 0.
stage=0 #start at 0
def stagechange(num):
global stage
if num in range(4):
log("Program stage = "+str(num),1)
stage=num
else: log("Attempt to change stage to invalid value ("+str(num)+")",3)
#### STAGE 0 ####
#Set up the general GUI aspects
defaultdir = 'D:/sync_to_White_Dwarf_Archive/'#where to search for SPE files
#Set up main window with menu items
class WithMenu(QtGui.QMainWindow):
def __init__(self):
super(WithMenu, self).__init__()
self.initUI()
def initUI(self):
#SETUP THE MENUBAR!
#Note: Exit is protected on Mac. This works on Windows.
exitAction = QtGui.QAction('Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Exit application')
exitAction.triggered.connect(QtGui.qApp.quit)
#Open SPE
openFile = QtGui.QAction('&Open SPE', self)
openFile.setShortcut('Ctrl+O')
openFile.setStatusTip('Open SPE File')
#openFile.setCheckable(True)
openFile.triggered.connect(self.openSPE)
#Run Photometry
runPhot = QtGui.QAction('&Run Photometry', self)
runPhot.setShortcut('Ctrl+R')
runPhot.setStatusTip('Run Aperture Photometry on Frames')
runPhot.triggered.connect(self.run)
#Update FT
updateFT = QtGui.QAction('&Update FT', self)
updateFT.setShortcut('Ctrl+U')
updateFT.setStatusTip('Update Fourier Transform with Current Light Curve')
updateFT.triggered.connect(self.updateFTfunct)
#Run Autoguider
autoguide = QtGui.QAction('Feed to &Autoguider', self)
autoguide.setShortcut('Ctrl+A')
autoguide.setStatusTip('Send most recently acquired frame to Guide82')
autoguide.triggered.connect(self.toAutoguider)
#Load dark for science frames
loadDark = QtGui.QAction('Load Darks', self)
loadDark.setStatusTip('Open SPE Calibrations for Dark Subtracting Science Images')
loadDark.triggered.connect(self.openDark)
#Load dark for flat frames
loadDarkForFlats = QtGui.QAction('Load Darks for Flats', self)
loadDarkForFlats.setStatusTip('Open SPE Calibrations for Dark Subtracting Flat Images')
loadDarkForFlats.triggered.connect(self.openDarkForFlats)
#Load flat
loadFlat = QtGui.QAction('Load Flats', self)
loadFlat.setStatusTip('Open SPE Calibrations for Flatfielding Science Images')
loadFlat.triggered.connect(self.openFlat)
#Restore points
restorePoints = QtGui.QAction('Restore Points', self)
restorePoints.setStatusTip('Return All Previously Discarded Points to the Light Curve.')
restorePoints.triggered.connect(self.restorePts)
        #undo recently selected bad point
undo = QtGui.QAction('Undo Bad Point Selection', self)
undo.setShortcut('Ctrl+Z')
undo.setStatusTip('Return Most Recently Discarded Point to the Light Curve.')
undo.triggered.connect(self.undoBad)
#Save Layout
saveLayout = QtGui.QAction('Save Layout', self)
saveLayout.setStatusTip('Save the current dock layout')
saveLayout.triggered.connect(self.saveLayout)
#Load Layout
loadLayout = QtGui.QAction('Load Layout', self)
loadLayout.setStatusTip('Load a saved dock layout')
loadLayout.triggered.connect(self.loadLayout)
#changeSmoothing
changeSmoothing = QtGui.QAction('Change Smoothing', self)
changeSmoothing.setStatusTip('Change Light Curve Smoothing Parameters.')
changeSmoothing.triggered.connect(self.changeSmooth)
#save screenshot
screenshot = QtGui.QAction('Save Screenshot', self)
screenshot.setStatusTip('Save a Screenshot of the Main Window.')
savescreenshot = partial(saveScreenshot)
screenshot.triggered.connect(savescreenshot)
#Menubar
menubar = self.menuBar()
#File Menu
fileMenu = menubar.addMenu('File')
fileMenu.addAction(openFile)
fileMenu.addAction(runPhot)
fileMenu.addAction(updateFT)
fileMenu.addAction(autoguide)
fileMenu.addAction(exitAction)
#Calibrations Menu
calibrationsMenu = menubar.addMenu('Calibrations')
calibrationsMenu.addAction(loadDark)
calibrationsMenu.addAction(loadDarkForFlats)
calibrationsMenu.addAction(loadFlat)
#Interactions menu
interactionsMenu = menubar.addMenu('Interact')
interactionsMenu.addAction(restorePoints)
interactionsMenu.addAction(undo)
self.changeApertureMenu = interactionsMenu.addMenu('Select Aperture Size')
self.changeCompStarMenu = interactionsMenu.addMenu('Select Comp Star for Division')
interactionsMenu.addAction(changeSmoothing)
#Layout Menu
layoutMenu = menubar.addMenu('Layout')
layoutMenu.addAction(saveLayout)
layoutMenu.addAction(loadLayout)
#Output Menu
outputMenu = menubar.addMenu('Output')
outputMenu.addAction(screenshot)
#Functions to save and load layouts
layoutsDir = './layouts/'
layoutsExt = '.p'
def saveLayout(self):
layoutName, ok = QtGui.QInputDialog.getText(self, 'Save layout',
'Enter name for this layout:')
if ok:
#Save dict in pickle format
pickle.dump( area.saveState(), open( self.layoutsDir+layoutName+self.layoutsExt, "wb" ) )
def loadLayout(self):
layouts = glob(self.layoutsDir+'*'+self.layoutsExt)
if len(layouts) == 0:
_ = QtGui.QMessageBox.warning(self,'Load layout','No saved layouts found.')
else:
layouts = [layout[len(self.layoutsDir):-1*len(self.layoutsExt)] for layout in layouts]
layout, ok = QtGui.QInputDialog().getItem(self,'Load layout','Select layout: ',layouts)
if ok:
state = pickle.load(open(self.layoutsDir+layout+self.layoutsExt, "rb" ) )
area.restoreState(state)
#Function to open SPE files to operate on.
def openSPE(self):
'''
Select a new target SPE file to work on.
Open dialog box, select file, verify that it is a SPE file.
'''
global defaultdir,rundir
fname = str(QtGui.QFileDialog.getOpenFileName(self, 'Open SPE file',
defaultdir,filter='Data (*.spe)'))
if fname[-4:]=='.spe':
log("Opening file "+fname,1)
#Set the default directory to a couple levels up from this file
rundir = os.path.dirname(fname)
defaultdir = os.path.dirname(rundir)
#set target log text as filename to start
targetEdit.setText(os.path.basename(fname)[:-4])
#This needs to trigger a major chain of events
stage1(fname)
else: log("Invalid file type (must be SPE).",3)
#Update the FT at user's command
def updateFTfunct(self):
global framenum
updateft(i=framenum)
def toAutoguider(self):
if spefile != '':
log("Opening separate program to send incoming data to Guide82.",2)
subprocess.Popen(["python",os.path.join(os.path.dirname(os.path.abspath(__file__)),'toAutoguider.py'),spefile])
else:
log("Open SPE file first before trying to send data to Guide82.",3)
#Load Dark frames
def openDark(self):
global dark, darkExists, darkExp, darkDark
fname = str(QtGui.QFileDialog.getOpenFileName(self, 'Open dark file',
defaultdir,filter='Data (*.spe *.fits)'))
if fname[-4:]=='.spe':
log("Opening dark file "+fname,1)
dspe = read_spe.File(fname)
num_darks=dspe.get_num_frames()
#get all frames in SPE file
#stack as 3D numpy array
(frames,_)=dspe.get_frame(0)
frames=np.array([frames])
for i in range(1,num_darks):
(thisframe,_)=dspe.get_frame(i)
frames=np.concatenate((frames,[thisframe]),0)
dark=np.median(frames,axis=0)
darkExists = True
log("Mean dark counts: "+str(np.mean(dark)))
processframe()
displayFrame(autoscale=True,markstars=False)
#Write out master dark file as fits
#Set up header
prihdr = fits.Header()
prihdr['OBJECT'] = 'dark'
prihdr['IMAGETYP'] = 'dark'
prihdr['REDUCED'] = dt.datetime.now().isoformat()
prihdr['COMMENT'] = "Reduced by Keaton Bell's OLD MAID Software"
if hasattr(dspe, 'footer_metadata'):
footer_metadata = BeautifulSoup(dspe.footer_metadata, "xml")
ts_begin = footer_metadata.find(name='TimeStamp', event='ExposureStarted').attrs['absoluteTime']
dt_begin = dateutil.parser.parse(ts_begin)
prihdr['TICKRATE'] = int(footer_metadata.find(name='TimeStamp', event='ExposureStarted').attrs['resolution'])
prihdr['DATE-OBS'] = str(dt_begin.isoformat())
prihdr['XBINNING'] = footer_metadata.find(name="SensorMapping").attrs['xBinning']
prihdr['YBINNING'] = footer_metadata.find(name="SensorMapping").attrs['yBinning']
prihdr['INSTRUME'] = footer_metadata.find(name="Camera").attrs['model']
prihdr['TRIGGER'] = footer_metadata.find(name='TriggerResponse').text
prihdr['COMMENT'] = "SPE file has footer metadata"
darkExp=np.round(float(footer_metadata.find(name='ExposureTime').text)/1000.)
if darkExp != exptime:
log("Exp times for dark and science frames do not match!",3)
log("Exposure time for dark: "+str(darkExp)+" s")
prihdr['EXPTIME'] = str(float(footer_metadata.find(name='ExposureTime').text)/1000.)
#prihdr['SOFTWARE'] = footer_metadata.find(name='Origin')
prihdr['SHUTTER'] = footer_metadata.find(name='Mode').text
if footer_metadata.find(name='Mode').text != 'AlwaysClosed':
prihdr['WARNING'] = 'Shutter not closed for dark frame.'
log("Shutter not closed for dark frame.",3)
else:
darkDark=True
else:
prihdr['WARNING'] = "No XML footer metadata."
log("No XML footer metadata.",3)
#Set up fits object
hdu = fits.PrimaryHDU(dark,header=prihdr)
darkpath = os.path.dirname(fname)
fitsfilename = 'master_'+os.path.basename(fname).split('.spe')[0]+'.fits'
log("Writing master dark as "+fitsfilename)
hdu.writeto(os.path.join(darkpath, fitsfilename),clobber=True)
#Close SPE
dspe.close()
#option to load as fits
elif fname[-5:]=='.fits':
log("Opening dark file "+fname,1)
hdulist = fits.open(fname)
prihdr = hdulist[0].header
dark=hdulist[0].data
darkExp = np.round(float(prihdr['EXPTIME']))
if darkExp != exptime:
log("Exp times for dark and science frames do not match!",3)
log("Exposure time for dark: "+str(darkExp)+" s")
log("Mean dark counts: "+str(np.mean(dark)))
if prihdr['SHUTTER'] != 'AlwaysClosed':
prihdr['WARNING'] = 'Shutter not closed for dark frame.'
log("Shutter not closed for dark frame.",3)
else:
darkDark=True
darkExists = True
processframe()
displayFrame(autoscale=True,markstars=False)
hdulist.close()
else: log("Invalid file type (must be SPE or FITS).",3)
#Load Dark frames for flat calibration
def openDarkForFlats(self):
global darkForFlat, darkForFlatExists, darkForFlatExp, darkForFlatDark
fname = str(QtGui.QFileDialog.getOpenFileName(self, 'Open SPE dark for flat calibration',
defaultdir,filter='Data (*.spe *.fits)'))
if fname[-4:]=='.spe':
log("Opening dark file "+fname+" for flat calibration.",1)
dspe = read_spe.File(str(fname))
num_darks=dspe.get_num_frames()
#get all frames in SPE file
#stack as 3D numpy array
(frames,_)=dspe.get_frame(0)
frames=np.array([frames])
for i in range(1,num_darks):
(thisframe,_)=dspe.get_frame(i)
frames=np.concatenate((frames,[thisframe]),0)
darkForFlat=np.median(frames,axis=0)
darkForFlatExists = True
log("Mean dark counts for flat: "+str(np.mean(darkForFlat)))
#Write out master dark file as fits
#Set up header
prihdr = fits.Header()
prihdr['OBJECT'] = 'dark'
prihdr['IMAGETYP'] = 'dark'
prihdr['REDUCED'] = dt.datetime.now().isoformat()
prihdr['COMMENT'] = "Reduced by Keaton Bell's OLD MAID Software"
if hasattr(dspe, 'footer_metadata'):
footer_metadata = BeautifulSoup(dspe.footer_metadata, "xml")
ts_begin = footer_metadata.find(name='TimeStamp', event='ExposureStarted').attrs['absoluteTime']
dt_begin = dateutil.parser.parse(ts_begin)
prihdr['TICKRATE'] = int(footer_metadata.find(name='TimeStamp', event='ExposureStarted').attrs['resolution'])
prihdr['DATE-OBS'] = str(dt_begin.isoformat())
prihdr['XBINNING'] = footer_metadata.find(name="SensorMapping").attrs['xBinning']
prihdr['YBINNING'] = footer_metadata.find(name="SensorMapping").attrs['yBinning']
prihdr['INSTRUME'] = footer_metadata.find(name="Camera").attrs['model']
prihdr['TRIGGER'] = footer_metadata.find(name='TriggerResponse').text
prihdr['COMMENT'] = "SPE file has footer metadata"
darkForFlatExp=np.round(float(footer_metadata.find(name='ExposureTime').text)/1000.)
log("Exposure time for dark for flat: "+str(darkForFlatExp)+" s")
prihdr['EXPTIME'] = str(float(footer_metadata.find(name='ExposureTime').text)/1000.)
#prihdr['SOFTWARE'] = footer_metadata.find(name='Origin')
prihdr['SHUTTER'] = footer_metadata.find(name='Mode').text
if footer_metadata.find(name='Mode').text != 'AlwaysClosed':
prihdr['WARNING'] = 'Shutter not closed for dark frame.'
log("Shutter not closed for dark frame.",3)
else:
darkForFlatDark=True
else:
prihdr['WARNING'] = "No XML footer metadata."
log("No XML footer metadata.",3)
#Set up fits object
hdu = fits.PrimaryHDU(darkForFlat,header=prihdr)
darkpath = os.path.dirname(fname)
fitsfilename = 'master_'+os.path.basename(fname).split('.spe')[0]+'.fits'
log("Writing master dark as "+fitsfilename)
hdu.writeto(os.path.join(darkpath, fitsfilename),clobber=True)
#Close SPE
dspe.close()
#Option to load as Fits
elif fname[-5:]=='.fits':
log("Opening dark file "+fname+" for flat calibration.",1)
hdulist = fits.open(fname)
prihdr = hdulist[0].header
darkForFlat=hdulist[0].data
darkForFlatExp = np.round(float(prihdr['EXPTIME']))
log("Exposure time for dark for flat: "+str(darkForFlatExp)+" s")
log("Mean dark counts: "+str(np.mean(darkForFlat)))
if prihdr['SHUTTER'] != 'AlwaysClosed':
prihdr['WARNING'] = 'Shutter not closed for dark frame.'
log("Shutter not closed for dark frame for flat.",3)
else:
darkForFlatDark=True
darkForFlatExists = True
processframe()
displayFrame(autoscale=True,markstars=False)
hdulist.close()
else: log("Invalid file type (must be SPE or FITS).",3)
#Load Flat frames
def openFlat(self):
global flat, flatExists, flatReduced
fname = str(QtGui.QFileDialog.getOpenFileName(self, 'Open SPE flat file',
defaultdir,filter='Data (*.spe *.fits)'))
if fname[-4:]=='.spe':
if darkForFlatExists == False:
log("Import dark for reducting flats before importing flat SPE file.",3)
else:
log("Opening flat file "+fname,1)
fspe = read_spe.File(fname)
num_flats=fspe.get_num_frames()
#get all frames in SPE file
#stack as 3D numpy array
(frames,_)=fspe.get_frame(0)
modes=[]
frames = frames - darkForFlat
modes.append(stats.mode(frames.flatten())[0][0])
frames=np.array([frames/modes[0]])
for i in range(1,num_flats):
(thisframe,_)=fspe.get_frame(i)
thisframe = thisframe-darkForFlat
#modes.append(stats.mode(thisframe.flatten())[0][0])
modes.append(np.median(thisframe.flatten()))
frames=np.concatenate((frames,[thisframe/modes[i]]),0)
flat=np.median(frames,axis=0)
flatExists=True
log("Median flat counts: "+str(np.median(modes)))
processframe()
displayFrame(autoscale=True,markstars=False)
#Write out fits file
#Set up header
prihdr = fits.Header()
prihdr['OBJECT'] = 'flat'
prihdr['IMAGETYP'] = 'flat'
if hasattr(fspe, 'footer_metadata'):
footer_metadata = BeautifulSoup(fspe.footer_metadata, "xml")
ts_begin = footer_metadata.find(name='TimeStamp', event='ExposureStarted').attrs['absoluteTime']
dt_begin = dateutil.parser.parse(ts_begin)
prihdr['TICKRATE'] = int(footer_metadata.find(name='TimeStamp', event='ExposureStarted').attrs['resolution'])
prihdr['DATE-OBS'] = str(dt_begin.isoformat())
prihdr['XBINNING'] = footer_metadata.find(name="SensorMapping").attrs['xBinning']
prihdr['YBINNING'] = footer_metadata.find(name="SensorMapping").attrs['yBinning']
prihdr['INSTRUME'] = footer_metadata.find(name="Camera").attrs['model']
prihdr['TRIGGER'] = footer_metadata.find(name='TriggerResponse').text
prihdr['MODE'] = 1 #normalized
prihdr['COMMENT'] = "SPE file has footer metadata"
prihdr['EXPTIME'] = str(float(footer_metadata.find(name='ExposureTime').text)/1000.)
flatexptime = np.round(float(footer_metadata.find(name='ExposureTime').text)/1000.)
#check that dark exp time matches flat
if flatexptime == darkForFlatExp:
flatReduced = True
else:
log("Exp times for dark and flat do not match!",3)
if darkForFlatExp == 0:
log("Bias being used for flat subtraction.",1)
flatReduced=True
#prihdr['SOFTWARE'] = footer_metadata.find(name='Origin')
prihdr['SHUTTER'] = footer_metadata.find(name='Mode').text
prihdr['REDUCED'] = dt.datetime.now().isoformat()
else:
prihdr['WARNING'] = "No XML footer metadata."
log("No XML footer metadata.",3)
#Set up fits object
#Only write flat if properly dark subtracted:
if darkForFlatDark and flatReduced:
hdu = fits.PrimaryHDU(flat,header=prihdr)
flatpath = os.path.dirname(fname)
fitsfilename = 'master_'+os.path.basename(fname).split('.spe')[0]+'.fits'
log("Writing master flat as "+fitsfilename)
hdu.writeto(os.path.join(flatpath, fitsfilename),clobber=True)
#Close SPE
fspe.close()
#Option to load as Fits
elif fname[-5:]=='.fits':
log("Opening flat file "+fname,1)
hdulist = fits.open(fname)
prihdr = hdulist[0].header
flat=hdulist[0].data
flatExists = True
flatmode= float(prihdr["mode"])
if flatmode == 1: #Properly normalized?
flatReduced=True
else:
log("Mode of master flat is "+str(flatmode)+". Not properly normalized?",3)
processframe()
displayFrame(autoscale=True,markstars=False)
hdulist.close()
else: log("Invalid file type (must be SPE).",3)
#Restore previously "bad" points
def restorePts(self):
global bad
log("Deselecting "+str(len(bad))+" points.")
bad=[]
updatelcs(i=framenum)
#Undo most recently selected "bad" point
def undoBad(self):
global bad
_ = bad.pop()
#Set up aperture size menu options
def setupApsizeMenu(self):
for size in apsizes:
self.changeApertureMenu.addAction(str(size)+' pixels',lambda s=size: setApSize(s))
#Set up comp star selection menu options
def addCompStarOption(self,i):
self.changeCompStarMenu.addAction('Comp Star #'+str(i),lambda s=i: setCompStar(s))
#Change Smoothing parameters
def changeSmooth(self):
kernel.openKernelDialog()
#Run Photometry
def run(self):
#Do aperture photometry on selected stars
global numstars, selectingstars
if stage == 1:
if len(stars) == 0:
log("No stars selected. Select stars before running.",3)
else:
numstars = len(stars)
#Write original coordinates and seeing to phot_coords.orig
f = open(rundir+'/phot_coords.orig', 'w')
for j,star in enumerate(stars):
f.write('{:.2f} {:.2f} {:.2f}\n'.format(star[0],star[1],seeing[j]))
f.close()
selectingstars=False
stage2()
#Confirm Quit
def closeEvent(self, event):
reply = QtGui.QMessageBox.question(self, 'Message',
"Really quit?", QtGui.QMessageBox.Yes |
QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
event.accept()
else:
event.ignore()
# Make the App have a window and dock area.
app = QtGui.QApplication([])
win = WithMenu()
area = DockArea()
win.setCentralWidget(area)
win.resize(1500,800)
win.setWindowTitle('OLD MAID Software')
## Set up each of the docks (to hold the widgets)
d1 = Dock("Observing Log", size=(500,500))
d2 = Dock("Process Log", size=(500,500))
d3 = Dock("Fourier Transform", size=(500,500))
d4 = Dock("Smoothed Light Curve", size=(1000,250))
d5 = Dock("Image", size=(500,500))
d6 = Dock("Divided Light Curve", size=(1000,250))
d7 = Dock("Raw Counts", size=(500,250))
d8 = Dock("Sky Brightness", size=(1000,250))
d9 = Dock("Seeing", size=(1000,250))
#Define initial layout
area.addDock(d4, 'left')
area.addDock(d1, 'right',d4)
area.addDock(d6, 'above', d4)
area.addDock(d9, 'bottom', d4)
area.addDock(d8, 'above', d9)
area.addDock(d7, 'above', d8)
area.addDock(d5, 'bottom',d1)
area.addDock(d2, 'bottom', d5)
area.addDock(d3, 'bottom', d7)
area.moveDock(d5,'right',d3)
#Define and place widgets into the docks
## First dock holds the Observing Log
#Type of widget: Form
w1 = pg.LayoutWidget()
#Name the form elements
observer = QtGui.QLabel('Observer')
target = QtGui.QLabel('Target')
filt = QtGui.QLabel('Filter')
logtext = QtGui.QLabel('Log')
#Define the types of fields
observerEdit = QtGui.QLineEdit()
targetEdit = QtGui.QLineEdit()
filtEdit = QtGui.QComboBox()
filtEdit.addItems(["BG40","u'","g'","r'","i'","z'","Other"])
logEdit = QtGui.QTextEdit()
logEdit.setText("WARNING: None of these log fields are saved!")
#Put the fields in the form
w1.addWidget(observer, 1, 0)
w1.addWidget(observerEdit, 1, 1)
w1.addWidget(target, 2, 0)
w1.addWidget(targetEdit, 2, 1)
w1.addWidget(filt, 3, 0)
w1.addWidget(filtEdit, 3, 1)
w1.addWidget(logtext, 4, 0)
w1.addWidget(logEdit, 4, 1, 6, 1)
#Put the widget in the dock
d1.addWidget(w1)
## Process Log
# Records activity.
w2 = pg.LayoutWidget()
processLog = QtGui.QTextEdit()
processLog.setReadOnly(True)
w2.addWidget(processLog, 0, 0, 6, 1)
d2.addWidget(w2)
# This widget needs special functions to get messages:
def log(text,level=0):
'''log messages to the process log and log file
text is the message for the log
    level indicates how important it is:
level=0: Routine background process: gray text;
level=1: User influenced action: black text;
level=2: Major change: bold black;
level=3: Warning message: bold red;
'''
text=str(text)
colors = ['darkgray','black','black','red']
prefix = ['','','','WARNING: ']
fontweight = [50,50,75,75]
if level in range(4):
processLog.setTextColor(QtGui.QColor(colors[level]))
processLog.setFontWeight(fontweight[level])
processLog.append(prefix[level]+text)
else: log('Level assigned to message "'+text+'" out of range.',level=3)
## Light Curve
# It's a plot
w6 = pg.PlotWidget(title="Divided Light Curve",labels={'left': 'rel. flux', 'bottom': 'time (s)'})
# Set up plot components
# Raw points
s1 = pg.ScatterPlotItem(brush=(255,0,0), pen='w',symbol='o')
# Bad (ignored) points #Not currently displayed since it causes scaling issues.
#s2 = pg.ScatterPlotItem(brush=(255,0,0), pen='b',symbol='o')
# Connecting lines
l1 = pg.PlotCurveItem()
#Add components to plot widget.
w6.addItem(s1)
#w6.addItem(s2)
w6.addItem(l1)
#Add widget to dock
d6.addWidget(w6)
# Make points change color when clicked
def clicked(plot, points):
global bad
for p in points:
if p.pos()[0]/exptime in bad:
bad.remove(p.pos()[0]/exptime)
else:
bad.append(p.pos()[0]/exptime)
updatelcs(i=framenum)
s1.sigClicked.connect(clicked)
#s2.sigClicked.connect(clicked)
## Smoothed Light Curve
w4 = pg.PlotWidget(title="Smoothed Light Curve",labels={'left': 'smoothed flux', 'bottom': 'time (s)'})
ss1 = pg.ScatterPlotItem(brush=(255,0,0), pen='w',symbol='o')
sl1 = pg.PlotCurveItem()
w4.addItem(ss1)
w4.addItem(sl1)
d4.addWidget(w4)
## Raw Star/Sky Counts
w7 = pg.PlotWidget(title="Raw Star Counts",labels={'left': 'flux summed in aperture', 'bottom': 'time (s)'})
d7.addWidget(w7)
#Hold the individual plot items in this list once they are created:
rawcounts=[]
## Sky
w8 = pg.PlotWidget(title="Sky Brightness",labels={'left': 'median sky counts', 'bottom': 'time (s)'})
sky = pg.PlotCurveItem()
w8.addItem(sky)
d8.addWidget(w8)
## Seeing
w9 = pg.PlotWidget(title="Seeing",labels={'left': 'FWHM (pixels)', 'bottom': 'time (s)'})
d9.addWidget(w9)
gridlines = pg.GridItem()
w9.addItem(gridlines)
#Hold the individual plot items in this list once they are created:
seeingplots = []
## Fourier Transform
w3 = pg.PlotWidget(title="Fourier Transform",labels={'left': 'amplitude (mma)', 'bottom': 'freq (muHz)'})
ft = w3.plot(pen='y')
d3.addWidget(w3)
## Image
w5 = pg.ImageView()
w5.ui.roiBtn.hide()
#w5.ui.normBtn.hide() #Causing trouble on windows
#Define function for selecting stars. (must be defined before linking the click action)
def click(event):#Linked to image click event
global stars, seeing
if event.button() == 1 and selectingstars:
event.accept()
pos = event.pos()
#x and y are swapped in the GUI!
x=pos.x()
y=pos.y()
#log('Clicked at ({:.2f}, {:.2f})'.format(x,y),level=0)
#improve coordinates
dx,dy,newseeing = improvecoords(x,y)
#round originals so original position *within* pixel doesn't affect answer
newcoords=[np.floor(x)+dx,np.floor(y)+dy]
stars.append(newcoords)
seeing.append(newseeing)
#make menuoption for comp star selection
if len(stars) > 1: win.addCompStarOption(len(stars)-1)
#Mark stars in image display
targs.setData([p[0] for p in stars],[p[1] for p in stars])
targs.setPen(pencolors[0:len(stars)])
#Set up plot for raw counts and seeing:
rawcounts.append(pg.ScatterPlotItem(pen=pencolors[len(stars)-1],symbol='o',size=1))
seeingplots.append(pg.PlotCurveItem(pen=seeingcolors[len(stars)-1]))
log('Star selected at ({:.2f}, {:.2f})'.format(newcoords[0],newcoords[1]),level=1)
elif event.button() == 2:
event.accept()#Passed on to other functionality if not accepted.
print "RIGHT!"
w5.getImageItem().mouseClickEvent = click #Function defined above
#w5.keyPressEvent = moveCircles # Seems to be the right thing for detecting frame changes,
#But I can't connect to it without overriding other behavior. May need to subclass this.
#Set up plot for apertures around stars
#print QtGui.QColor.colorNames() for available names.
stringcolors=['red','green','blue','magenta','orange','yellow',
'darkred','darkgreen','darkblue','darkmagenta','darkorange','darkgoldenrod',
'hotpink','seagreen','skyblue','salmon','brown','lightyellow']
pencolors = [pg.mkPen(QtGui.QColor(c), width=3) for c in stringcolors]
seeingcolors = [pg.mkPen(QtGui.QColor(c), width=1.5) for c in stringcolors]
targs = pg.ScatterPlotItem(brush=None, pen=pencolors[0],symbol='o',pxMode=False,size=8)
w5.addItem(targs)
#Add widget to dock
d5.addWidget(w5)
## Show the program!
win.show()
win.raise_()
#win.activateWindow()
# I think everything is set up enough to start doing stuff
# Send initial message to process log.
log("ProEMOnline initialized",2)
#log("Development version. Do not trust.",3)
stagechange(0)
log("Open SPE file to begin analysis.",1)
#### STAGE 1 ####
# Stage 1 starts when a SPE file is loaded.
# It's the "getting everything set up" stage
# Since the SPE file is loaded by the menu action, this will be one big
# function that is called on the new image.
#First define all the variables everything will need access to:
#These will be called into action as global variables.
#SPE Filename
spefile = ''
#SPE Data
spe=[]
#SPE file directory
rundir=''
#Does SPE have a footer?
hasFooter=False
#Number of frames in currently read spe file
numframes=0
#Exposure time for science frames
exptime=1. #If it can't be figured out, plots are in terms of frame #
#Dark data
dark = []
darkExists=False
darkExp=0 #exp time should match spe exptime
darkDark=False #shutter closed?
darkForFlat = []
darkForFlatExists=False
darkForFlatExp=0
darkForFlatDark=False
#Flat data
flat = []
flatExists=False
flatReduced=False #proper dark subtracted?
#Flag whether full reductions are being done (*correct* darks and flat)
#Number of last *reduced* (photometry measures) frame
framenum=-1 #none yet
#Flag to indicate whether we are currently selecting stars in the frame:
selectingstars = False
#Number of stars to do photometry on (target first)
numstars = 0 #0 means we haven't selected stars yet.
#Star coords
stars = [] #list of list of list of coords
#Image data:
img=[] #only hold current image to save time
#And another version to look nice
displayimg=[] #only hold current image to save time
#Keep track of "Bad" points
bad=[]
#Elapsed timestamps
rawtimes=[] #start of timestamp
#Search radius (box for now), improve later
pixdist=10 #(super)pixels
#List of median background counts:
backmed=[]
#List of background variances
backvar=[]
#Seeing for each star,frame:
seeing=[]
#Binning
binning=4
def stage1(fname):
#Load SPE File
#Access needed global vars
global spefile,spe,binning,exptime,dark,flat
#Announce Stage 1
stagechange(1)
#Record SPE filename this once
spefile = fname
#Read in SPE data
spe = read_spe.File(spefile)
binning = 1024/spe.get_frame(0)[0].shape[0]
log(str(spe.get_num_frames()) + ' frames read in.')
exptime=getexptime(spe)
log('Inferred exposure time: '+str(exptime)+' s')
if hasattr(spe, 'footer_metadata'):
#log('SPE file has footer.')
exptime=np.round(float(BeautifulSoup(spe.footer_metadata, "xml").find(name='ExposureTime').text)/1000.)
        #log('Exposure time from footer: '+str(exptime)+' s')
#now display the first frame
processframe()
displayFrame(autoscale=True,markstars=False)
#Load calibration frames and set up
log("Please load dark, flat, and dark for flat files",1)
dark = np.zeros(img[0].shape)
flat = np.ones(img[0].shape)
#Select stars:
selectstars()
#spe.close() #In real version we'll close spe
win.setupApsizeMenu()
#Determine the exposuretime of a SPE file without a footer
def getexptime(thisspe):
#Input open SPE file
#don't read lots of frames in large files
numtoread = min([thisspe.get_num_frames(),11])
tstamps = np.zeros(numtoread)
for f in range(numtoread):
        tstamps[f] = thisspe.get_frame(f)[1]['time_stamp_exposure_started']
timediff = tstamps[1:numtoread]-tstamps[:numtoread-1]
return np.round(np.median(timediff/1e6))
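#Added note (illustrative sketch, not part of the original file): getexptime()
#assumes, as the line above does, that 'time_stamp_exposure_started' counts
#ticks at 1e6 ticks per second, so the median frame-to-frame difference divided
#by 1e6 is the exposure time in seconds. A minimal numpy-only version:
# >>> import numpy as np
# >>> ticks = np.array([0., 1e7, 2e7, 3e7]) #four frames started 10 s apart
# >>> np.round(np.median((ticks[1:] - ticks[:-1]) / 1e6)) #-> 10.0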
#Define all the stuff that needs to be done to each incoming frame
def processframe(i=0):
global img,displayimg,rawtimes,backmed,backvar,framenum
(thisframe,thistime) = spe.get_frame(i)
#calibrate (doesn't do anything if calibration frames are not available):
if darkExists: thisframe=(thisframe-dark)
if flatExists: thisframe=thisframe/flat
#read in frame
img=np.transpose(thisframe)
backgroundmed,backgroundvar=charbackground()
#append stuff to global variables
#Replace if this frame already exists, otherwise append
if i <= framenum: #replace
#log('Re-processing frame '+str(i)+' of '+str(framenum))
rawtimes[i]=thistime['time_stamp_exposure_started']
backmed[i]=backgroundmed
backvar[i]=backgroundvar
else: #append
#log('Processing frame '+str(i)+' of '+str(framenum))
rawtimes.append(thistime['time_stamp_exposure_started'])
backmed.append(backgroundmed)
backvar.append(backgroundvar)
#make display image
newdisplayimg=np.copy(img)
newdisplayimg[0,0]=0
imgvals = newdisplayimg.flatten()
img99percentile = np.percentile(imgvals,99)
newdisplayimg[newdisplayimg > img99percentile] = img99percentile
#log("Framenum: "+str(framenum),2)
#Replace if this frame already exists, otherwise append
displayimg=newdisplayimg
framenum=i
#Function to characterize the background to find stellar centroids accurately
#This should be done for each frame as it's read in
def charbackground():
"""Characterize the image background, median and variance
    for frame currently held in img
"""
backgroundmed = biweight_location(img)
backgroundvar = biweight_midvariance(img)
return backgroundmed, backgroundvar
#show the image to the widget
def displayFrame(autoscale=False,markstars=True):
"""Display an RBG image
i is index to display
Autoscale optional.
Return nothing.
"""
#Make sure i is in range
if autoscale:
#lowlevel=np.min(thisimg[thisimg > 0])
lowlevel=np.min(displayimg)
if np.sum(displayimg > 0) > 100:
lowlevel=np.percentile(displayimg[displayimg > 0],3)
highlevel=np.max(displayimg)-1
w5.setImage(np.array(displayimg),autoRange=True,levels=[lowlevel,highlevel],)
else:
w5.setImage(np.array(displayimg),autoRange=False,autoLevels=False)
#Draw position circles:
if markstars and len(stars) > 0:
targs.setData([p[0] for p in stars[framenum]],[p[1] for p in stars[framenum]])
targs.setSize(2.*apsizes[apsizeindex])
targs.setPen(pencolors[0:numstars])
def selectstars():
'''Select stars in the current frame.
Click to select any number in the first image.
Click to select numstars in later images to get following back on track.
'''
global selectingstars
selectingstars = True
def gaussian(x, A, sigma):
#Define a gaussian for finding FWHM
return A*np.exp(-(x)**2/(2.*sigma**2))
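#Added note for clarity: the seeing is reported below as a FWHM. For this profile,
#A*exp(-x**2/(2*sigma**2)) drops to A/2 at x = sigma*sqrt(2*ln(2)), so
#FWHM = 2*sqrt(2*ln(2))*sigma ~= 2.3548*sigma, which is the factor applied to the
#fitted sigma in improvecoords().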
def improvecoords(x,y,i=framenum,pixdist=pixdist,fwhm=4.0,sigma=5.):
"""Improve stellar centroid position from guess value. (one at a time)
    #return the adjustment that needs to be made in x and y directions
#also calculate the FWHM seeing
"""
#x=(1024/binning)-x
#y=(1024/binning)-y
#Keep track of motion
delta = np.zeros(2)
#Get image subregion around guess position
#Need to be careful not to ask for out-of-range indexes near a border
x0=x-pixdist
y0=y-pixdist
xdist=2*pixdist
ydist=2*pixdist
if x0 < 0: #if near the left edge
x0 = 0 #subregion from near given position
delta[0] += pixdist-x #adjust delta accordingly
if y0 < 0: #same in the y direction
y0 = 0
delta[1] += pixdist-y
if x+pixdist > img.shape[0]:
xdist = img.shape[0]-x+pixdist
if y+pixdist > img.shape[1]:
ydist = img.shape[1]-y+pixdist
subdata=img[x0:x0+xdist,y0:y0+ydist]
#print subdata.shape
sources = daofind(subdata - backmed[i], sigma*backvar[i], fwhm,
sharplo=0.1, sharphi=1.5, roundlo=-2.0, roundhi=2.0)
#From what I can tell, daofind returns x and y swapped, so fix it
returnedx = sources['ycentroid']
returnedy = sources['xcentroid']
thisseeing = np.nan
if len(sources) != 0:
strongsignal= np.argmax(sources['peak'])
delta[0]+=returnedx[strongsignal]-pixdist
delta[1]+=returnedy[strongsignal]-pixdist
#Fit with a gaussian
seeingdata = subdata.flatten() - backmed[i]
dist = []
for j in np.arange(subdata.shape[1])+0.5:
for k in np.arange(subdata.shape[0])+0.5:
dist.append(np.sqrt((returnedy[strongsignal]-k)**2.
+(returnedx[strongsignal]-j)**2.))
dist=np.array(dist).flatten()#distance between new coord and pixel centers
#plt.scatter(dist,seeingdata)
try: #ignores error if max iterations is hit
p0=[1000.,4.]#initial guesses
popt,_ = curve_fit(gaussian,np.append(dist,dist*-1.),np.append(seeingdata,seeingdata),p0=p0)
thisseeing = np.abs(popt[-1])*2.3548
#plt.plot(np.arange(0,10,.1),gaussian(np.arange(0,10,.1),popt[0],popt[1]))
except RuntimeError:
print "ERROR: gaussian fit did not converge for a star in frame "+str(i)
#plt.show()
else:
delta=np.zeros(2)
#also measure the seeing in this step:
#check that unique source found
'''
if len(sources) == 0:
log("Frame #"+str(i),1)
log("WARNING: no sources found in searched region near ({:.2f}, {:.2f}).".format(x,y))
#delta = [0,0] in this case
else:
if len(sources) > 1:
log("Frame #"+str(i),1)
log("WARNING: non-unique solution found for target near ({:.2f}, {:.2f}).".format(x,y))
log(str(len(sources))+" signals in window. Using brightest.")
#Take brightest star found
'''
#handle stars that were not found #Move this outside this function
"""
if [0,0] in delta and follow:
meandeltax=np.mean(delta[np.where(delta[:,0] != 0),0])
meandeltay=np.mean(delta[np.where(delta[:,1] != 0),1])
delta[np.where(delta[:,0] == 0)] += [meandeltax,meandeltay]
"""
return delta[0],delta[1],thisseeing
#### STAGE 2 ####
#Aperture details (provide a way to change these!)
apsizes=np.arange(1,11)
apsizeindex=3
r_in = 16. #inner sky annulus radius #change in terms of binning eventually
r_out = 24. #outer sky annulus radius #change in terms of binning eventually
def setApSize(size):
global apsizeindex
log("Aperture size set to "+str(size)+" pixels.",1)
#log("(Updates on next frame.)")
if size in apsizes:
apsizeindex=np.where(apsizes == size)[0][0]
targs.setSize(2*size)# Currently doesn't update until next click/frame
if stage > 1:
updatelcs(i=framenum)
compstar = 1 #which star to divide by
def setCompStar(s):
global compstar
compstar = s
log("Now dividing by comparsion star #"+str(s),1)
updatelcs(framenum)
#Phot results: variables to hold light curves and uncertainties
photresults=np.array([])
#Run the stage 2 loop
def stage2():
global stars,seeing, spe, stage, hasFooter
stagechange(2)
#Add plot items for raw counts panel to plot
for splot in rawcounts: w7.addItem(splot)
for splot in seeingplots: w9.addItem(splot)
#Make stars array an array of arrays of star coord arrays (yikes)
# i.e, it needs to get pushed a level deeper
stars=[stars]
#same with seeing
seeing=np.array([seeing])
#Run photometry on the first frame
dophot(0)
updatelcs(i=0)
updatehack()
#Start timer that looks for new data
timer2.start(min(exptime*1000.,5000.))# shorter of exptime and 5 sec
timer3.start(1.*60*1000)#update every 1 minutes
#This currently freezes up the UI. Need to thread, but not enough time
#to implement this currently. Use a hack for now
'''
#Run the loop:
fsize_spe_old = 0
while not hasFooter:
#Update only if there's new data
fsize_spe_new = os.path.getsize(spefile)
if fsize_spe_new > fsize_spe_old:
spe = read_spe.File(spefile)
numframes = spe.get_num_frames()
log('Processing frames '+str(framenum)+'-'+str(numframes),1)
while framenum < numframes:
nextframe()
if hasattr(spe, 'footer_metadata'):
hasFooter = True
log('SPE footer detected. Data acquisition complete.',2)
stagechange(3)
spe.close()
fsize_spe_old = fsize_spe_new
'''
fsize_spe_old = 0#Keep track of whether the new spe file is larger than the old one
def updatehack():
global spe, hasFooter, numframes,fsize_spe_old
#Only look for new data if not currently processing new data
if not timer.isActive():
#Update only if there's new data
fsize_spe_new = os.path.getsize(spefile)
if fsize_spe_new > fsize_spe_old and stage ==2:
spe = read_spe.File(spefile)
numframes = spe.get_num_frames()
if framenum+1==numframes-1:log('Processing frame '+str(framenum+1))
else: log('Processing frames '+str(framenum+1)+'-'+str(numframes-1),1)
timer.start(100)
#Update plots
updatelcs(i=framenum)
if hasattr(spe, 'footer_metadata'):
hasFooter = True
timer3.stop()
fsize_spe_old = fsize_spe_new
def nextframehack():
#call nextframe until you're caught up
global framenum,spe
nextframe()
updatelcs(i=framenum)
if framenum >= numframes-1:
timer.stop()
updateft(i=framenum)
if hasFooter:
log('SPE footer detected. Data acquisition complete.',2)
stagechange(3)
log("Image processing complete",2)
writetimestamps()
displayFrame(autoscale=True)
spe.close()
#This timer catches up on photometry
timer = pg.QtCore.QTimer()#set up timer to avoid while loop
timer.timeout.connect(nextframehack)
#This timer checks for new data
timer2 = pg.QtCore.QTimer()
timer2.timeout.connect(updatehack)
#For demo purposes, read in the next frame of the spe file each time this is called
def nextframe():
global stars, seeing
#if stage == 2:
oldcoords = stars[framenum]
processframe(i=framenum+1) #Frame num increases here.
newcoords=[]
newseeing=[]
for coord in oldcoords:
dx,dy,thisseeing = improvecoords(coord[0],coord[1],i=framenum)
newcoords.append([np.floor(coord[0])+.5+dx,np.floor(coord[1])+.5+dy])
newseeing.append(thisseeing)
stars.append(newcoords)
seeing = np.append(seeing,[newseeing],axis=0)
#Show the frame
displayFrame(autoscale=True,markstars=True)
#Perform photometry
dophot(i=framenum)
#Update light curves
#updatelcs(i=framenum) #only after all the new photometry is done.
def dophot(i):
'''Do photometric measurements.
Stars have been selected. Do aperture photometry on given frame
'''
global photresults
#print "dophot(i) called with i="+str(i)
#Do the aperture photometry
#The aperture_photometry() function can do many stars at once
#But you must first do a background subtraction
#We're going to save a lot of information in this step:
#Total counts and uncertainty for every aperture size for every star
#And eventually for every frame...
#Note that the photometry package seems to reference x and y coords
    #as the transpose of what we've been using. Switch the order here:
coords = [star[::-1] for star in stars[i]]
thisphotometry = np.zeros((len(coords),len(apsizes)))
for n in range(numstars):
#Loop through the stars in the image
#annulus_aperture = CircularAnnulus(coords[n], r_in=r_in, r_out=r_out)
#print aperture_photometry(img[i],annulus_aperture).keys()
#background_mean = aperture_photometry(img[i],annulus_aperture)['aperture_sum'][0]/annulus_aperture.area()
#NOTE on the above line: This should really be a median!
#Issue 161 on photutils https://github.com/astropy/photutils/issues/161 is open as of 09/28/15
gain = 12.63 #? From PI Certificate of Performance for "traditional 5MHz gain." Confirm this value!
#loop through aperture sizes
for j,size in enumerate(apsizes):
aperture = CircularAperture(np.array(coords[n]), r=size)
#phot = aperture_photometry(x-background_mean,aperture,error=backgroundvar,gain=gain)
#Why am I getting negative numbers?
#phot = aperture_photometry(img[i]-np.median(img),aperture)
phot = aperture_photometry(img-backmed[i],aperture)
thisphotometry[n,j]=phot['aperture_sum'][0]
#print "photometry ",thisphotometry
if i == 0:
photresults = np.array([thisphotometry])
else:
#print "photresults dimensions are "+str(photresults.shape)
#print "trying to append shape "+str(thisphotometry.shape)
photresults = np.append(photresults,[thisphotometry],axis=0)
#print "photresults dimensions are "+str(photresults.shape)
#yay. This deserves to all be checked very carefully, especially since the gain only affects uncertainty and not overall counts.
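#Added sketch (illustrative only; not called by the pipeline): as the note above
#says, the annulus background should really be a median. A version-independent
#way to get it with plain numpy, for a hypothetical star center (x0, y0) given in
#array coordinates, would be:
# >>> import numpy as np
# >>> yy, xx = np.indices(img.shape)
# >>> d = np.hypot(xx - x0, yy - y0)
# >>> sky_med = np.median(img[(d > r_in) & (d < r_out)])
#That per-pixel median could then be subtracted before summing in each aperture.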
#Allow different kernel types:
kerneltypes = ['Uniform','Epanechnikov']
#set up a dialog to change the kernel details:
class KernelDialog(QtGui.QDialog):
def __init__(self, parent=None):
super(KernelDialog, self).__init__(parent)
typeLabel = QtGui.QLabel("Kernel &type")
self.typeEdit = QtGui.QComboBox()
self.typeEdit.addItems(kerneltypes)
#self.typeEdit.setCurrentIndex(currentind)
typeLabel.setBuddy(self.typeEdit)
widthLabel = QtGui.QLabel("Kernel &width")
self.widthEdit = QtGui.QSpinBox()
self.widthEdit.setMinimum(2)
self.widthEdit.setMaximum(200)
#self.widthEdit.setValue(currentwidth)
widthLabel.setBuddy(self.widthEdit)
self.buttons = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel,
QtCore.Qt.Horizontal, self)
self.buttons.accepted.connect(self.accept)
self.buttons.rejected.connect(self.reject)
grid = QtGui.QGridLayout(self)
grid.addWidget(typeLabel,0,0)
grid.addWidget(self.typeEdit,0,1)
grid.addWidget(widthLabel,1,0)
grid.addWidget(self.widthEdit,1,1)
grid.addWidget(self.buttons, 3, 0)
self.setLayout(grid)
self.setWindowTitle("Define Smoothing Kernel")
def kernelFormat(self):
kerneltype=int(self.typeEdit.currentIndex())
width=int(self.widthEdit.value())
return (kerneltype,width)
@staticmethod
def getKernelFormat(parent = None):
dialog = KernelDialog(parent)
result = dialog.exec_()
kerneltype,width = dialog.kernelFormat()
return (kerneltype,width, result == QtGui.QDialog.Accepted)
#set up a class that holds all the smoothing kernel information
class smoothingkernel:
"""Holds all smoothing kernel info"""
kerneltype = 0
width = 10 #points
kernel=[]
types = kerneltypes
def setkernel(self,kerneltype,width):
if kerneltype == 1: #Epanechnikov
            u=(2.*np.arange(width)/(float(width)-1.))-1. #u spans [-1,1] so the weights are symmetric and non-negative
self.kernel = 0.75*(1.-u**2.)
self.kernel /= np.sum(self.kernel)
log("Using "+self.types[kerneltype]+" smoothing kernel of width "+str(width))
elif kerneltype == 0: #Uniform
self.kernel = np.ones(width)/float(width)
log("Using "+self.types[kerneltype]+" smoothing kernel of width "+str(width))
def openKernelDialog(self):
dkerneltype,dwidth,daccepted = KernelDialog.getKernelFormat()
if daccepted and (dkerneltype in range(len(kerneltypes))) and (dwidth > 1):
self.setkernel(dkerneltype,dwidth)
def __init__(self):
self.setkernel(0,10)
#set up the kernel object
kernel=smoothingkernel()
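#Added example (illustrative, not part of the original file): with the default
#uniform kernel of width 10 the smoothing is a simple running mean, while the
#Epanechnikov kernel tapers the weights toward the edges of the window. For
#example, a width-5 Epanechnikov kernel evaluates to:
# >>> import numpy as np
# >>> u = (2.*np.arange(5)/4.) - 1. #[-1, -0.5, 0, 0.5, 1]
# >>> k = 0.75*(1. - u**2.); k /= k.sum() #[0, 0.3, 0.4, 0.3, 0]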
#Update display.
def updatelcs(i):
#Identify which points to include/exclude, up to frame i
goodmask=np.ones(i+1, np.bool)
goodmask[bad] = False
badmask = np.zeros(i+1, np.bool)
badmask[bad] = True
targdivided = photresults[:i+1,0,apsizeindex]/photresults[:i+1,compstar,apsizeindex]
times = np.arange(i+1)#Multiply by exptime for timestamps
goodfluxnorm=targdivided[goodmask[:i+1]]/np.abs(np.mean(targdivided[goodmask[:i+1]]))
s1.setData(exptime*times[goodmask[:i+1]],goodfluxnorm)
#s2.setData(times[badmask[:i]],targdivided[badmask[:i]])
l1.setData(exptime*times[goodmask[:i+1]],goodfluxnorm)
#sl1.setData(times[goodmask[:i]],fluxsmoothed[goodmask[:i]])
#Raw Counts:
for j,splot in enumerate(rawcounts): splot.setData(exptime*times,photresults[:,j,apsizeindex])
#Seeing:
for j,splot in enumerate(seeingplots[::-1]): splot.setData(exptime*times,seeing[:,j])
#Sky brightness
sky.setData(exptime*times,backmed)
def updateftfromtimer():
updateft(i=framenum)
def updateft(i=framenum):
oversample=10. #Oversampling factor
goodmask=np.ones(i+1, np.bool)
goodmask[bad] = False
targdivided = photresults[:i+1,0,apsizeindex]/photresults[:i+1,compstar,apsizeindex]
goodfluxnorm=targdivided[goodmask[:i+1]]/np.abs(np.mean(targdivided[goodmask[:i+1]]))
times = np.arange(i+1)#Multiply by exptime for timestamps
#Fourier Transform and smoothed lc
if goodmask.sum() > 2:
#This all requires at least two points
#Only update once per file read-in
interped = interp1d(exptime*times[goodmask[:i+1]],goodfluxnorm-1.)
xnew = np.arange(exptime*min(times[goodmask[:i]]),exptime*max(times[goodmask[:i+1]]),exptime)
ynew = interped(xnew)
#calculate FT
amp = 2.*np.abs(fft(ynew,n=len(ynew)*oversample))#FFT
amp /= float(len(ynew))
freq = fftfreq(len(amp),d=exptime)
pos = freq>=0 # keep positive part
ft.setData(1e6*freq[pos],1e3*amp[pos])
#Smoothed LC
#Update if there are enough points:
if len(ynew) > kernel.width:
fluxsmoothed=np.convolve(ynew,kernel.kernel,mode='same')
ss1.setData(xnew,fluxsmoothed)
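#Added note (illustrative): the 2*|FFT|/N normalization in updateft() recovers the
#amplitude of a pure sinusoid, and the plot axes are scaled to mma (x1e3) and
#microhertz (x1e6). A minimal check, assuming a 10 s cadence:
# >>> import numpy as np
# >>> from scipy.fftpack import fft, fftfreq
# >>> t = np.arange(1000)*10.
# >>> y = 0.005*np.sin(2.*np.pi*1e-3*t) #5 mma signal at 1000 muHz
# >>> amp = 2.*np.abs(fft(y))/len(y) #peaks near 0.005 where fftfreq(len(y), d=10.) ~ 1e-3 Hz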
#This timer recomputes the FT and smoothed lc infrequently
timer3 = pg.QtCore.QTimer()
timer3.timeout.connect(updateftfromtimer)
''' Not implemented yet!
#To keep the GUI from locking up, computationally intensive processes must
#be done in a thread. Set up that thread here:
class Stage2Thread(QtCore.QThread):
setTime = QtCore.pyqtSignal(int,int)
iteration = QtCore.pyqtSignal(threading.Event, int)
def run(self):
self.setTime.emit(0,300)
for i in range(300):
time.sleep(0.05)
event = threading.Event()
self.iteration.emit(event, i)
event.wait()
'''
#Write timestamps
def writetimestamps():
fpath_csv = os.path.splitext(spefile)[0]+'_timestamps.csv'
log("Writing absolute timestamps to file "+fpath_csv,2)
if hasattr(spe, 'footer_metadata'):
footer_metadata = BeautifulSoup(spe.footer_metadata, "xml")
trigger_response = footer_metadata.find(name='TriggerResponse').text
ts_begin = footer_metadata.find(name='TimeStamp', event='ExposureStarted').attrs['absoluteTime']
dt_begin = dateutil.parser.parse(ts_begin)
ticks_per_second = int(footer_metadata.find(name='TimeStamp', event='ExposureStarted').attrs['resolution'])
else:
log(("No XML footer metadata.\n" +
"Unknown trigger response.\n" +
"Using file creation time as absolute timestamp.\n" +
"Assuming 1E6 ticks per seconds."),3)
trigger_response = ""
        dt_begin = dt.datetime.utcfromtimestamp(os.path.getctime(spefile))
ticks_per_second = 1E6
idx_metadata_map = {}
for idx in xrange(spe.get_num_frames()):
(frame, metadata) = spe.get_frame(idx)
idx_metadata_map[idx] = metadata
df_metadata = pd.DataFrame.from_dict(idx_metadata_map, orient='index')
df_metadata = df_metadata.set_index(keys='frame_tracking_number')
df_metadata = df_metadata[['time_stamp_exposure_started', 'time_stamp_exposure_ended']].applymap(lambda x: x / ticks_per_second)
df_metadata = df_metadata[['time_stamp_exposure_started', 'time_stamp_exposure_ended']].applymap(lambda x : dt_begin + dt.timedelta(seconds=x))
df_metadata[['diff_time_stamp_exposure_started', 'diff_time_stamp_exposure_ended']] = df_metadata - df_metadata.shift()
log("Trigger response = {tr}".format(tr=trigger_response))
log("Absolute timestamp = {dt_begin}".format(dt_begin=dt_begin))
log("Ticks per second = {tps}".format(tps=ticks_per_second))
df_metadata.head()
# Write out as CSV to source directory of SPE file.
df_metadata.to_csv(fpath_csv, quoting=csv.QUOTE_NONNUMERIC)
saveScreenshot()
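#Added note (illustrative): each frame's tick counter becomes an absolute
#timestamp as dt_begin + timedelta(seconds=ticks/ticks_per_second); e.g., with
#the fallback rate of 1E6 ticks per second:
# >>> import datetime as dt
# >>> dt.datetime(2015, 1, 1) + dt.timedelta(seconds=12500000/1E6) #12.5 s after dt_begin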
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
if len(sys.argv) > 1:
defaultdir = sys.argv[1]
QtGui.QApplication.instance().exec_()
| mit |
ltiao/scikit-learn | sklearn/tests/test_base.py | 216 | 7045 | # Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
| bsd-3-clause |
rajathkumarmp/numpy | numpy/fft/fftpack.py | 72 | 45497 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import (array, asarray, zeros, swapaxes, shape, conjugate,
take, sqrt)
from . import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache=_fft_cache):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified."
% n)
try:
# Thread-safety note: We rely on list.pop() here to atomically
# retrieve-and-remove a wsave from the cache. This ensures that no
# other thread can get the same wsave while we're using it.
wsave = fft_cache.setdefault(n, []).pop()
except (IndexError):
wsave = init_function(n)
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0, n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
# As soon as we put wsave back into the cache, another thread could pick it
# up and start using it, so we must not do this until after we're
# completely done using it ourselves.
fft_cache[n].append(wsave)
return r
def _unitary(norm):
if norm not in (None, "ortho"):
raise ValueError("Invalid norm value %s, should be None or \"ortho\"."
% norm)
return norm is not None
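# Normalization convention used by the transforms below: with norm=None the
# forward transforms are unscaled and the inverse transforms divide by n;
# with norm="ortho" both directions are scaled by 1/sqrt(n), so each
# forward/inverse pair is unitary.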
def fft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
a = asarray(a).astype(complex)
if n is None:
n = a.shape[axis]
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
if _unitary(norm):
output *= 1 / sqrt(n)
return output
def ifft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e., ``a[0]`` should contain the zero frequency term,
``a[1:n/2+1]`` should contain the positive-frequency terms, and
``a[n/2+1:]`` should contain the negative-frequency terms, in order of
decreasingly negative frequency. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.legend(('real', 'imaginary'))
<matplotlib.legend.Legend object at 0x...>
>>> plt.show()
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
def rfft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermitian-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf,
_real_fft_cache)
if _unitary(norm):
output *= 1 / sqrt(a.shape[axis])
return output
def irfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
    rfft : The one-dimensional FFT of real input, of which `irfft` is the inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
def hfft(a, n=None, axis=-1, norm=None):
"""
Compute the FFT of a signal which has Hermitian symmetry (real spectrum).
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
>>> np.fft.fft(signal)
array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j])
>>> np.fft.hfft(signal[:4]) # Input first half of signal
array([ 15., -4., 0., -1., 0., -4.])
>>> np.fft.hfft(signal, 6) # Input entire signal and truncate
array([ 15., -4., 0., -1., 0., -4.])
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
return irfft(conjugate(a), n, axis) * (sqrt(n) if unitary else n)
def ihfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse FFT of a signal which has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
>>> np.fft.ifft(spectrum)
array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j])
>>> np.fft.ihfft(spectrum)
array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = conjugate(rfft(a, n, axis))
return output * (1 / (sqrt(n) if unitary else n))
def _cook_nd_args(a, s=None, axes=None, invreal=0):
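    # Normalize the (s, axes) pair shared by the n-d transforms: default the
    # shape from the input array, default the axes to the last len(s) axes,
    # and, for the inverse real transform (invreal), recover the original
    # last-axis length 2*(m - 1) when no shape was given explicitly.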
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
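    # Apply the 1-d transform `function` along each requested axis in turn,
    # iterating in reverse order so the last listed axis is transformed first.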
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = list(range(len(axes)))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii], norm=norm)
return a
def fftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : Shifts zero-frequency terms to centre of array
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def fft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ]])
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def rfftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1], norm)
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii], norm)
return a
def rfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
"""
return rfftn(a, s, axes, norm)
def irfftn(a, s=None, axes=None, norm=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input along the
axes specified by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
        of which `irfftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii], norm)
a = irfft(a, s[-1], axes[-1], norm)
return a
def irfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
"""
return irfftn(a, s, axes, norm)
| bsd-3-clause |
bigdataelephants/scikit-learn | sklearn/metrics/cluster/tests/test_unsupervised.py | 26 | 2870 | import numpy as np
from scipy.sparse import csr_matrix
from .... import datasets
from ..unsupervised import silhouette_score
from ... import pairwise_distances
from sklearn.utils.testing import assert_false, assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
def test_silhouette():
"""Tests the Silhouette Coefficient. """
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
# Test without calculating D
silhouette_metric = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(silhouette, silhouette_metric)
# Test with sampling
silhouette = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
silhouette_metric = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert(silhouette > 0)
assert(silhouette_metric > 0)
assert_almost_equal(silhouette_metric, silhouette)
# Test with sparse X
X_sparse = csr_matrix(X)
D = pairwise_distances(X_sparse, metric='euclidean')
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
def test_no_nan():
"""Assert Silhouette Coefficient != nan when there is 1 sample in a class.
This tests for the condition that caused issue 960.
"""
# Note that there is only one sample in cluster 0. This used to cause the
# silhouette_score to return nan (see bug #960).
labels = np.array([1, 0, 1, 1, 1])
# The distance matrix doesn't actually matter.
D = np.random.RandomState(0).rand(len(labels), len(labels))
silhouette = silhouette_score(D, labels, metric='precomputed')
assert_false(np.isnan(silhouette))
def test_correct_labelsize():
""" Assert 2 <= n_labels <= nsample -1 """
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
assert_raises_regexp(ValueError,
"Number of labels is %d "
"but should be more than 2"
"and less than n_samples - 1" % len(np.unique(y)),
silhouette_score, X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
assert_raises_regexp(ValueError,
"Number of labels is %d "
"but should be more than 2"
"and less than n_samples - 1" % len(np.unique(y)),
silhouette_score, X, y)
| bsd-3-clause |
cactusbin/nyt | matplotlib/examples/axes_grid/demo_curvelinear_grid2.py | 15 | 1839 | import numpy as np
#from matplotlib.path import Path
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid.grid_helper_curvelinear import GridHelperCurveLinear
from mpl_toolkits.axes_grid.axislines import Subplot
import mpl_toolkits.axes_grid.angle_helper as angle_helper
def curvelinear_test1(fig):
"""
grid for custom transform.
"""
def tr(x, y):
sgn = np.sign(x)
x, y = np.abs(np.asarray(x)), np.asarray(y)
return sgn*x**.5, y
    def inv_tr(x, y):
sgn = np.sign(x)
x, y = np.asarray(x), np.asarray(y)
return sgn*x**2, y
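    # tr maps x -> sign(x) * sqrt(|x|) (y unchanged) and inv_tr is its exact
    # inverse, so grid lines defined in the transformed coordinates appear
    # curved when drawn in data space.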
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = None,
lat_cycle = None,
lon_minmax = None, #(0, np.inf),
lat_minmax = None,
)
grid_helper = GridHelperCurveLinear((tr, inv_tr),
extreme_finder=extreme_finder)
ax1 = Subplot(fig, 111, grid_helper=grid_helper)
# ax1 will have a ticks and gridlines defined by the given
# transform (+ transData of the Axes). Note that the transform of
# the Axes itself (i.e., transData) is not affected by the given
# transform.
fig.add_subplot(ax1)
ax1.imshow(np.arange(25).reshape(5,5),
vmax = 50, cmap=plt.cm.gray_r,
interpolation="nearest",
origin="lower")
# tick density
grid_helper.grid_finder.grid_locator1._nbins = 6
grid_helper.grid_finder.grid_locator2._nbins = 6
if 1:
fig = plt.figure(1, figsize=(7, 4))
fig.clf()
curvelinear_test1(fig)
plt.show()
| unlicense |
wavelets/zipline | zipline/finance/performance/period.py | 3 | 16164 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Performance Period
==================
Performance Periods are updated with every trade. When calling
code needs a portfolio object that fulfills the algorithm
protocol, use the PerformancePeriod.as_portfolio method. See that
method for comments on the specific fields provided (and
omitted).
+---------------+------------------------------------------------------+
| key | value |
+===============+======================================================+
| ending_value | the total market value of the positions held at the |
| | end of the period |
+---------------+------------------------------------------------------+
| cash_flow | the cash flow in the period (negative means spent) |
| | from buying and selling securities in the period. |
| | Includes dividend payments in the period as well. |
+---------------+------------------------------------------------------+
| starting_value| the total market value of the positions held at the |
| | start of the period |
+---------------+------------------------------------------------------+
| starting_cash | cash on hand at the beginning of the period |
+---------------+------------------------------------------------------+
| ending_cash | cash on hand at the end of the period |
+---------------+------------------------------------------------------+
| positions | a list of dicts representing positions, see |
| | :py:meth:`Position.to_dict()` |
| | for details on the contents of the dict |
+---------------+------------------------------------------------------+
| pnl | Dollar value profit and loss, for both realized and |
| | unrealized gains. |
+---------------+------------------------------------------------------+
| returns | percentage returns for the entire portfolio over the |
| | period |
+---------------+------------------------------------------------------+
| cumulative\ | The net capital used (positive is spent) during |
| _capital_used | the period |
+---------------+------------------------------------------------------+
| max_capital\ | The maximum amount of capital deployed during the |
| _used | period. |
+---------------+------------------------------------------------------+
| period_close | The last close of the market in period. datetime in |
| | pytz.utc timezone. |
+---------------+------------------------------------------------------+
| period_open | The first open of the market in period. datetime in |
| | pytz.utc timezone. |
+---------------+------------------------------------------------------+
| transactions  | all the transactions that were accrued during this   |
| | period. Unset/missing for cumulative periods. |
+---------------+------------------------------------------------------+
"""
from __future__ import division
import logbook
import numpy as np
import pandas as pd
from collections import Counter, OrderedDict, defaultdict
from six import iteritems, itervalues
import zipline.protocol as zp
from . position import positiondict
log = logbook.Logger('Performance')
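# Rough lifecycle sketch (the surrounding trading client normally wires these
# calls up; shown here only to illustrate how the pieces below fit together):
#
#     period = PerformancePeriod(starting_cash=10000.0)
#     period.execute_transaction(txn)       # txn: a filled-order transaction
#     period.update_last_sale(trade_event)  # keep position marks current
#     period.calculate_performance()        # refresh pnl / returns / totals
#     snapshot = period.to_dict()           # dict of the fields documented above
#     portfolio = period.as_portfolio()     # protocol object handed to algorithms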
class PerformancePeriod(object):
def __init__(
self,
starting_cash,
period_open=None,
period_close=None,
keep_transactions=True,
keep_orders=False,
serialize_positions=True):
self.period_open = period_open
self.period_close = period_close
self.ending_value = 0.0
self.period_cash_flow = 0.0
self.pnl = 0.0
# sid => position object
self.positions = positiondict()
self.ending_cash = starting_cash
# rollover initializes a number of self's attributes:
self.rollover()
self.keep_transactions = keep_transactions
self.keep_orders = keep_orders
# Arrays for quick calculations of positions value
self._position_amounts = pd.Series()
self._position_last_sale_prices = pd.Series()
self.calculate_performance()
# An object to recycle via assigning new values
# when returning portfolio information.
        # This avoids creating a new object for each event.
self._portfolio_store = zp.Portfolio()
self._positions_store = zp.Positions()
self.serialize_positions = serialize_positions
def rollover(self):
self.starting_value = self.ending_value
self.starting_cash = self.ending_cash
self.period_cash_flow = 0.0
self.pnl = 0.0
self.processed_transactions = defaultdict(list)
self.orders_by_modified = defaultdict(OrderedDict)
self.orders_by_id = OrderedDict()
def ensure_position_index(self, sid):
try:
self._position_amounts[sid]
self._position_last_sale_prices[sid]
except (KeyError, IndexError):
self._position_amounts = \
self._position_amounts.append(pd.Series({sid: 0.0}))
self._position_last_sale_prices = \
self._position_last_sale_prices.append(pd.Series({sid: 0.0}))
def add_dividend(self, div):
# The dividend is received on midnight of the dividend
# declared date. We calculate the dividends based on the amount of
# stock owned on midnight of the ex dividend date. However, the cash
# is not dispersed until the payment date, which is
# included in the event.
self.positions[div.sid].add_dividend(div)
def handle_split(self, split):
if split.sid in self.positions:
# Make the position object handle the split. It returns the
# leftover cash from a fractional share, if there is any.
position = self.positions[split.sid]
leftover_cash = position.handle_split(split)
self._position_amounts[split.sid] = position.amount
self._position_last_sale_prices[split.sid] = \
position.last_sale_price
if leftover_cash > 0:
self.handle_cash_payment(leftover_cash)
def update_dividends(self, todays_date):
"""
Check the payment date and ex date against today's date
to determine if we are owed a dividend payment or if the
payment has been disbursed.
"""
cash_payments = 0.0
stock_payments = Counter() # maps sid to number of shares paid
for sid, pos in iteritems(self.positions):
cash_payment, stock_payment = pos.update_dividends(todays_date)
cash_payments += cash_payment
stock_payments.update(stock_payment)
for stock, payment in iteritems(stock_payments):
position = self.positions[stock]
position.amount += payment
self.ensure_position_index(stock)
self._position_amounts[stock] = position.amount
self._position_last_sale_prices[stock] = \
position.last_sale_price
# credit our cash balance with the dividend payments, or
# if we are short, debit our cash balance with the
# payments.
# debit our cumulative cash spent with the dividend
# payments, or credit our cumulative cash spent if we are
# short the stock.
self.handle_cash_payment(cash_payments)
# recalculate performance, including the dividend
# payments
self.calculate_performance()
def handle_cash_payment(self, payment_amount):
self.adjust_cash(payment_amount)
def handle_commission(self, commission):
# Deduct from our total cash pool.
self.adjust_cash(-commission.cost)
# Adjust the cost basis of the stock if we own it
if commission.sid in self.positions:
self.positions[commission.sid].\
adjust_commission_cost_basis(commission)
def adjust_cash(self, amount):
self.period_cash_flow += amount
def calculate_performance(self):
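        # Total equity is cash plus the market value of open positions; pnl is
        # the change in total equity over the period, and returns expresses
        # that change relative to the starting equity.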
self.ending_value = self.calculate_positions_value()
total_at_start = self.starting_cash + self.starting_value
self.ending_cash = self.starting_cash + self.period_cash_flow
total_at_end = self.ending_cash + self.ending_value
self.pnl = total_at_end - total_at_start
if total_at_start != 0:
self.returns = self.pnl / total_at_start
else:
self.returns = 0.0
def record_order(self, order):
if self.keep_orders:
dt_orders = self.orders_by_modified[order.dt]
if order.id in dt_orders:
del dt_orders[order.id]
dt_orders[order.id] = order
# to preserve the order of the orders by modified date
# we delete and add back. (ordered dictionary is sorted by
# first insertion date).
if order.id in self.orders_by_id:
del self.orders_by_id[order.id]
self.orders_by_id[order.id] = order
def update_position(self, sid, amount=None, last_sale_price=None,
last_sale_date=None, cost_basis=None):
pos = self.positions[sid]
self.ensure_position_index(sid)
if amount is not None:
pos.amount = amount
self._position_amounts[sid] = amount
if last_sale_price is not None:
pos.last_sale_price = last_sale_price
self._position_last_sale_prices[sid] = last_sale_price
if last_sale_date is not None:
pos.last_sale_date = last_sale_date
if cost_basis is not None:
pos.cost_basis = cost_basis
def execute_transaction(self, txn):
# Update Position
# ----------------
position = self.positions[txn.sid]
position.update(txn)
self.ensure_position_index(txn.sid)
self._position_amounts[txn.sid] = position.amount
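        # Buys (positive txn.amount) consume cash; sells (negative amount) add it.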
self.period_cash_flow -= txn.price * txn.amount
if self.keep_transactions:
self.processed_transactions[txn.dt].append(txn)
def calculate_positions_value(self):
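        # Market value of all open positions: dot product of the share counts
        # with their last sale prices.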
return np.dot(self._position_amounts, self._position_last_sale_prices)
def update_last_sale(self, event):
if event.sid not in self.positions:
return
if event.type != zp.DATASOURCE_TYPE.TRADE:
return
if not pd.isnull(event.price):
# isnan check will keep the last price if its not present
self.update_position(event.sid, last_sale_price=event.price,
last_sale_date=event.dt)
def __core_dict(self):
rval = {
'ending_value': self.ending_value,
# this field is renamed to capital_used for backward
# compatibility.
'capital_used': self.period_cash_flow,
'starting_value': self.starting_value,
'starting_cash': self.starting_cash,
'ending_cash': self.ending_cash,
'portfolio_value': self.ending_cash + self.ending_value,
'pnl': self.pnl,
'returns': self.returns,
'period_open': self.period_open,
'period_close': self.period_close
}
return rval
def to_dict(self, dt=None):
"""
Creates a dictionary representing the state of this performance
period. See header comments for a detailed description.
Kwargs:
dt (datetime): If present, only return transactions for the dt.
"""
rval = self.__core_dict()
if self.serialize_positions:
positions = self.get_positions_list()
rval['positions'] = positions
# we want the key to be absent, not just empty
if self.keep_transactions:
if dt:
# Only include transactions for given dt
transactions = [x.to_dict()
for x in self.processed_transactions[dt]]
else:
transactions = \
[y.to_dict()
for x in itervalues(self.processed_transactions)
for y in x]
rval['transactions'] = transactions
if self.keep_orders:
if dt:
# only include orders modified as of the given dt.
orders = [x.to_dict()
for x in itervalues(self.orders_by_modified[dt])]
else:
orders = [x.to_dict() for x in itervalues(self.orders_by_id)]
rval['orders'] = orders
return rval
def as_portfolio(self):
"""
The purpose of this method is to provide a portfolio
object to algorithms running inside the same trading
client. The data needed is captured raw in a
PerformancePeriod, and in this method we rename some
fields for usability and remove extraneous fields.
"""
# Recycles containing objects' Portfolio object
# which is used for returning values.
# as_portfolio is called in an inner loop,
# so repeated object creation becomes too expensive
portfolio = self._portfolio_store
# maintaining the old name for the portfolio field for
# backward compatibility
portfolio.capital_used = self.period_cash_flow
portfolio.starting_cash = self.starting_cash
portfolio.portfolio_value = self.ending_cash + self.ending_value
portfolio.pnl = self.pnl
portfolio.returns = self.returns
portfolio.cash = self.ending_cash
portfolio.start_date = self.period_open
portfolio.positions = self.get_positions()
portfolio.positions_value = self.ending_value
return portfolio
def get_positions(self):
positions = self._positions_store
for sid, pos in iteritems(self.positions):
if pos.amount == 0:
# Clear out the position if it has become empty since the last
# time get_positions was called. Catching the KeyError is
# faster than checking `if sid in positions`, and this can be
# potentially called in a tight inner loop.
try:
del positions[sid]
except KeyError:
pass
continue
# Note that this will create a position if we don't currently have
# an entry
position = positions[sid]
position.amount = pos.amount
position.cost_basis = pos.cost_basis
position.last_sale_price = pos.last_sale_price
return positions
def get_positions_list(self):
positions = []
for sid, pos in iteritems(self.positions):
if pos.amount != 0:
positions.append(pos.to_dict())
return positions
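    # Editor's note (a sketch, not part of the original class): to_dict()
    # above builds its result from __core_dict(), so a serialized period
    # looks roughly like:
    #     {'ending_value': ..., 'capital_used': ..., 'starting_value': ...,
    #      'starting_cash': ..., 'ending_cash': ..., 'portfolio_value': ...,
    #      'pnl': ..., 'returns': ..., 'period_open': ..., 'period_close': ...,
    #      'positions': [...],      # only when serialize_positions is True
    #      'transactions': [...],   # only when keep_transactions is True
    #      'orders': [...]}         # only when keep_orders is True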
| apache-2.0 |
joyeshmishra/spark-tk | regression-tests/generatedata/gmm_datagen.py | 14 | 1129 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Generates data for gmm model
params: n_samples: number of rows
centers: number of centroids
n_features: number of columns"""
from sklearn.datasets.samples_generator import make_blobs
def gen_data(n_rows, k, features):
x,y = make_blobs(n_samples=n_rows, centers=k, n_features=features, random_state=14)
for row in x.tolist():
print ",".join(map(str,row))
gen_data(50, 5, 2)
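# Usage sketch (editor's addition, not in the original script): the rows are
# written to stdout as comma-separated values, so a typical invocation
# redirects them to a file, e.g.
#     python gmm_datagen.py > gmm_test_data.csv
# Changing the gen_data(...) call above changes the number of rows, clusters
# and columns of the generated data.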
| apache-2.0 |
biorack/metatlas | metatlas/io/write_utils.py | 1 | 2914 | """ Utility functions used in writing files"""
import filecmp
import logging
import os
import tempfile
logger = logging.getLogger(__name__)
def make_dir_for(file_path):
"""makes directories for file_path if they don't already exist"""
directory = os.path.dirname(file_path)
if directory != "":
os.makedirs(directory, exist_ok=True)
def check_existing_file(file_path, overwrite=False):
"""Creates directories as needed and throws an error if file exists and overwrite is False"""
make_dir_for(file_path)
try:
if not overwrite and os.path.exists(file_path):
raise FileExistsError(f"Not overwriting {file_path}.")
except FileExistsError as err:
logger.exception(err)
raise
def export_dataframe(dataframe, file_path, description, overwrite=False, **kwargs):
"""
inputs:
dataframe: pandas DataFrame to save
file_path: string with path of file to create
description: free string for logging
overwrite: if False, raise error if file already exists
remaining arguments are passed through to to_csv()
"""
check_existing_file(file_path, overwrite)
dataframe.to_csv(file_path, **kwargs)
logger.info("Exported %s to %s.", description, file_path)
def raise_on_diff(dataframe, file_path, description, **kwargs):
"""
inputs:
dataframe: pandas DataFrame to save
file_path: string with path of file to compare against
description: free string for logging
kwargs: passed through to to_csv()
If file_path exists and does not match file that would be generated by
saving dataframe to a csv, then raise ValueError
"""
if not os.path.exists(file_path):
return
with tempfile.NamedTemporaryFile(delete=False) as temp_path:
dataframe.to_csv(temp_path, **kwargs)
same = filecmp.cmp(file_path, temp_path.name)
os.remove(temp_path.name)
if same:
logger.info("Data in %s is the same as %s.", description, file_path)
else:
try:
raise ValueError("Data in %s is not the same as %s." % (description, file_path))
except ValueError as err:
logger.exception(err)
raise
def export_dataframe_die_on_diff(dataframe, file_path, description, **kwargs):
"""
inputs:
dataframe: pandas DataFrame to save
file_path: string with path of file to create
description: free string for logging
kwargs: passed through to to_csv()
If file_path does not exist then save the dataframe there
If file_path exists and matches data in dataframe then do nothing
If file_path exists and does not match dataframe then raise ValueError
"""
raise_on_diff(dataframe, file_path, description, **kwargs)
if not os.path.exists(file_path):
export_dataframe(dataframe, file_path, description, **kwargs)
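# Minimal usage sketch (editor's addition; the import path, frame and file
# paths below are illustrative assumptions, not part of this module):
#
#     import pandas as pd
#     from metatlas.io import write_utils
#
#     df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
#     # writes the file, creating parent directories as needed
#     write_utils.export_dataframe(df, "/tmp/out/df.csv", "toy frame", index=False)
#     # a second call raises FileExistsError unless overwrite=True is passed
#     write_utils.export_dataframe(df, "/tmp/out/df.csv", "toy frame",
#                                  overwrite=True, index=False)
#     # no-op if the on-disk CSV already matches, ValueError if it differs
#     write_utils.export_dataframe_die_on_diff(df, "/tmp/out/df.csv", "toy frame",
#                                              index=False)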
| bsd-3-clause |
shear/rppy | test_ruger_hti.py | 2 | 2682 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 3 17:24:04 2015
@author: Sean
"""
import rppy
import numpy as np
import matplotlib.pyplot as plt
p1 = 2000
vp1 = 3000
vs1 = 1500
e1 = 0.0
d1 = 0.0
y1 = 0.0
p2 = 2200
vp2 = 4000
vs2 = 2000
y2 = 0.1
d2 = 0.1
e2 = 0.1
theta = 30
phi = np.arange(0, 90, 1)
phit = np.array([1.2500, 4.9342, 8.6184, 11.842, 15.526, 19.211, 22.664,
25.888, 28.421, 30.724, 34.638, 38.092, 41.546, 45.461,
49.375, 53.289, 56.974, 60.888, 65.493, 69.408, 73.783,
79.079, 84.375, 89.211])
exp = np.array([0.19816, 0.19816, 0.19678, 0.19539, 0.19263, 0.19056,
0.18711, 0.18365, 0.18020, 0.17813, 0.17329, 0.16845,
0.16431, 0.15878, 0.15326, 0.14842, 0.14359, 0.13875,
0.13391, 0.12977, 0.12632, 0.12286, 0.12079, 0.12010])
Rpp = np.zeros(np.shape(phi))
Rpo = np.zeros(np.shape(phi))
Rpk = np.zeros(np.shape(phi))
for ind, phiv in enumerate(phi):
Rpp[ind] = rppy.reflectivity.ruger_hti(vp1, vs1, p1, e1, d1, y1,
vp2, vs2, p2, e2, d2, y2,
theta, phiv)
Rpo[ind] = rppy.reflectivity.exact_ortho(rppy.reflectivity.Cij(vp1, vs1, p1, 0, 0, 0, e1, d1, y1, 0), p1,
rppy.reflectivity.Cij(vp2, vs2, p2, 0, 0, 0, e2, d2, y2, 0), p2,
0, 0, phiv, theta)
Rpk[ind] = rppy.reflectivity.vavrycuk_psencik_hti(vp1, vs1, p1, e1, d1, y1,
vp2, vs2, p2, e2, d2, y1,
phiv, theta)
plt.figure(1)
plt.plot(phi, Rpp, phi, Rpo, phi, Rpk)
plt.show()
theta = np.arange(0, 60, 1)
phi = 45
Rpp = np.zeros(np.shape(theta))
Rpo = np.zeros(np.shape(theta))
Rpk = np.zeros(np.shape(theta))
Rpa = np.zeros(np.shape(theta))
for ind, thetav in enumerate(theta):
Rpp[ind] = rppy.reflectivity.ruger_hti(vp1, vs1, p1, e1, d1, y1,
vp2, vs2, p2, e2, d2, y1,
thetav, phi)
Rpk[ind] = rppy.reflectivity.vavrycuk_psencik_hti(vp1, vs1, p1, e1, d1, y1,
vp2, vs2, p2, e2, d2, y1,
phi, thetav)
Rpo = rppy.reflectivity.zoeppritz(vp1, vs1, p1, vp2, vs2, p2, theta)
Rpa = rppy.reflectivity.aki_richards(vp1, vs1, p1, vp2, vs2, p2, theta)
plt.figure(2)
plt.plot(theta, Rpp, theta, Rpo, theta, Rpk, theta, Rpa)
plt.xlim([0, 60])
plt.ylim([0.125, 0.275])
plt.legend(['Ruger', 'Zoe', 'Vavrycuk', 'A-R'])
plt.show()
| bsd-2-clause |
andnovar/ggplot | ggplot/scales/scale_colour_gradient.py | 12 | 2017 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .scale import scale
from copy import deepcopy
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap, rgb2hex, ColorConverter
def colors_at_breaks(cmap, breaks=[0, 0.25, 0.5, 0.75, 1.]):
return [rgb2hex(cmap(bb)[:3]) for bb in breaks]
class scale_colour_gradient(scale):
"""
Specify a two- or three-point gradient.
Parameters
----------
    name : name of an existing gradient scheme
    limits : list of the upper and lower bounds of the gradient
    low : colour at the lower bound of the gradient
    mid : colour at the middle of the gradient
    high : colour at the upper bound of the gradient
Examples
--------
>>> from ggplot import *
    >>> diamonds_premium = diamonds[diamonds.cut=='Premium']
    >>> gg = ggplot(diamonds_premium, aes(x='depth', y='carat', colour='price')) + \\
... geom_point()
>>> print(gg + scale_colour_gradient(low='red', mid='white', high='blue', limits=[4000,6000]) + \\
... ggtitle('With red-blue gradient'))
>>> print(gg + ggtitle('With standard gradient'))
"""
VALID_SCALES = ['name', 'limits', 'low', 'mid', 'high']
def __radd__(self, gg):
gg = deepcopy(gg)
if self.name:
gg.color_label = self.name
if not (self.limits is None):
gg.color_limits = self.limits
color_spectrum = []
if self.low:
color_spectrum.append(self.low)
if self.mid:
color_spectrum.append(self.mid)
if self.high:
color_spectrum.append(self.high)
if self.low and self.high:
gradient2n = LinearSegmentedColormap.from_list('gradient2n', color_spectrum)
plt.cm.register_cmap(cmap=gradient2n)
# add them back to ggplot
gg.color_scale = colors_at_breaks(gradient2n)
gg.colormap = gradient2n
return gg
| bsd-2-clause |
cdek11/PLS | Code/PLS_Algorithm_Optimized.py | 2 | 5817 |
# coding: utf-8
# In[2]:
# Code to implement the optimized version of the PLS Algorithm
import pandas as pd
import numpy as np
import numba
from numba import jit
@jit
def mean_center_scale(dataframe):
'''Scale dataframe by subtracting mean and dividing by standard deviation'''
dataframe = dataframe - dataframe.mean()
dataframe = dataframe/dataframe.std()
return dataframe
@jit
def y_pred(Y_pred, i,b_dictionary,t_hat_dictionary,q_new_dictionary):
'''Find prediction for Y based on the number of components in this iteration'''
for j in range(1,i+1):
Y_pred = Y_pred + (b_dictionary[j]*t_hat_dictionary[j]).dot(q_new_dictionary[j].T)
return Y_pred
@jit
def rmse(i,Y_true, Y_pred, response_std, RMSE_dictionary):
'''Find training RMSE'''
RMSE = np.sqrt(sum((Y_true - Y_pred)**2)/Y_true.shape[0])
RMSE_scaled = RMSE * response_std
RMSE_dictionary[i] = RMSE_scaled
return RMSE_dictionary
@jit
def core_pls(i,Y, X, q_new_dictionary, b_dictionary, t_hat_dictionary) :
'''Core PLS algorithm'''
#Here we have one variable in the Y block so q = 1
#and omit steps 5-8
q = 1
#For the X block, u = Y
u = Y #random y column from Y #Step 1
w_old = np.dot(u.T,X)/np.dot(u.T,u) #Step 2
w_new = w_old/np.linalg.norm(w_old) #Step 3
t = np.dot(X,w_new.T)/np.dot(w_new,w_new.T) #Step 4
#For the Y block can be omitted if Y only has one variable
q_old = np.dot(t.T,Y)/np.dot(t.T,t) #Step 5
q_new = q_old/np.linalg.norm(q_old) #Step 6
q_new_dictionary[i] = q_new
u = np.dot(Y,q_new.T)/np.dot(q_new,q_new.T) #Step 7
#Step 8: Check convergence
#Calculate the X loadings and rescale the scores and weights accordingly
p = np.dot(t.T,X)/np.dot(t.T,t) #Step 9
p_new = p.T/np.linalg.norm(p.T) #Step 10
t_new = t/np.linalg.norm(p.T) #Step 11
w_new = w_old/np.linalg.norm(p) #Step 12
    #Find the regression coefficient b for the inner relation
b = np.dot(u.T,t_new)/np.dot(t.T,t) #Step 13
b_dictionary[i] = b
#Calculation of the residuals
E_h = X - np.dot(t_new,p_new.T)
F_h = Y - b.dot(t_new.T).T.dot(q) #WORKS BUT IS THIS RIGHT?
#Set outer relation for the X block
#Xres_dictionary[i] = E_h #MAYBE REMOVE
X = E_h
#Set the mixed relation for the Y block
    #Yres_dictionary[i] = F_h #MAYBE REMOVE
Y = F_h
#Find estimated t hat
t_hat = np.dot(E_h,w_new.T)
t_hat_dictionary[i] = t_hat
E_h = E_h - np.dot(t_hat,p_new.T)
return X,Y, u, w_new, q_new, t_new, p_new, q_new_dictionary, t_hat_dictionary, b_dictionary,E_h, F_h
def pls_optimized(path, path_test, predictors, response):
    '''Runs partial least squares (PLS) on numeric predictors for a numeric response,
    reading training and test data from the given CSV paths. Returns two dictionaries
    of RMSE (training and test), keyed by the number of PLS components.'''
###TRAINING DATA
combined = predictors
#Load data
data = pd.DataFrame.from_csv(path)
combined.append(response)
data = data[combined]
response_std = data[response].std()
#Subtract the mean and scale each column
data = mean_center_scale(data)
#Separate in to design matrix (X block) and response column vector (Y block)
predictors.pop()
X = data[predictors].as_matrix()
Y = data[[response]].as_matrix()
Y_true = Y #For prediction
#Get rank of matrix
rank = np.linalg.matrix_rank(X)
u = Y #set initial u as Y
Xres_dictionary = {}
Yres_dictionary = {}
q_new_dictionary ={}
b_dictionary = {}
t_hat_dictionary = {}
t_hat_train_dictionary = {}
t_hat_test_dictionary = {}
RMSE_dictionary = {}
RMSE_test_dictionary = {}
###TEST DATA
#Load data
data_test = pd.DataFrame.from_csv(path_test)
combined.append(response)
data_test = data_test[combined]
response_std_test = data_test[response].std()
#Subtract the mean and scale each column
data_test = mean_center_scale(data_test)
#Separate in to design matrix (X block) and response column vector (Y block)
predictors.pop()
X_test = data[predictors].as_matrix()
Y_test = data[[response]].as_matrix()
Y_true_test = Y_test #For prediction
#Get rank of matrix
rank_test = np.linalg.matrix_rank(X_test)
#Iterate through each component
for i in range(1,(rank+1)):
Y_pred = np.zeros((Y_true.shape[0],1))
Y_pred_test = np.zeros((Y_true_test.shape[0],1))
#Core algo
X,Y, u, w_new, q_new, t_new, p_new, q_new_dictionary, t_hat_dictionary, b_dictionary,E_h, F_h = core_pls(i,Y, X, q_new_dictionary, b_dictionary, t_hat_dictionary)
        #NEW Sum over different components
for g in range(1,i+1):
t_hat_train = np.dot(E_h,w_new.T)
t_hat_train_dictionary[g] = t_hat_train
E_h = E_h - np.dot(t_hat_train, p_new.T)
Y_pred = y_pred(Y_pred, g,b_dictionary,t_hat_dictionary,q_new_dictionary)
#Find training RMSE
RMSE_dictionary = rmse(i,Y_true, Y_pred, response_std, RMSE_dictionary)
#Set initial E_h as X_test data
E_h_test = X_test
        #Sum over different components
for k in range(1,i+1):
t_hat_test = np.dot(E_h_test,w_new.T)
t_hat_test_dictionary[k] = t_hat_test
E_h_test = E_h_test - np.dot(t_hat_test, p_new.T)
Y_pred_test = y_pred(Y_pred_test, k,b_dictionary,t_hat_test_dictionary,q_new_dictionary)
#Find test RMSE
RMSE_test_dictionary = rmse(i,Y_true_test, Y_pred_test, response_std_test, RMSE_test_dictionary)
return RMSE_dictionary, RMSE_test_dictionary
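# Usage sketch (editor's addition; the file names and column names below are
# hypothetical):
#
#     predictors = ["x1", "x2", "x3"]
#     train_rmse, test_rmse = pls_optimized("train.csv", "test.csv",
#                                           predictors, "y")
#     # both return values are dicts keyed by the number of PLS components,
#     # e.g. {1: rmse_1, 2: rmse_2, ...}, with the RMSE rescaled back to the
#     # original units of the response via its standard deviation.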
| mit |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/io/date_converters.py | 10 | 1827 | """This module is designed for community supported date conversion functions"""
from pandas.compat import range, map
import numpy as np
import pandas._libs.lib as lib
def parse_date_time(date_col, time_col):
date_col = _maybe_cast(date_col)
time_col = _maybe_cast(time_col)
return lib.try_parse_date_and_time(date_col, time_col)
def parse_date_fields(year_col, month_col, day_col):
year_col = _maybe_cast(year_col)
month_col = _maybe_cast(month_col)
day_col = _maybe_cast(day_col)
return lib.try_parse_year_month_day(year_col, month_col, day_col)
def parse_all_fields(year_col, month_col, day_col, hour_col, minute_col,
second_col):
year_col = _maybe_cast(year_col)
month_col = _maybe_cast(month_col)
day_col = _maybe_cast(day_col)
hour_col = _maybe_cast(hour_col)
minute_col = _maybe_cast(minute_col)
second_col = _maybe_cast(second_col)
return lib.try_parse_datetime_components(year_col, month_col, day_col,
hour_col, minute_col, second_col)
def generic_parser(parse_func, *cols):
N = _check_columns(cols)
results = np.empty(N, dtype=object)
for i in range(N):
args = [c[i] for c in cols]
results[i] = parse_func(*args)
return results
def _maybe_cast(arr):
if not arr.dtype.type == np.object_:
arr = np.array(arr, dtype=object)
return arr
def _check_columns(cols):
if not len(cols):
raise AssertionError("There must be at least 1 column")
head, tail = cols[0], cols[1:]
N = len(head)
for i, n in enumerate(map(len, tail)):
if n != N:
raise AssertionError('All columns must have the same length: {0}; '
'column {1} has length {2}'.format(N, i, n))
return N
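# Usage sketch (editor's addition; the CSV layout below is a hypothetical
# example): these converters are meant to be handed to pandas.read_csv via
# the parse_dates / date_parser machinery, e.g.
#
#     import pandas as pd
#     from pandas.io.date_converters import parse_date_time
#
#     # combines the 'date' and 'time' columns into one 'datetime' column
#     df = pd.read_csv("measurements.csv",
#                      parse_dates={"datetime": ["date", "time"]},
#                      date_parser=parse_date_time)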
| mit |
felipessalvatore/CNNexample | src/tunning/fc.py | 1 | 2217 | import os
import sys
from random import randint
import numpy as np
import inspect
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from util import run_test, get_data_4d, get_time
from CNN import CNNModel, train_model, check_valid
from DataHolder import DataHolder
from Config import Config
train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels = get_data_4d()
my_dataholder = DataHolder(train_dataset,
train_labels,
valid_dataset,
valid_labels,
test_dataset,
test_labels)
FC = [5, 10, 15, 20, 30, 40, 60, 200]
number_of_exp = len(FC)
results = []
duration = []
info = []
for i, fc in enumerate(FC):
print("\n ({0} of {1})".format(i + 1, number_of_exp))
my_config = Config(tunning=True, hidden_nodes_1=3 * fc,
hidden_nodes_2=2 * fc,
hidden_nodes_3=fc)
attrs = vars(my_config)
config_info = ["%s: %s" % item for item in attrs.items()]
info.append(config_info)
my_model = CNNModel(my_config, my_dataholder)
train_model(my_model, my_dataholder, 10001, 1000, False)
current_dur = get_time(train_model, 10001)
score = check_valid(my_model)
results.append(score)
duration.append(current_dur)
best_result = max(list(zip(results, FC, duration, info)))
result_string = """In an experiment with {0} fully connected sizes
the best one is {1} with valid accuracy = {2}.
\nThe training takes {3:.2f} seconds using the following params:
\n{4}""".format(number_of_exp,
best_result[1],
best_result[0],
best_result[2],
best_result[3])
file = open("final.txt", "w")
file.write(result_string)
file.close()
plt.plot(FC, results)
plt.xlabel("hidden_nodes_3")
plt.ylabel("valid acc")
plt.savefig("fc.png")
plt.clf()
plt.plot(FC, duration)
plt.xlabel("hidden_nodes_3")
plt.ylabel("duration (s)")
plt.savefig("fc_du.png")
plt.clf()
| mit |
hdmetor/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
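# Editor's note (not part of the original example): gmm.bic(X) below scores
# each fitted model with the Bayesian information criterion, roughly
#
#     BIC = -2 * total log-likelihood of X + n_parameters * log(n_samples)
#
# so lower values are better. The parameter count grows with both the number
# of components and the flexibility of the covariance type, which is what
# penalizes the more complex models in the bar plot.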
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
anurag313/scikit-learn | sklearn/utils/tests/test_multiclass.py | 128 | 12853 |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
EXAMPLES = {
'multilabel-indicator': [
# valid when the data is formated as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
        # sequence of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
    # Borderline case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported format
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
    # We don't support those formats at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
msg_regex = 'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
| bsd-3-clause |
jseabold/scikit-learn | sklearn/tests/test_naive_bayes.py | 32 | 17897 | import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1:]), 2)
assert_equal(clf.predict_proba([X[0]]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0:1]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float64)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([[1, 0]]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
    # Non-regression test to make sure that any further refactoring /
    # optimization of the NB models does not harm the performance on a
    # slightly non-linearly separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
    # Fit BernoulliNB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([[0, 1, 1, 0, 0, 1]])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
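# Hand derivation of the constants above (editor's sketch, not part of the
# original test). For Bernoulli NB,
#     P(c | d) ~ P(c) * prod_{t in d} P(t|c) * prod_{t not in d} (1 - P(t|c))
# and with the smoothed feature probabilities listed in feature_prob:
#     China: 0.75 * (0.8 * 0.2 * 0.2) * (0.6 * 0.6 * 0.6) = 0.005184
#     Japan: 0.25 * (2/3)**3 * (2/3)**3 = 16/729 ~= 0.0219478738
# which are exactly the two unnormalized probabilities used above.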
def test_naive_bayes_scale_invariance():
# Scaling the data should not change the prediction results
iris = load_iris()
X, y = iris.data, iris.target
labels = [GaussianNB().fit(f * X, y).predict(f * X)
for f in [1E-10, 1, 1E10]]
assert_array_equal(labels[0], labels[1])
assert_array_equal(labels[1], labels[2])
| bsd-3-clause |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/mpl_toolkits/axisartist/axisline_style.py | 8 | 5277 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib.patches import _Style, FancyArrowPatch
from matplotlib.transforms import IdentityTransform
from matplotlib.path import Path
import numpy as np
class _FancyAxislineStyle:
class SimpleArrow(FancyArrowPatch):
"""
The artist class that will be returned for SimpleArrow style.
"""
_ARROW_STYLE = "->"
def __init__(self, axis_artist, line_path, transform,
line_mutation_scale):
self._axis_artist = axis_artist
self._line_transform = transform
self._line_path = line_path
self._line_mutation_scale = line_mutation_scale
FancyArrowPatch.__init__(self,
path=self._line_path,
arrowstyle=self._ARROW_STYLE,
arrow_transmuter=None,
patchA=None,
patchB=None,
shrinkA=0.,
shrinkB=0.,
mutation_scale=line_mutation_scale,
mutation_aspect=None,
transform=IdentityTransform(),
)
def set_line_mutation_scale(self, scale):
self.set_mutation_scale(scale*self._line_mutation_scale)
def _extend_path(self, path, mutation_size=10):
"""
            Extend the path to make room for drawing the arrow.
"""
from matplotlib.bezier import get_cos_sin
x0, y0 = path.vertices[-2]
x1, y1 = path.vertices[-1]
cost, sint = get_cos_sin(x0, y0, x1, y1)
d = mutation_size * 1.
x2, y2 = x1 + cost*d, y1+sint*d
if path.codes is None:
_path = Path(np.concatenate([path.vertices, [[x2, y2]]]))
else:
_path = Path(np.concatenate([path.vertices, [[x2, y2]]]),
np.concatenate([path.codes, [Path.LINETO]]))
return _path
def set_path(self, path):
self._line_path = path
def draw(self, renderer):
"""
Draw the axis line.
            1) transform the path to display coordinates,
            2) extend the path to make room for the arrow,
            3) update the path of the FancyArrowPatch,
            4) draw.
"""
path_in_disp = self._line_transform.transform_path(self._line_path)
mutation_size = self.get_mutation_scale() #line_mutation_scale()
extented_path = self._extend_path(path_in_disp,
mutation_size=mutation_size)
self._path_original = extented_path
FancyArrowPatch.draw(self, renderer)
class FilledArrow(SimpleArrow):
"""
The artist class that will be returned for SimpleArrow style.
"""
_ARROW_STYLE = "-|>"
class AxislineStyle(_Style):
"""
:class:`AxislineStyle` is a container class which defines style classes
for AxisArtists.
    An instance of any axisline style class is a callable object,
whose call signature is ::
__call__(self, axis_artist, path, transform)
    When called, this should return an mpl artist with the following
    methods implemented::
def set_path(self, path):
# set the path for axisline.
def set_line_mutation_scale(self, scale):
# set the scale
def draw(self, renderer):
# draw
"""
_style_list = {}
class _Base(object):
        # The derived classes are required to be able to be initialized
        # w/o arguments, i.e., all of their arguments (except self) must have
        # default values.
def __init__(self):
"""
initialization.
"""
super(AxislineStyle._Base, self).__init__()
def __call__(self, axis_artist, transform):
"""
Given the AxisArtist instance, and transform for the path
(set_path method), return the mpl artist for drawing the axis line.
"""
return self.new_line(axis_artist, transform)
class SimpleArrow(_Base):
"""
A simple arrow.
"""
ArrowAxisClass = _FancyAxislineStyle.SimpleArrow
def __init__(self, size=1):
"""
*size*
size of the arrow as a fraction of the ticklabel size.
"""
self.size = size
super(AxislineStyle.SimpleArrow, self).__init__()
def new_line(self, axis_artist, transform):
linepath = Path([(0,0), (0, 1)])
axisline = self.ArrowAxisClass(axis_artist, linepath, transform,
line_mutation_scale=self.size)
return axisline
_style_list["->"] = SimpleArrow
class FilledArrow(SimpleArrow):
ArrowAxisClass = _FancyAxislineStyle.FilledArrow
_style_list["-|>"] = FilledArrow
| mit |
mjudsp/Tsallis | examples/plot_isotonic_regression.py | 303 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
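# Quick sanity check (editor's addition, not part of the original example):
# the defining property of the isotonic fit is that its fitted values are
# non-decreasing, while the raw targets are not.
assert np.all(np.diff(y_) >= 0)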
| bsd-3-clause |
BiaDarkia/scikit-learn | sklearn/kernel_ridge.py | 16 | 6766 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
        Gamma parameter for the RBF, laplacian, polynomial, exponential, chi2
and sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_samples] or [n_samples, n_targets]
Representation of weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
sklearn.linear_model.Ridge:
Linear ridge regression.
sklearn.svm.SVR:
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
if sample_weight is not None and not isinstance(sample_weight, float):
sample_weight = check_array(sample_weight, ensure_2d=False)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
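    # Implementation note (sketch, ignoring sample weights): with the training
    # kernel matrix K_ij = k(x_i, x_j), fit() solves the linear system
    #     (K + alpha * I) dual_coef_ = y
    # via _solve_cholesky_kernel, and predict(X) simply returns
    #     K(X, X_fit_) . dual_coef_
    # so prediction cost grows with the number of training samples kept in X_fit_.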
| bsd-3-clause |
caidongyun/BuildingMachineLearningSystemsWithPython | ch07/lr10k.py | 24 | 1228 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import numpy as np
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.datasets import load_svmlight_file
from sklearn.linear_model import LinearRegression
from sklearn.cross_validation import KFold
# Ordinary linear regression is used here; an Elastic net variant is not included in this script
# Load data:
data, target = load_svmlight_file('data/E2006.train')
lr = LinearRegression()
# Compute error on training data to demonstrate that we can obtain near perfect
# scores:
lr.fit(data, target)
pred = lr.predict(data)
print('RMSE on training, {:.2}'.format(np.sqrt(mean_squared_error(target, pred))))
print('R2 on training, {:.2}'.format(r2_score(target, pred)))
print('')
pred = np.zeros_like(target)
kf = KFold(len(target), n_folds=5)
for train, test in kf:
lr.fit(data[train], target[train])
pred[test] = lr.predict(data[test])
print('RMSE on testing (5 fold), {:.2}'.format(np.sqrt(mean_squared_error(target, pred))))
print('R2 on testing (5 fold), {:.2}'.format(r2_score(target, pred)))
| mit |
airanmehr/Utils | Simulation.py | 1 | 40529 | '''
Copyleft Oct 10, 2015 Arya Iranmehr, PhD Student, Bafna's Lab, UC San Diego, Email: airanmehr@gmail.com
'''
from __future__ import division
import numpy as np;
import pandas as pd;
np.set_printoptions(linewidth=140, precision=5, suppress=True)
import subprocess, uuid, os,sys
import pylab as plt
import UTILS.Util as utl
stdout_old=sys.stdout;sys.stdout=open('/dev/null','w');import simuPOP as sim;sys.stdout=stdout_old # to avoid simuPop welcome message!
def sig(x): return 1./(1+np.exp(-x));
def logit(p): return (np.inf if p==1 else np.log(p/(1.-p)))
a='';
def fff(msg):
global a
a += msg
class MSMS:
@staticmethod
def Simulate(n=200, mu=2*1e-9, L=50000, Ne=1e6,r=1e-9,verbose=False,seed=None,intPos=False):
L=int(L)
a= MSMS.Song(F=n, mu=mu, L=L, Ne=Ne, r=r,verbose=verbose,seed=seed)
c=pd.Series(a.columns)
if c.round().value_counts().max()==1:
a.columns=c.round().astype(int)
elif c.astype(int).value_counts().max()==1:
a.columns = c.astype(int)
if intPos:
a.columns=map(int,np.sort(np.random.choice(L, a.shape[1], replace=False)))
return a
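    # Example (sketch; shells out to the msms jar expected by MSMS.MSMS below):
    #   H = MSMS.Simulate(n=200, L=50000)   # DataFrame of 0/1 alleles, columns are positions
    #   H.mean(0)                           # per-site derived-allele frequencies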
@staticmethod
def Song(F=200, mu=2*1e-9, L=50000, Ne=1e6,r=4e-9, uid=None, theta=None, msmsFile=None, dir=None,verbose=False,seed=None):
"""
        Everything is exactly the same.
"""
# print 'mu: {} r:{} NE:{} ,theta={} '.format(mu,r,Ne,4*Ne*mu*L), theta
if msmsFile is not None:
pop=MSMS.load(filename=msmsFile)[0]
else:
if theta:
pop=MSMS.MSMS(n=F, numReps=1, theta=theta, rho=2*Ne*(L-1)*r, L=L, Ne=Ne, uid=uid, dir=dir,verbose=verbose,seed=seed)[0]
else:
pop=MSMS.MSMS(n=F, numReps=1, theta=2*Ne*mu*L, rho=2*Ne*(L-1)*r, L=L, Ne=Ne, uid=uid, dir=dir,verbose=verbose,seed=seed)[0]
pop.r=r
pop.Ne=Ne
pop.L=L
return pop
@staticmethod
def MSMS(n, numReps, theta, rho, L, Ne=None,uid=None,oneMutationEvery=None, dir=dir,verbose=False,seed=None):
"""
        Returns a list of DataFrames, one per replicate
"""
if dir is None:
dir= utl.PATH.simout;dir+= 'msms/';
os.system('mkdir -p ' +dir)
if oneMutationEvery is not None:
nSS=L/oneMutationEvery
theta=nSS/sum(1./np.arange(1,n))
if uid is None:
uid=str(uuid.uuid4())
unique_filename = dir+uid+'.msms'
if seed is None:
seed=''
else:
seed=' -seed {} '.format(seed)
cmd="java -jar -Xmx2g ~/bin/msms/lib/msms.jar -ms {} {} -t {:.0f} -r {:.0f} {:.0f} -oFP 0.000000000000E00 {} > {}".format(n, numReps, theta, rho, L, seed,unique_filename)
if verbose:
print cmd
subprocess.call(cmd,shell=True)
return MSMS.load(unique_filename)
@staticmethod
def getSeed(filename):
file=open(filename);cmd=np.array(file.readline().strip().split(' '));seed=file.readline().strip()
return seed
@staticmethod
def load(filename):
n, R, L, posUnderSelection = MSMS.getParams(open(filename).readline())
lines=np.array(map(str.strip,open(filename).readlines()) )
posIdx= np.where(map(lambda x: x[:len('positions:')]=='positions:',lines))[0]
try:
theta = lines[np.where(map(lambda x: 'ThetaW Estimate Summaray:' in x, lines))[0][0]].split(':')[1].strip()
except:
theta = None
POS=[map(lambda x: (float(x)*L), lines[ii].split()[1:]) for ii in posIdx]
dfs=[pd.DataFrame(map(list ,lines[i +1 +range(n)]),columns=pos ) for i,pos in zip(posIdx,POS)]
for df in dfs:
df[df!='0']=1
df[df=='0']=0
df.L = L
if posUnderSelection is not None:
df.posUnderSelection = posUnderSelection * L
if theta is not None:
df.stat = pd.Series(theta.split(), index=['W', 'Pi', 'D']).astype(float)
return dfs
@staticmethod
def getParams(line):
"""
Args:
            line: the first line of an msms output file (the command line that produced it)
        Returns:
            n, R, L, posUnderSelection: sample size, number of replicates, genome length,
            and the relative position of the site under selection (None if absent)
"""
params=np.array(line.strip().split(' '))
offset=np.where(map(lambda x: 'ms'in x, params))[0][0]
if params[offset+1] == '-N':
i=3
else:
i=1
posUnderSelection = None
if '-Sp' in params: posUnderSelection = float(params[np.where(params == '-Sp')[0][0] + 1])
return int(params[offset + i]), int(params[offset + i + 1]), int(
params[np.where(params == '-r')[0][0] + 2]), posUnderSelection
@staticmethod
def fixDuplicatePositions(pos,L):
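        # Best-effort description: msms can emit repeated (rescaled) positions; the
        # duplicates are re-drawn from the middle half of the gap between their
        # distinct neighbours so that every column position becomes unique.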
pos=pd.Series(range(len(pos)),index=pos)
posHits=pos.index.value_counts()
invalidPOS=posHits[posHits>1]
if not invalidPOS.shape[0]:
return pos.index.values
for invalidPos in invalidPOS.index:
mini=pos.loc[invalidPos].min()
maxi=pos.loc[invalidPos].max()
lowerBound=pos[pos==mini-1].index.max()
upperBound=pos[pos==maxi+1].index.min();
if maxi==pos.shape[0]-1: upperBound=L
if mini==0: lowerBound=0
            validRange=np.arange((upperBound-lowerBound)/2) # width of the middle half of the gap between neighbours
            offset=validRange+validRange.shape[0]/2 # shift past the first quartile, so new positions land in the 2nd/3rd quartiles
newPos=pos.index.values;
newPos[mini:maxi+1]=np.sort(np.random.choice(offset,pos.loc[invalidPos].shape[0],replace=False))+lowerBound
pos.index=newPos
assert pos.index.value_counts().max()==1
return pos.index.values
@staticmethod
def Selection(msms, Ne, n, numReplicates, theta, rho, window_size, s, origin_count, posUnderSelection, gens, path):
seed = ''
for ii, gen in enumerate(gens):
fname = path + '{}.msms'.format(int(gen))
if (not ii) and s != 0:
# while (nu0 < 0.95) or (nu0 > 0.99):
cmd = "{} -N {} -ms {} {} -t {} -r {} {:.0f} -SAA {} -SaA {} -SI {} 1 {} -Sp {} -oOC -Smark -oFP 0.000000000000E00 {} -SForceKeep -SFC -oTW >{}".format(
msms, Ne, n, numReplicates, theta, rho, window_size, 2 * Ne * s, Ne * s, gen / (4. * Ne),
origin_count / Ne,
posUnderSelection, ('-seed {}'.format(seed), '')[seed is ''], fname)
os.system(cmd)
else:
cmd = "{} -N {} -ms {} {} -t {} -r {} {:.0f} -SAA {} -SaA {} -SI {} 1 {} -Sp {} -oOC -Smark -oFP 0.000000000000E00 {} -SFC -SForceKeep -oTW >{}".format(
msms, Ne, n, numReplicates, theta, rho, window_size, 2 * Ne * s, Ne * s, gen / (4. * Ne),
origin_count / Ne,
posUnderSelection, ('-seed {}'.format(seed), '')[seed is ''], fname)
os.system(cmd)
if not ii: seed = MSMS.getSeed(fname)
@staticmethod
def SelectionFinale(msms, Ne, n, numReplicates, theta, rho, window_size, s, origin_count, posUnderSelection, gens,
path):
seed = ''
nu0 = 0
for ii, gen in enumerate(gens):
fname = path + '{}.msms'.format(int(gen))
if (not ii) and s != 0:
while (nu0 < 0.9):
cmd = "{} -N {} -ms {} {} -t {} -r {} {:.0f} -SAA {} -SaA {} -SI {} 1 {} -Sp {} -oOC -Smark -oFP 0.000000000000E00 {} -SForceKeep -SFC -oTW >{}".format(
msms, Ne, n, numReplicates, theta, rho, window_size, 2 * Ne * s, Ne * s, gen / (4. * Ne),
origin_count / Ne,
posUnderSelection, ('-seed {}'.format(seed), '')[seed is ''], fname)
os.system(cmd)
nu0 = MSMS.load(fname)[0].mean(0).loc[25000]
else:
cmd = "{} -N {} -ms {} {} -t {} -r {} {:.0f} -SAA {} -SaA {} -SI {} 1 {} -Sp {} -oOC -Smark -oFP 0.000000000000E00 {} -SFC -SForceKeep -oTW >{}".format(
msms, Ne, n, numReplicates, theta, rho, window_size, 2 * Ne * s, Ne * s, gen / (4. * Ne),
origin_count / Ne,
posUnderSelection, ('-seed {}'.format(seed), '')[seed is ''], fname)
os.system(cmd)
if not ii: seed = MSMS.getSeed(fname)
@staticmethod
def SelectionNu(msms, Ne, n, numReplicates, theta, rho, window_size, s, posUnderSelection, nu, path=None):
seed = ''
if path is None: path = '~/tmp.msms'
fname = path + '{}.msms'.format(nu)
cmd = "{} -N {} -ms {} {} -t {} -r {} {:.0f} -SAA {} -SaA {} -SF 0 {} -Sp {} -oOC -Smark -oFP 0.000000000000E00 {} -SFC -oTW >{}".format(
msms, Ne, n, numReplicates, theta, rho, window_size, 2 * Ne * s, Ne * s, nu, posUnderSelection,
('-seed {}'.format(seed), '')[seed is ''], fname)
print cmd
os.system(cmd)
return MSMS.load(fname)
@staticmethod
def SelectionNuForward(msms, Ne, n, numReplicates, theta, rho, window_size, s, origin_count, posUnderSelection,
gens, path):
        seed = ''
        nu0 = 0
for ii, gen in enumerate(gens):
fname = path + '{}.msms'.format(gen)
if (not ii) and s != 0:
while (nu0 < 0.95) or (nu0 > 0.99):
cmd = "{} -N {} -ms {} {} -t {} -r {} {:.0f} -SAA {} -SaA {} -SI {} 1 {} -Sp {} -oOC -Smark -oFP 0.000000000000E00 {} -SFC -oTW >{}".format(
msms, Ne, n, numReplicates, theta, rho, window_size, 2 * Ne * s, Ne * s, gen / (4. * Ne),
origin_count / Ne,
posUnderSelection, ('-seed {}'.format(seed), '')[seed is ''], fname)
os.system(cmd)
nu0 = MSMS.load(fname)[0].mean(0).loc[25000]
print nu0, gen, cmd
if not ii: seed = MSMS.getSeed(fname)
class Simulation:
@staticmethod
def setSeed(seed):
if seed is None: return
sim.setRNG('rand', seed + 1);
np.random.seed(seed)
@staticmethod
def load(ExperimentName, s=0.1, L=50000, experimentID=0, nu0=0.005, isFolded=False, All=False, startGeneration=0,
maxGeneration=50, numReplicates=3, numSamples=5, step=10, replicates=None, coverage=np.inf):
path='{}{}/simpop/'.format(utl.PATH.simout, ExperimentName) + Simulation.getSimulationName(s=s, L=L, experimentID=experimentID, initialCarrierFreq=nu0, isFolded=isFolded) + '.pkl'
sim= pd.read_pickle(path)
sim.savedPath=path
if replicates is not None: sim.setReplicates(sorted(replicates))
elif numReplicates is not None: sim.setReplicates(range(numReplicates))
if coverage != np.inf:
sim.Xi = sim.X
sim.X = sim.C.loc[coverage] / sim.D.loc[coverage].astype(float)
sim.X = np.array(map(lambda x: utl.roundto(x, 5), sim.X.reshape(-1) * 1e4)).reshape(sim.X.shape) / 1e4
sim.CD=sim.getCD(coverage)
sim.CD.columns.names=['REP','GEN','READ']
if not All: sim.setSamplingTimes(maxGeneration=min(maxGeneration,sim.getGenerationTimes()[-1]),numSamples=numSamples,step=step,startGeneration=startGeneration)
return sim
@staticmethod
def getSimulationName(s,L,experimentID,initialCarrierFreq,isFolded,msms=False):
if msms:
return 'L{:.0f}K.{:04.0f}'.format(L/1000,experimentID)
if s:
return 'Nu{:E}.s{:E}.L{:.0f}K.{:04.0f}{}'.format(np.round(float(initialCarrierFreq), 3), s, L / 1000,
experimentID, ('', '.Folded')[isFolded])
else:
return 'Nu{:E}.s{:E}.L{:.0f}K.{:04.0f}{}'.format(0, s * 100, L / 1000, experimentID,
('', '.Folded')[isFolded])
def setReplicates(self,replicates):
self.numReplicates=len(replicates)
self.X=self.X[:,:,replicates]
self.C = self.C.apply(lambda x: x[:, :, replicates])
self.D = self.D.apply(lambda x: x[:, :, replicates])
def __init__(self, outpath=utl.PATH.simout, N=1000, generationStep=10, maxGeneration=None,
s=0.05, r=4e-9, Ne=1e6, mu=2e-9, F=200, h=0.5, L=50000, startGeneration=0, numReplicates=3, H0=None,
foldInitialAFs=False, save=True, foutName=None,
doForwardSimulationNow=True, experimentID=-1,
msmsFile=None,initialCarrierFreq=0, ExperimentName=None, simulateNeutrallyFor=0,
initialNeutralGenerations=0, ignoreInitialNeutralGenerations=True,
makeSureSelectedSiteDontGetLost=True, onlyKeep=None, verbose=0, sampingTimes=None, minIncrease=0,
model=None,initDiploidPop=None,posUnderSelection=-1,haplotypes=False,seed=None,recombinator=None
):
"""
        A general forward-simulation class. Parameters include:
        H0: DataFrame of initial haplotypes (F individuals x m segregating sites); columns are genomic positions
"""
self.recombinator=recombinator
if seed is not None:
Simulation.setSeed(seed)
self.s = s;
self.r = r;
self.Ne = Ne;
self.mu = mu;
self.F = F;
self.h = h;
self.L = int(L);
self.startGeneration = startGeneration;
self.numReplicates = numReplicates;
self.posUnderSelection = -1
self.initDiploidPop = initDiploidPop
self.initialCarrierFreq= initialCarrierFreq if initialCarrierFreq else 1./self.F
if foutName is not None:
self.uid=foutName
self.uidMSMS=None
elif experimentID>=0:
            self.uid=Simulation.getSimulationName(self.s, self.L, experimentID, initialCarrierFreq=self.initialCarrierFreq, isFolded=foldInitialAFs)
            self.uidMSMS=Simulation.getSimulationName(self.s, self.L, experimentID, initialCarrierFreq=self.initialCarrierFreq, isFolded=foldInitialAFs,msms=True)
else:
self.uid=str(uuid.uuid4())
self.uidMSMS=self.uid
if H0 is None:
self.simulateH0()
H0=self.H0
else:
self.setH0(H0);
if posUnderSelection >= 0:
if self.positions is None:
self.positions=map(int, self.initDiploidPop.lociPos())
self.set_posUnderSelection(posUnderSelection)
assert ExperimentName != None
self.save=save
self.model=model
self.minIncrease = minIncrease
self.samplingTimes=sampingTimes
self.initialNeutralGenerations=initialNeutralGenerations
self.onlyKeep=onlyKeep
self.makeSureSelectedSiteDontGetLost=makeSureSelectedSiteDontGetLost
self.ignoreInitialNeutralGenerations=ignoreInitialNeutralGenerations
self.msmsFile=msmsFile;self.outpath=outpath; self.outpath=outpath ; self.N=N; self.generationStep=generationStep; self.maxGeneration= maxGeneration;
self.foldInitialAFs=foldInitialAFs;self.doForwardSimulationNow=doForwardSimulationNow;self.experimentID=experimentID
self.simulateNeutrallyFor=simulateNeutrallyFor
self.setH0(H0);
if not os.path.exists(self.outpath) : os.makedirs(self.outpath)
self.outpath+=ExperimentName
if not os.path.exists(self.outpath) : os.makedirs(self.outpath)
self.outpathmsms=self.outpath+'/msms/';self.outpath+='/simpop/'
if not os.path.exists(self.outpath) : os.makedirs(self.outpath)
if not os.path.exists(self.outpathmsms) : os.makedirs(self.outpathmsms)
if self.maxGeneration is None: self.maxGeneration=Simulation.getFixationTime(self.s, Ne=self.F, roundto10=True)
self.theta=2*self.Ne*self.mu*self.L
self.pops=[]
if self.model is None:
import simuPOP.demography as dmg
self.model=dmg.LinearGrowthModel(T=self.maxGeneration, N0=self.N, NT=self.N)
if self.doForwardSimulationNow:
self.forwardSimulation()
@staticmethod
def simulateSingleLoci(nu0=0.005, T=100, s=0.1, N=1000,verbose=True,h=0.5,seed=None):
if verbose:
print '.',
step = 1
Simulation.setSeed(seed)
pop = sim.Population(size=N, ploidy=2, loci=[1],infoFields=['fitness']);sim.initGenotype(pop, prop=[1-nu0,nu0]);simulator = sim.Simulator(pop.clone(), rep=1);
# sim.stat(pop, alleleFreq=[0]); print pop.dvars().alleleFreq[0][1]
global a;a = "0;;{}\n".format(nu0)
simulator.evolve(initOps=[sim.InitSex()],
preOps=sim.MapSelector(loci=0, fitness={(0, 0): 1, (0, 1): 1 + s *h, (1, 1): 1 + s}),
matingScheme=sim.RandomMating(), postOps=[sim.Stat(alleleFreq=[0], step=step),
sim.PyEval("'%d;;' % (gen+1)", reps=0, step=step,
output=fff), sim.PyEval(
r"'{}\n'.format(map(lambda x: round(x[1],5),alleleFreq.values())[0])", step=step, output=fff)],
gen=T)
return pd.DataFrame(zip(*map(lambda x: x.split(';;'), a.strip().split('\n')))).T.set_index(0)[1].astype(float)
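    # Example (sketch, illustrative values): a single-locus selection trajectory,
    # returned as a pandas Series of allele frequency indexed by generation:
    #   traj = Simulation.simulateSingleLoci(nu0=0.005, T=100, s=0.1, N=1000)
    #   traj.plot()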
def createInitialDiploidPopulation(self):
"""
        initHaps : 2D array of shape m x nSS, where m is the number of individual haplotypes and nSS the number of segregating sites
        returns a homozygous diploid population in which every haplotype is copied n times
"""
if self.initDiploidPop is not None: return self.initDiploidPop
assert int(2*self.N/self.F)==2*self.N/float(self.F) # N should be a multiplier of F
nSS=self.H0.shape[1];n=int(self.N/self.F)
try:
pop = sim.Population(size=self.N, ploidy=2, loci=nSS,lociPos=list(self.positions), infoFields='fitness')
except:
import traceback
print(traceback.format_exc())
print list(self.positions), nSS,n,self.H0.shape[0]
exit()
assert (self.N % self.H0.shape[0]) ==0
H= [[list(h.values),list(h.values)] for _ in range(n) for _,h in self.H0.iterrows()]
for (i,h) in zip(pop.individuals(),H): # for each indv assing first and second chromosome
i.setGenotype(h[0],0 );i.setGenotype(h[1],1 ) #homozygote population of diploid
# sim.stat(pop, alleleFreq=range(nSS));print np.array([pop.dvars().alleleFreq[x][1] for x in range(nSS)])
return pop
@staticmethod
def getGT(pop, i=None, pos=None):
if i == None and pos == None:
df = pd.concat([pd.DataFrame([list(i.genotype(0)) for i in pop.individuals()]),
pd.DataFrame([list(i.genotype(1)) for i in pop.individuals()])],
keys=[0, 1]).sort_index().reorder_levels([1, 0]).sort_index()
df.columns = map(int, pop.lociPos())
return df
i = np.where(np.array(pop.lociPos()).astype(int) == pos)[0][0]
a, b = [], []
for ind in pop.individuals():
a += [ind.genotype(0)[i]]
b += [ind.genotype(1)[i]]
return pd.concat([pd.Series(a), pd.Series(b)], keys=[0, 1]).reorder_levels([1, 0]).sort_index()
@staticmethod
def createDiploidPopulationFromDataFrame(df):
"""
        df : DataFrame of haplotypes indexed by (individual, chromosome); columns are positions
        returns a diploid population whose two chromosomes per individual are taken from the corresponding rows of df
"""
pop = sim.Population(size=df.shape[0]/2, ploidy=2, loci=df.shape[1], lociPos=list(df.columns), infoFields='fitness')
for j,i in enumerate(pop.individuals()): # for each indv assing first and second chromosome
i.setGenotype(df.loc[j].loc[0].tolist(),0 );i.setGenotype(df.loc[j].loc[1].tolist(),1 )
return pop
@staticmethod
def _simualtePop(pop, s=0, h=0.5, r=2e-8, siteUnderSelection=0,gen=1,recombinator=None,seed=None):
"Gets population and returns population"
Simulation.setSeed(seed)
simulator = sim.Simulator(pop.clone(), rep=1)
if recombinator is None:recombinator=sim.Recombinator(intensity=r)
simulator.evolve(
initOps=[sim.InitSex()],
preOps=sim.MapSelector(loci=siteUnderSelection, fitness={(0, 0): 1, (0, 1): 1 + s * h, (1, 1): 1 + s}),
matingScheme=sim.RandomMating(ops=recombinator),
gen=gen)
return simulator.population(0).clone()
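    # Example (sketch): advance an existing simuPOP population ten generations with
    # selection s=0.05 acting on the first locus, leaving the original untouched:
    #   pop2 = Simulation._simualtePop(pop, s=0.05, siteUnderSelection=0, gen=10)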
@staticmethod
def _simualte(pop,s,h,r,siteUnderSelection,positions,startGeneration,generationStep,maxGeneration,model=None,makeSureSelectedSiteDontGetLost=True):
"Gets population and returns Dataframe, Static method"
N = int(pop.popSize())
if model is None:
import simuPOP.demography as dmg
model = dmg.LinearGrowthModel(T=maxGeneration, N0=N, NT=N)
simulator = sim.Simulator(pop.clone(), rep=1)
global a;a = ""
pops=[]
step=1# this is slow but safe, dont change it
simulator.evolve(
initOps=[sim.InitSex()],
preOps=sim.MapSelector(loci=siteUnderSelection, fitness={(0, 0): 1, (0, 1): 1 + s * h, (1, 1): 1 + s}),
matingScheme=sim.RandomMating(ops=sim.Recombinator(intensity=r),subPopSize=model),
postOps=[sim.Stat(alleleFreq=range(int(pop.numLoci()[0])), step=step), sim.PyEval("'Gen %4d;;' % (gen+1)", reps=0,step= step, output=fff), sim.PyEval(r"'{},'.format(map(lambda x: round(x[1],5),alleleFreq.values()))", step=step, output=fff),sim.PyOutput('\n', reps=-1, step=step, output=fff)],
gen = maxGeneration)
# idx=np.arange(self.generationStep-1,self.maxGeneration,self.generationStep)+self.initialNeutralGenerations
print a
_,data=zip(*map(lambda x: x.split(';;'),a.strip().split('\n')))
data=np.array(map(eval,data))[:,0,:]
print data
# if data[-1, self.siteUnderSelection] >= self.initialCarrierFreq + self.minIncrease or self.s == 0 or not self.makeSureSelectedSiteDontGetLost:
if data[-1, siteUnderSelection] or s == 0 or not makeSureSelectedSiteDontGetLost:
try:
pops+=[simulator.extract(0) ]
except:
print 'Error'
return data[int(startGeneration/generationStep):,:]
else:
            return Simulation._simualte(pop, s, h, r, siteUnderSelection, positions, startGeneration,
                                        generationStep, maxGeneration, model, makeSureSelectedSiteDontGetLost)
def simualte(self):
"Gets population and returns Dataframe, Class method"
import simuPOP.demography as dmg
# model=dmg.ExponentialGrowthModel(T=50, N0=1000, NT=200)
simulator = sim.Simulator(self.initDiploidPop.clone(), rep=1)
# sim.dump(self.initDiploidPop)
global a;a = ""
if self.recombinator is None:
self.recombinator=sim.Recombinator(intensity=self.r)
step=1# this is slow but safe, dont change it
simulator.evolve(
initOps=[sim.InitSex()],
preOps=sim.MapSelector(loci=self.siteUnderSelection, fitness={(0,0):1, (0,1):1+self.s*self.h, (1,1):1+self.s}),
matingScheme=sim.RandomMating(ops=self.recombinator,subPopSize=self.model),
postOps=[sim.Stat(alleleFreq=range(len(self.positions)), step=step),
sim.PyEval("'Gen %4d;;' % (gen+1)", reps=0,step= step, output=fff), sim.PyEval(r"'{},'.format(map(lambda x: round(x[1],5),alleleFreq.values()))", step=step, output=fff),sim.PyOutput('\n', reps=-1, step=step, output=fff)],
gen = self.maxGeneration)
# idx=np.arange(self.generationStep-1,self.maxGeneration,self.generationStep)+self.initialNeutralGenerations
_,data=zip(*map(lambda x: x.split(';;'),a.strip().split('\n')))
data=np.array(map(eval,data))[:,0,:]
# if data[-1, self.siteUnderSelection] >= self.initialCarrierFreq + self.minIncrease or self.s == 0 or not self.makeSureSelectedSiteDontGetLost:
if data[-1, self.siteUnderSelection] or self.s == 0 or not self.makeSureSelectedSiteDontGetLost:
try:
self.pops+=[simulator.extract(0) ]
except:
print 'Error'
return data[int(self.startGeneration/self.generationStep):,:]
else:
# print pd.Series(data[:, self.siteUnderSelection])
return self.simualte()
def simulateH0(self):
self.H0=MSMS.Song(F=self.F, L=self.L, Ne=self.Ne, r=self.r, mu=self.mu,uid=self.uidMSMS)
def set_siteUnderSelection(self,x):
self.siteUnderSelection=x
self.posUnderSelection=self.positions[self.siteUnderSelection]
def set_posUnderSelection(self,x):
self.posUnderSelection=x
self.siteUnderSelection=np.where(self.positions==self.posUnderSelection)[0][0]
def setH0(self,H0):
self.H0=H0
self.positions=self.H0.columns.values
self.F=self.H0.shape[0]
def set_BeneficialLoci(self,selectionOnRandomSite=False,siteUnderSelection=None,posUnderSelection =None):
if selectionOnRandomSite:
self.set_siteUnderSelection(np.random.randint(0,self.H0.shape[1]))
elif siteUnderSelection is not None:
self.set_siteUnderSelection(siteUnderSelection)
elif posUnderSelection is not None:
self.set_siteUnderSelection(posUnderSelection)
else:
if not self.s:
self.set_siteUnderSelection(self.X0.argmax())
else:
sites=np.sort(np.where(self.X0== self.initialCarrierFreq)[0]);
if not len(sites):
sites=np.sort(np.where(( self.X0 <= self.initialCarrierFreq +0.025) & ( self.X0 >= self.initialCarrierFreq -0.025) ) [0]);
if not len(sites):
print 'Try again. No site at freq ',self.initialCarrierFreq, self.uid; return
self.set_siteUnderSelection(sites[np.random.randint(0,len(sites))])
def createInitHaps(self):
assignPositions=True
if self.H0 is None:
H0 = MSMS.Song(F=self.F, L=self.L, Ne=self.Ne, r=self.r, mu=self.mu, uid=self.uidMSMS,
msmsFile=self.msmsFile, dir=self.outpathmsms)
else:
H0 = self.H0
assignPositions=False
if self.foldInitialAFs:
idx = H0.mean(0) > 0.5
H0.iloc[:, idx.values] = 1 - H0.iloc[:, idx.values]
self.setH0(H0)
if assignPositions:
self.positions_msms = self.H0.columns.values.copy(True)
self.positions = sorted(np.random.choice(self.L, self.H0.shape[1], replace=False))
self.H0 = pd.DataFrame(self.H0.values, columns=self.positions)
self.X0 = self.H0.mean(0).values
def forwardSimulation(self):
"""
        returns an np 3D array of shape T x nSS x R, where T is the number of sampling times, nSS the number of segregating sites, and R the number of replicates
"""
import numpy as np
# df = pd.DataFrame([list(i.genotype(j)) for j in range(2) for i in self.initDiploidPop.individuals()])
if self.posUnderSelection<0 and self.initDiploidPop is None:
self.createInitHaps()
self.set_BeneficialLoci()
self.initDiploidPop=self.createInitialDiploidPopulation()
elif self.initDiploidPop is None:
self.createInitHaps()
self.initDiploidPop = self.createInitialDiploidPopulation()
# self.X0=self.H0.mean().values
else:
self.X0=Simulation.getGT(self.initDiploidPop).mean().values
# df = pd.DataFrame([list(i.genotype(j)) for j in range(2) for i in self.initDiploidPop.individuals()])
# print pd.concat([df.mean(),self.H0.mean().reset_index(drop=True)],1)
self.X=np.array([self.simualte() for _ in range(self.numReplicates)]).swapaxes(0, 2).swapaxes(0, 1)
self.X=np.append(np.tile(self.X0[:,None],(1,self.X.shape[2]))[None,:,:],self.X,axis=0)
self.sampleDepths()
if self.save:
pd.to_pickle(self,self.outpath+self.uid+'.pkl')
# self.createDF()
def getGenerationTimes(self,step=None,includeZeroGeneration=True):
if step is None: step=self.generationStep
times= np.arange(0,self.maxGeneration-self.startGeneration+1,step)
if includeZeroGeneration:
return times
else:
return times[1:]
def getTrueGenerationTimes(self,step=None,includeZeroGeneration=True):
if step is None: step=self.generationStep
times= np.arange(self.startGeneration,self.maxGeneration+1,step)
if includeZeroGeneration:
return times
else:
return times[1:]
@staticmethod
def getFixationTime(s,Ne=200,roundto10=True):
if s==0: s=0.01
t=-4*int(logit(1./Ne)/s)
if roundto10:
return (t//10 +1)*10
else:
return t
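    # Rationale (sketch): under logistic growth a sweep from frequency 1/Ne to
    # 1-1/Ne takes roughly 2*logit(1-1/Ne)/s generations; the factor of 4 above
    # roughly doubles that as a safety margin, e.g.
    #   Simulation.getFixationTime(0.05, Ne=200)   # ~430 generations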
@staticmethod
def sampleInitSamplingTime(s,Ne=200,phase=0,samplingWindow=50,startOfEpoch=False):
fix=Simulation.getFixationTime(s, Ne=Ne)
if phase==0: lower,upper=(0, fix-samplingWindow)
if phase==1: lower,upper=(0, fix/3-samplingWindow)
if phase==2: lower,upper=(fix/3, 2*fix/3-samplingWindow)
if phase==3: lower,upper=(2*fix/3, fix-samplingWindow)
if startOfEpoch:
rnd=lower
else:
rnd=np.random.randint(lower,max(lower,upper)+1)
return int(rnd)//10 *10
@staticmethod
def sampleStartTimesforAlls(samplingWindow=50):
S=[0.1, 0.05, 0.02, 0.01,0]
for phase in [1,2,3]:
pd.DataFrame([[Simulation.sampleInitSamplingTime(s, phase=phase, samplingWindow=samplingWindow, startOfEpoch=True) for _ in range(100)] for s in S], index=S).T.to_pickle('/home/arya/out/startSamplingTimes.phase{}.sampleWin{}.pkl'.format(phase, samplingWindow))
def setSamplingTimes(self,maxGeneration=None,numSamples=5,step=None,startGeneration=None):
GT=pd.Series(range(len(self.getTrueGenerationTimes(includeZeroGeneration=True))),index=self.getTrueGenerationTimes(includeZeroGeneration=True))
if startGeneration is not None: self.startGeneration=startGeneration
if maxGeneration is not None: self.maxGeneration = maxGeneration
if step is not None:self.generationStep=step
else: self.generationStep=(self.maxGeneration-self.startGeneration)/numSamples
i = GT.loc[self.getTrueGenerationTimes(includeZeroGeneration=True)[:self.X.shape[0]]].values
self.X = self.X[i, :, :]
self.C = self.C.apply(lambda x: x[i, :, :])
self.D = self.D.apply(lambda x: x[i, :, :])
self.X0=self.X[0,:,0]
@staticmethod
def getSamplingTimeBasedOnFreq(sim,phase,samplingWin=50):
carrier_freq=[0.1,0.5,0.9][phase-1]
a= np.where(sim.X[:,sim.siteUnderSelection,:].mean(1)>carrier_freq)[0]
ft=sim.getTrueGenerationTimes().max()
if len(a):
t= sim.getTrueGenerationTimes()[np.where(sim.X[:,sim.siteUnderSelection,:].mean(1)>carrier_freq)[0].min()]
else:
t=sim.getTrueGenerationTimes().max()
return min(t,ft-samplingWin)
@staticmethod
def Load(s=0.1, experimentID=0, nu0=0.005, numReplicates=3, step=10, ModelName='TimeSeries', samplingWindow=50,
L=50000, depthRate=30):
if not s: nu0=0.005
sim = Simulation.load(s=s, experimentID=experimentID % 100, nu0=nu0, numReplicates=numReplicates, step=step,
ExperimentName=ModelName, All=True, L=L, replicates=range(numReplicates),
coverage=depthRate)
sim.experimentID=experimentID
startGen=0
sim.setSamplingTimes(maxGeneration=min(startGen+samplingWindow,sim.getTrueGenerationTimes()[-1]),step=step,startGeneration=startGen)
sim.createDF()
return sim
def getHardSweepMutations(self):
MAF=1./self.H0.shape[0]
dups=self.H0[self.H0.duplicated()]
x0=pd.Series(self.X0, index=self.positions)
hard=[]
for _,dup in dups.iterrows():
numDup=self.H0.apply(lambda x:(x==dup).all(),axis=1).sum()
hard=np.append(hard, (dup*x0==numDup*MAF).replace({False:None}).dropna().index.values)
hard=np.sort(np.append(hard,(x0==MAF).replace({False:None}).dropna().index.values).astype(int))
return hard
@property
def df(self):
reps=range(self.numReplicates)
self.df=pd.concat([pd.DataFrame(self.X[:,:,r],columns=self.positions,index=pd.MultiIndex.from_product([[r],range(self.X.shape[0])],names=['REP','TIME'])).T for r in reps],axis=1)
if self.numReplicates==1:
self.df=self.df[0]
return self.df
def computeCDi(self, EE, depthRate):
E = EE.loc[depthRate]
index = pd.Series(range(E.shape[0]), E.index)
C = pd.concat([pd.DataFrame(self.C.loc[depthRate][:, :, r], columns=self.H0.columns,
index=pd.MultiIndex.from_product([[r], self.getTrueGenerationTimes()],
names=['REP', 'GEN'])).T for r in
range(self.numReplicates)], axis=1)
D = pd.concat([pd.DataFrame(self.D.loc[depthRate][:, :, r], columns=self.H0.columns,
index=pd.MultiIndex.from_product([[r], self.getTrueGenerationTimes()],
names=['REP', 'GEN'])).T for r in
range(self.numReplicates)], axis=1)
self.cd = pd.concat([pd.Series(zip(C[i], D[i])) for i in C.columns], axis=1)
self.cd.columns = C.columns;
self.cd.index = C.index
self.cdi = self.cd.applymap(lambda x: index.loc[x])
def sampleDepths(self,depths = [30, 100, 300]):
self.D = pd.Series(None, index=depths)
self.C = pd.Series(None, index=depths)
for depthRate in depths:
self.D.loc[depthRate] = np.random.poisson(depthRate,
self.X.shape[0] * self.X.shape[1] * self.X.shape[2]).reshape(
self.X.shape).astype(object)
self.C.loc[depthRate] = np.array([np.random.binomial(d, x) for x, d in
zip(self.X.reshape(-1), self.D.loc[depthRate].reshape(-1))]).reshape(
self.X.shape).astype(object)
@staticmethod
def sampleDepthX(X,cov):
D= np.random.poisson(cov,X.size)
C= np.array([np.random.binomial(d, x) for x, d in zip(X, D)])
return C,D
@staticmethod
def sampleDepthXSeries(X,cov):
C,D=Simulation.sampleDepthX(X.values,cov)
a=pd.DataFrame([C,D],columns=X.index,index=['C','D']).T
return a
@staticmethod
def computeCDdf(a, E):
index = pd.Series(range(E.shape[0]), E.index)
def f(x):
try:
return index.loc[x]
except:
return -1
z=a.groupby(level=[0,1],axis=1).apply(lambda x: x.apply(lambda y:(y.iloc[0],y.iloc[1]),1)).applymap(f)
return z[(z<0).sum(1)==0]
def getCD(self,coverage):
T=self.getTrueGenerationTimes()
Ti=T
if T[-1]!=self.C[coverage].shape[0]-1: Ti=range(self.C[coverage].shape[0])
C=pd.concat([pd.DataFrame(self.C[coverage][Ti,:,i],columns=self.positions,index=T).T for i in range(self.numReplicates)],1,keys=range(self.C[coverage].shape[2]))
D=pd.concat([pd.DataFrame(self.D[coverage][Ti,:,i],columns=self.positions,index=T).T for i in range(self.numReplicates)],1,keys=range(self.C[coverage].shape[2]))
CD=pd.concat([C,D],1,keys=['C','D']).reorder_levels([1,2,0],1).sort_index(1)
CD.columns.names=['REP','GEN','READ']
return CD
@staticmethod
def Recombinator(rate, loci):
"""
        Recombination after the given variant index (or indices); a locus index can take values in [0, NumSNPs-1].
        Args:
            rate: recombination rate
            loci: index, or list of indices, of the loci after which recombination is performed
        Returns: a simuPOP recombinator, used as the `recombinator` argument of Simulation and of the simulate/evolve helpers
"""
if not isinstance(loci, list):
loci = [loci]
return sim.Recombinator(intensity=rate, loci=loci)
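    # Example (sketch): a single recombination hotspot after the 10th variant
    # (index 9), to be passed to Simulation(...) as `recombinator`:
    #   rec = Simulation.Recombinator(rate=2e-8, loci=9)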
class POP:
@staticmethod
def createISOGenicDiploidPopulation(df):
"""
        df : DataFrame of m haplotypes over nSS segregating sites (columns are positions)
        returns a homozygous (isogenic) diploid population in which each individual carries two identical copies of its haplotype
"""
pop = sim.Population(size=df.shape[0], ploidy=2, loci=df.shape[1], lociPos=list(df.columns),
infoFields='fitness')
for (i, (_, h)) in zip(pop.individuals(), df.iterrows()):
i.setGenotype(h.tolist(), 0);
i.setGenotype(h.tolist(), 1)
return pop
@staticmethod
def toDF(pop):
x = pd.concat(map(pd.DataFrame, [map(list, [i.genotype(0), i.genotype(1)]) for i in pop.allIndividuals()]),
keys=range(pop.popSize()))
x.columns = list(pop.lociPos())
return x
@staticmethod
def freq(pop):
sim.stat(pop, alleleFreq=range(pop.numLoci()[0]), vars=['alleleFreq'])
return pd.Series(pd.DataFrame(pop.vars()['alleleFreq']).loc[1].reindex().values,map(int,pop.lociPos())).fillna(0)
@staticmethod
def Haplotypes(pop,counts=False,unique=True):
if isinstance(pop,sim.Population):
a=POP.toDF(pop)
else:
a=pop
H=a.reset_index(drop=True)
H.columns=map(int,H.columns)
b=H.loc[H.sum(1).sort_values().index].astype(str).apply(lambda x: ''.join(x), 1).reset_index(drop=True)
if counts:
return b.value_counts().sort_index()
else:
if unique:
b=b.drop_duplicates()
return b.loc[b.sort_values().index].reset_index(drop=True)
@staticmethod
def establish(H, ba, k=5):
N = H.shape[0]
car = H[H[ba] == 1]
n = car.shape[0]
return pd.concat([car.iloc[np.random.choice(n, k)], H.iloc[np.random.choice(N, N - k)]]).reset_index(drop=True)
class Drift:
@staticmethod
def nextGeneration(N,x):
return (np.random.random(N)<=x).mean()
@staticmethod
def sampleReads(D,x):
return [Drift.sampleReadsDerived(D,x),D]
@staticmethod
def sampleReadsDerived(D,x):
return (np.random.random(D)<=x).sum()
@staticmethod
def simulateAF(N,x,T):
Xt=[]
for i in range(1, T[-1]+1):
x=Drift.nextGeneration(N,x)
if i in T:Xt.append(x)
return Xt
@staticmethod
def simulatePoolCD(N,n,cd):
x=cd[0].C/float(cd[0].D)
D=cd.xs('D',level=1)
Xt=[]
for i in range(1, D.index[-1]+1):
x=Drift.nextGeneration(N,x)
if i in D.index:
y=Drift.nextGeneration(n,x)
Xt.append(Drift.sampleReads(D[i], y))
return pd.DataFrame([[cd[0].C,cd[0].D]]+Xt,index=D.index,columns=['C','D'])
@staticmethod
def simulatePoolDerivd(N,n,cd):
x=cd[0].C/float(cd[0].D)
D=cd.xs('D',level=1)
Xt=[]
for i in range(1, D.index[-1]+1):
x=Drift.nextGeneration(N,x)
if i in D.index:
Xt+=[Drift.sampleReadsDerived(D[i], Drift.nextGeneration(n,x))]
return [cd[0].C]+Xt
@staticmethod
def simulatePools(N,cd,M):
return pd.concat([Drift.simulatePool(N,cd) for _ in range(M)],keys=range(M))
@staticmethod
def simulateAFs(N,x,T,M):
return pd.DataFrame([Drift.simulateAF(N,x,T) for _ in range(M)],columns=T)
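# Example (sketch): 100 neutral Wright-Fisher trajectories starting at frequency
# 0.1 in a population of N=1000, recorded at a few generations:
#   trajs = Drift.simulateAFs(N=1000, x=0.1, T=[10, 20, 30, 40, 50], M=100)
#   trajs.mean()   # mean frequency at each recorded generation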
| mit |
ansobolev/regCMPostProc | src/plot.py | 1 | 2816 | #!/usr/bin/env python
# RegCM postprocessing tool
# Copyright (C) 2014 Aliou, Addisu, Kanhu, Andrey
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import matplotlib.pyplot as plt
import cartopy
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from value import Value
class Plotter(object):
def __init__(self, value):
self._value = value
self.lat, self.lon = value.latlon
def plot(self, coastlines=True,
countries=True,
places=True,
title=None,
levels = None):
if levels is not None:
l_min, l_max = levels
l = (l_max - l_min) / 10
levels = range(l_min, l_max + l, l)
projection = ccrs.PlateCarree()
self.fig, self.ax = plt.subplots(subplot_kw={'projection': projection})
if coastlines:
self.ax.coastlines('10m')
if countries:
countries = cfeature.NaturalEarthFeature(
scale='110m', category='cultural', name='admin_0_countries')
self.ax.add_feature(countries, color='r', alpha=0.1)
if places:
places = cfeature.NaturalEarthFeature(
scale='110m', category='cultural', name='populated_places')
self.ax.add_feature(places, color='b', hatch='o')
cx = self.ax.contourf(self.lon, self.lat, self._value.data, transform=ccrs.PlateCarree(),cmap='bwr', levels=levels)
# To mask out OCEAN or LAND
#ax.add_feature(cfeature.OCEAN)
#ax.add_feature(cfeature.LAND)
self.ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=1, color='blue', alpha=0.5, linestyle='-')
self.fig.colorbar(cx)
times = self._value.limits['time']
plt.title(self._value.title + ' [' + self._value.units + ']\n' +
'mean between ' + str(times[0]) + ' and ' + str(times[1]) + '\n')
def show(self):
plt.show()
def save(self, filename, format):
plt.savefig(filename + '.' + format)
def close(self):
plt.close(self.fig)
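# Example usage (sketch; building the `Value` object depends on the rest of this
# package and is assumed here):
#   v = Value(...)            # hypothetical: loaded from a RegCM output file
#   p = Plotter(v)
#   p.plot(levels=(-10, 30))
#   p.save('mean_field', 'png')
#   p.close()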
if __name__ == "__main__":
pass | gpl-3.0 |
wmvanvliet/mne-python | examples/time_frequency/plot_source_power_spectrum.py | 19 | 1959 | """
======================================================
Compute source power spectral density (PSD) in a label
======================================================
Returns an STC file containing the PSD (in dB) of each of the sources
within a label.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, compute_source_psd
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_label = data_path + '/MEG/sample/labels/Aud-lh.label'
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, verbose=False)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, exclude='bads')
tmin, tmax = 0, 120 # use the first 120s of data
fmin, fmax = 4, 100 # look at frequencies between 4 and 100Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
label = mne.read_label(fname_label)
stc = compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
pick_ori="normal", n_fft=n_fft, label=label,
dB=True)
stc.save('psd_dSPM')
###############################################################################
# View PSD of sources in label
plt.plot(stc.times, stc.data.T)
plt.xlabel('Frequency (Hz)')
plt.ylabel('PSD (dB)')
plt.title('Source Power Spectrum (PSD)')
plt.show()
| bsd-3-clause |
destijl/forensicartifacts | frontend/thirdparty/networkx-1.9/examples/graph/napoleon_russian_campaign.py | 44 | 3216 | #!/usr/bin/env python
"""
Minard's data from Napoleon's 1812-1813 Russian Campaign.
http://www.math.yorku.ca/SCS/Gallery/minard/minard.txt
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2006 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import string
import networkx as nx
def minard_graph():
data1="""\
24.0,54.9,340000,A,1
24.5,55.0,340000,A,1
25.5,54.5,340000,A,1
26.0,54.7,320000,A,1
27.0,54.8,300000,A,1
28.0,54.9,280000,A,1
28.5,55.0,240000,A,1
29.0,55.1,210000,A,1
30.0,55.2,180000,A,1
30.3,55.3,175000,A,1
32.0,54.8,145000,A,1
33.2,54.9,140000,A,1
34.4,55.5,127100,A,1
35.5,55.4,100000,A,1
36.0,55.5,100000,A,1
37.6,55.8,100000,A,1
37.7,55.7,100000,R,1
37.5,55.7,98000,R,1
37.0,55.0,97000,R,1
36.8,55.0,96000,R,1
35.4,55.3,87000,R,1
34.3,55.2,55000,R,1
33.3,54.8,37000,R,1
32.0,54.6,24000,R,1
30.4,54.4,20000,R,1
29.2,54.3,20000,R,1
28.5,54.2,20000,R,1
28.3,54.3,20000,R,1
27.5,54.5,20000,R,1
26.8,54.3,12000,R,1
26.4,54.4,14000,R,1
25.0,54.4,8000,R,1
24.4,54.4,4000,R,1
24.2,54.4,4000,R,1
24.1,54.4,4000,R,1"""
data2="""\
24.0,55.1,60000,A,2
24.5,55.2,60000,A,2
25.5,54.7,60000,A,2
26.6,55.7,40000,A,2
27.4,55.6,33000,A,2
28.7,55.5,33000,R,2
29.2,54.2,30000,R,2
28.5,54.1,30000,R,2
28.3,54.2,28000,R,2"""
data3="""\
24.0,55.2,22000,A,3
24.5,55.3,22000,A,3
24.6,55.8,6000,A,3
24.6,55.8,6000,R,3
24.2,54.4,6000,R,3
24.1,54.4,6000,R,3"""
cities="""\
24.0,55.0,Kowno
25.3,54.7,Wilna
26.4,54.4,Smorgoni
26.8,54.3,Moiodexno
27.7,55.2,Gloubokoe
27.6,53.9,Minsk
28.5,54.3,Studienska
28.7,55.5,Polotzk
29.2,54.4,Bobr
30.2,55.3,Witebsk
30.4,54.5,Orscha
30.4,53.9,Mohilow
32.0,54.8,Smolensk
33.2,54.9,Dorogobouge
34.3,55.2,Wixma
34.4,55.5,Chjat
36.0,55.5,Mojaisk
37.6,55.8,Moscou
36.6,55.3,Tarantino
36.5,55.0,Malo-Jarosewii"""
c={}
for line in cities.split('\n'):
x,y,name=line.split(',')
c[name]=(float(x),float(y))
g=[]
for data in [data1,data2,data3]:
G=nx.Graph()
i=0
G.pos={} # location
G.pop={} # size
last=None
for line in data.split('\n'):
x,y,p,r,n=line.split(',')
G.pos[i]=(float(x),float(y))
G.pop[i]=int(p)
if last is None:
last=i
else:
G.add_edge(i,last,{r:int(n)})
last=i
i=i+1
g.append(G)
return g,c
if __name__ == "__main__":
(g,city)=minard_graph()
try:
import matplotlib.pyplot as plt
plt.figure(1,figsize=(11,5))
plt.clf()
colors=['b','g','r']
for G in g:
c=colors.pop(0)
node_size=[int(G.pop[n]/300.0) for n in G]
nx.draw_networkx_edges(G,G.pos,edge_color=c,width=4,alpha=0.5)
nx.draw_networkx_nodes(G,G.pos,node_size=node_size,node_color=c,alpha=0.5)
nx.draw_networkx_nodes(G,G.pos,node_size=5,node_color='k')
for c in city:
x,y=city[c]
plt.text(x,y+0.1,c)
plt.savefig("napoleon_russian_campaign.png")
except ImportError:
pass
| apache-2.0 |
zarafagroupware/python-zarafa | scripts/z-barplot.py | 2 | 1667 | #!/usr/bin/env python
import zarafa
import matplotlib.pyplot as plt
def opt_args():
parser = zarafa.parser('skpc')
parser.add_option('--save', dest='save', action='store', help='Save plot to file (png)')
return parser.parse_args()
def b2m(bytes):
return (bytes / 1024) / 1024
def main():
options, args = opt_args()
users = list(zarafa.Server(options).users())
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
ind = range(0, len(users))
data = [b2m(user.store.size) for user in users]
rects1 = ax.bar(ind, data, width, color='r')
data = [len(list(user.store.folders())) for user in users]
rects2 = ax.bar([offset + width for offset in ind], data, width, color='g')
data =[sum(folder.count for folder in user.store.folders()) for user in users]
rects3 = ax.bar([offset + width * 2 for offset in ind], data, width, color='b')
ax.legend( (rects1[0], rects2[0], rects3[0]), ('Store size (Mb)', 'Folders', 'Items') )
ax.set_ylabel('Values')
ax.set_title('Store size, Folder, Items per user')
ax.set_xticks([offset + width for offset in ind])
ax.set_xticklabels([user.name for user in users])
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d'%int(height),
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
autolabel(rects3)
if options.save:
plt.savefig(options.save)
else:
plt.show()
if __name__ == '__main__':
main()
| agpl-3.0 |
tosolveit/scikit-learn | sklearn/decomposition/tests/test_pca.py | 199 | 10949 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
# PCA on dense arrays
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
    # mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 3)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, axis=0))
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by RandomizedPCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by RandomizedPCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that RandomizedPCA is inversible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
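    # (What this appears to check: X below is a rank-one signal plus small isotropic
    # noise, so the log-likelihood returned by _assess_dimension_ should peak,
    # within tolerance, at k=1.)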
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
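    # (What this appears to check: two mean-shifted subgroups give the data more
    # than one strong direction, so the inferred dimension should be greater than 1.)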
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
# Test that probabilistic PCA correctly separated different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
| bsd-3-clause |
pap/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/fontconfig_pattern.py | 72 | 6429 | """
A module for parsing and generating fontconfig patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
# Author : Michael Droettboom <mdroe@stsci.edu>
# License : matplotlib license (PSF compatible)
# This class is defined here because it must be available in:
# - The old-style config framework (:file:`rcsetup.py`)
# - The traits-based config framework (:file:`mpltraits.py`)
# - The font manager (:file:`font_manager.py`)
# It probably logically belongs in :file:`font_manager.py`, but
# placing it in any of these places would have created cyclical
# dependency problems, or an undesired dependency on traits even
# when the traits-based config framework is not used.
import re
from matplotlib.pyparsing import Literal, ZeroOrMore, \
Optional, Regex, StringEnd, ParseException, Suppress
family_punc = r'\\\-:,'
family_unescape = re.compile(r'\\([%s])' % family_punc).sub
family_escape = re.compile(r'([%s])' % family_punc).sub
value_punc = r'\\=_:,'
value_unescape = re.compile(r'\\([%s])' % value_punc).sub
value_escape = re.compile(r'([%s])' % value_punc).sub
class FontconfigPatternParser:
"""A simple pyparsing-based parser for fontconfig-style patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
_constants = {
'thin' : ('weight', 'light'),
'extralight' : ('weight', 'light'),
'ultralight' : ('weight', 'light'),
'light' : ('weight', 'light'),
'book' : ('weight', 'book'),
'regular' : ('weight', 'regular'),
'normal' : ('weight', 'normal'),
'medium' : ('weight', 'medium'),
'demibold' : ('weight', 'demibold'),
'semibold' : ('weight', 'semibold'),
'bold' : ('weight', 'bold'),
'extrabold' : ('weight', 'extra bold'),
'black' : ('weight', 'black'),
'heavy' : ('weight', 'heavy'),
'roman' : ('slant', 'normal'),
'italic' : ('slant', 'italic'),
'oblique' : ('slant', 'oblique'),
'ultracondensed' : ('width', 'ultra-condensed'),
'extracondensed' : ('width', 'extra-condensed'),
'condensed' : ('width', 'condensed'),
'semicondensed' : ('width', 'semi-condensed'),
'expanded' : ('width', 'expanded'),
'extraexpanded' : ('width', 'extra-expanded'),
'ultraexpanded' : ('width', 'ultra-expanded')
}
def __init__(self):
family = Regex(r'([^%s]|(\\[%s]))*' %
(family_punc, family_punc)) \
.setParseAction(self._family)
size = Regex(r"([0-9]+\.?[0-9]*|\.[0-9]+)") \
.setParseAction(self._size)
name = Regex(r'[a-z]+') \
.setParseAction(self._name)
value = Regex(r'([^%s]|(\\[%s]))*' %
(value_punc, value_punc)) \
.setParseAction(self._value)
families =(family
+ ZeroOrMore(
Literal(',')
+ family)
).setParseAction(self._families)
point_sizes =(size
+ ZeroOrMore(
Literal(',')
+ size)
).setParseAction(self._point_sizes)
property =( (name
+ Suppress(Literal('='))
+ value
+ ZeroOrMore(
Suppress(Literal(','))
+ value)
)
| name
).setParseAction(self._property)
pattern =(Optional(
families)
+ Optional(
Literal('-')
+ point_sizes)
+ ZeroOrMore(
Literal(':')
+ property)
+ StringEnd()
)
self._parser = pattern
self.ParseException = ParseException
def parse(self, pattern):
"""
Parse the given fontconfig *pattern* and return a dictionary
of key/value pairs useful for initializing a
:class:`font_manager.FontProperties` object.
"""
props = self._properties = {}
try:
self._parser.parseString(pattern)
except self.ParseException, e:
raise ValueError("Could not parse font string: '%s'\n%s" % (pattern, e))
self._properties = None
return props
def _family(self, s, loc, tokens):
return [family_unescape(r'\1', str(tokens[0]))]
def _size(self, s, loc, tokens):
return [float(tokens[0])]
def _name(self, s, loc, tokens):
return [str(tokens[0])]
def _value(self, s, loc, tokens):
return [value_unescape(r'\1', str(tokens[0]))]
def _families(self, s, loc, tokens):
self._properties['family'] = [str(x) for x in tokens]
return []
def _point_sizes(self, s, loc, tokens):
self._properties['size'] = [str(x) for x in tokens]
return []
def _property(self, s, loc, tokens):
if len(tokens) == 1:
if tokens[0] in self._constants:
key, val = self._constants[tokens[0]]
self._properties.setdefault(key, []).append(val)
else:
key = tokens[0]
val = tokens[1:]
self._properties.setdefault(key, []).extend(val)
return []
parse_fontconfig_pattern = FontconfigPatternParser().parse
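# Module-level convenience alias; all callers share one parser instance.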
def generate_fontconfig_pattern(d):
"""
    Given a :class:`font_manager.FontProperties` object *d* (its values are
    read through the ``get_*`` accessors), generate the corresponding
    fontconfig pattern string.
"""
props = []
families = ''
size = ''
for key in 'family style variant weight stretch file size'.split():
val = getattr(d, 'get_' + key)()
if val is not None and val != []:
if type(val) == list:
val = [value_escape(r'\\\1', str(x)) for x in val if x is not None]
if val != []:
val = ','.join(val)
props.append(":%s=%s" % (key, val))
return ''.join(props)
| agpl-3.0 |
sevenian3/ChromaStarPy | solartest.py | 1 | 6462 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 30 10:54:21 2017
@author: ishort
"""
#plotting:
import matplotlib
import matplotlib.pyplot as plt
#%matplotlib inline
import pylab
#General file for printing ad hoc quantities
#dbgHandle = open("debug.out", 'w')
#Get the data
dataPath = "SolFluxAtlas2005/"
#outPath = absPath + "Outputs/"
numStr = ""
num = 0.0
wavStr = ""
flxStr = ""
inLine = ""
fields = [" " for i in range(2)]
#with open("", 'r', encoding='utf-8') as inputHandle:
inFile = dataPath + "fluxspliced.2005"
with open(inFile, 'r') as inputHandle:
#Expects number of records on first lines, then white space delimited columns of
#wavelengths in nm and continuum rectified fluxes
inLine = inputHandle.readline() #Special one-line header
print(inLine)
fields = inLine.split()
numStr = fields[0].strip() #first field is number of following records
num = int(numStr)
waveSun = [0.0 for i in range(num)]
fluxSun = [0.0 for i in range(num)]
for i in range(num):
inLine = inputHandle.readline()
fields = inLine.split()
wavStr = fields[0].strip(); flxStr = fields[1].strip()
waveSun[i] = float(wavStr); fluxSun[i] = float(flxStr)
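#waveSun and fluxSun now hold the observed, continuum-rectified solar spectrum;
#plot it in black for comparison with the synthetic spectrum read in below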
pylab.plot(waveSun, fluxSun, color='black')
#Now get the synthetic spectrum pre-computed with ChromaStarPy
modelPath = "Outputs/"
#outPath = absPath + "Outputs/"
numStr = ""
num = 0.0
wavStr = ""
flxStr = ""
inLine = " "
#fields = [" " for i in range(2)]
"""
runVers = "pyLoop"
#Model atmosphere
teffStr = "5777.0"
loggStr = "4.44"
logZStr = "0.0"
massStarStr = "1.0"
xiTStr = "1.0"
logHeFeStr = "0.0"
logCOStr = "0.0"
logAlphaFeStr = "0.0"
#Spectrum synthesis
lambdaStartStr = "390.0"
lambdaStopStr = "400.0"
lineThreshStr = "-3.0"
voigtThreshStr = "-3.0"
logGammaColStr = "0.5"
logKapFudgeStr = "0.0"
macroVStr = "1.0"
rotVStr = "2.0"
rotIStr = "90.0"
RVStr = "0.0"
strucStem = "Teff" + teffStr + "Logg" + loggStr + "Z" + logZStr + "M" + massStarStr+"xiT"+xiTStr + \
"HeFe" + logHeFeStr + "CO" + logCOStr + "AlfFe" + logAlphaFeStr + "v" + runVers
strucFile = "struc." + strucStem + ".out"
specFile = "spec." + strucStem + "L"+lambdaStartStr+"-"+lambdaStopStr+"xiT"+xiTStr+"LThr"+lineThreshStr+ \
"GamCol"+logGammaColStr+"Mac"+macroVStr+"Rot"+rotVStr+"-"+rotIStr+"RV"+RVStr + ".out"
#with open("", 'r', encoding='utf-8') as inputHandle:
inFile = modelPath + specFile;
"""
project = "Project"
runVers = "Run"
teff = 5777.0
logg = 4.44
log10ZScale = 0.0
lambdaStart = 390.0
lambdaStop = 400.0
fileStem = project + "-"\
+ str(round(teff, 7)) + "-" + str(round(logg, 3)) + "-" + str(round(log10ZScale, 3))\
+ "-" + str(round(lambdaStart, 5)) + "-" + str(round(lambdaStop, 5))\
+ "-" + runVers
inFile = modelPath + fileStem + ".spec.txt"
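#With the parameter values above this should resolve to
#"Outputs/Project-5777.0-4.44-0.0-390.0-400.0-Run.spec.txt"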
invnAir = 1.0 / 1.000277 #// reciprocal of refractive index of air at STP
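#Model wavelengths read below are multiplied by invnAir, presumably to convert
#vacuum wavelengths to air wavelengths for comparison with the observed atlas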
#numStr = fields[0].strip() #first field is number of following records
#num = int(numStr)
waveMod = []
fluxMod = []
wav = 0.0 #//initialization
wavStr = ""
lblStr = ""
with open(inFile, 'r') as inputHandle:
#Expects number of records on first lines, then white space delimited columns of
#wavelengths in nm and continuum rectified fluxes
inLine = inputHandle.readline() #line of header
print(inLine)
inLine = inputHandle.readline()
print(inLine)
fields = inLine.split()
#number of line IDs is last field:
numLineIdsStr = fields[len(fields)-1]
numLineIds = int(numLineIdsStr) - 1 # to be on safe side
print("Recovered that there are " + numLineIdsStr + " lines to ID")
inLine = inputHandle.readline()
print(inLine)
fields = inLine.split()
#number of wavelengths in spectrum is last field:
numWavsStr = fields[len(fields)-1]
numWavs = int(numWavsStr) # to be on safe side
print("Recovered that there are " + numWavsStr + " wavelengths")
#One more line of header
inLine = inputHandle.readline() #line of header
print(inLine)
waveMod = [0.0 for i in range(numWavs)]
fluxMod = [0.0 for i in range(numWavs)]
#Get the synthetic spectrum
for i in range(numWavs):
inLine = inputHandle.readline()
fields = inLine.split()
wavStr = fields[0].strip(); flxStr = fields[1].strip()
wav = invnAir * float(wavStr)
waveMod[i] = wav
fluxMod[i] = float(flxStr)
waveIds = [0.0 for i in range(numLineIds)]
lblIds = ["" for i in range(numLineIds)]
#Get the line IDs
#Expects four white-space-delimited fields:
# wavelength, element, ion. stage, and rounded wavelength
#Another line of header for line id section
inLine = inputHandle.readline() #line of header
print(inLine)
for i in range(numLineIds):
inLine = inputHandle.readline()
fields = inLine.split()
wavStr = fields[0].strip()
wav = invnAir * float(wavStr)
waveIds[i] = wav
lblStr = fields[1].strip() + " " + fields[2].strip() + " " + fields[3].strip()
lblIds[i] = lblStr
"""
#If we do NOT know number of records:
#for i in inputHandle: #doesn't work - 0 iterations
while (inLine != ""):
inLine = inputHandle.readline()
if not inLine:
break
#print(inLine)
fields = inLine.split()
wavStr = fields[0].strip(); flxStr = fields[1].strip()
wav = invnAir * float(wavStr)
waveMod.append(wav)
fluxMod.append(float(flxStr))
"""
#plot the spectrum
#plt.title('Synthetic spectrum')
plt.ylabel(r'$F_\lambda/F^C_\lambda$')
plt.xlabel(r'$\lambda$ (nm)')
xMin = min(waveMod)
xMax = max(waveMod)
pylab.xlim(xMin, xMax)
pylab.ylim(0.0, 1.6)
pylab.plot(waveMod, fluxMod, color="gray")
#add the line IDs
for i in range(numLineIds):
if "Ca II" in lblIds[i]:
thisLam = waveIds[i]
thisLbl = lblIds[i]
xPoint = [thisLam, thisLam]
yPoint = [1.05, 1.1]
pylab.plot(xPoint, yPoint, color='black')
pylab.text(thisLam, 1.5, thisLbl, rotation=270)
#Save as encapsulated postscript (eps) for LaTex
epsName = fileStem + ".eps"
plt.savefig(epsName, format='eps', dpi=1000) | mit |
frank-tancf/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 25 | 45729 | from itertools import product
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
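# Minkowski p-norm values exercised by the tests below.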
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
"""Tests unsupervised NearestNeighbors with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
for method in ['kneighbors']:
# TODO: also test radius_neighbors, but requires different assertion
# As a feature matrix (n_samples by n_features)
nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
nbrs_X.fit(X)
dist_X, ind_X = getattr(nbrs_X, method)(Y)
# As a dense distance matrix (n_samples by n_samples)
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check auto works too
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check X=None in prediction
dist_X, ind_X = getattr(nbrs_X, method)(None)
dist_D, ind_D = getattr(nbrs_D, method)(None)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Must raise a ValueError if the matrix is not of correct shape
assert_raises(ValueError, getattr(nbrs_D, method), X)
target = np.arange(X.shape[0])
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
print(Est)
est = Est(metric='euclidean')
est.radius = est.n_neighbors = 1
pred_X = est.fit(X, target).predict(Y)
est.metric = 'precomputed'
pred_D = est.fit(DXX, target).predict(DYX)
assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
# Ensure array is split correctly
rng = np.random.RandomState(0)
X = rng.rand(20, 2)
D = pairwise_distances(X, metric='euclidean')
y = rng.randint(3, size=20)
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
metric_score = cross_val_score(Est(), X, y)
precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
np.concatenate(list(results[i + 1][0]))),
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
# In this case it should rise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
elif False:
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
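                # Note: the `elif False:` branch above can never run; it was
                # presumably meant to be `else:` so that the outlier label is
                # also checked on z2 when outlier_label is not None.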
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([1, -1])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
# we don't test for weights=_weight_func since user will be expected
# to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([[0.0]], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression with various weight
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test radius-based regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
# Puts three points of each label in the plane and performs a
# nearest neighbor query on points near the decision boundary.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_greater(np.mean(rgs.predict(iris.data).round() == iris.target),
0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity',
include_self=True)
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity',
include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity',
include_self=True)
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity',
include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[[]])
if (isinstance(cls, neighbors.KNeighborsClassifier) or
isinstance(cls, neighbors.KNeighborsRegressor)):
nbrs = cls(n_neighbors=-1)
assert_raises(ValueError, nbrs.fit, X, y)
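        # Note: `cls` is a class here, so the isinstance() checks above are
        # always False and this branch never runs; issubclass() was
        # presumably intended.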
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
    # create a symmetric, positive semi-definite matrix for the Mahalanobis VI
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = []
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results.append(neigh.kneighbors(test, return_distance=True))
assert_array_almost_equal(results[0][0], results[1][0])
assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
def custom_metric(x1, x2):
return np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto',
metric=custom_metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute',
metric=custom_metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
np.sort(dist_array)
radius = dist_array[15]
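    # Note: np.sort() returns a sorted copy that is discarded here, so the
    # radius is taken from the unsorted distances; an in-place
    # dist_array.sort() was presumably intended.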
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)
# Raise error when wrong parameters are supplied,
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
# Test kneighbors et.al when query is not training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
# Test kneighbors et.al when query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
for graph in [rng, kng]:
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.data, [1, 1])
assert_array_equal(rng.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_same_knn_parallel():
X, y = datasets.make_classification(n_samples=30, n_features=5,
n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y)
def check_same_knn_parallel(algorithm):
clf = neighbors.KNeighborsClassifier(n_neighbors=3,
algorithm=algorithm)
clf.fit(X_train, y_train)
y = clf.predict(X_test)
dist, ind = clf.kneighbors(X_test)
graph = clf.kneighbors_graph(X_test, mode='distance').toarray()
clf.set_params(n_jobs=3)
clf.fit(X_train, y_train)
y_parallel = clf.predict(X_test)
dist_parallel, ind_parallel = clf.kneighbors(X_test)
graph_parallel = \
clf.kneighbors_graph(X_test, mode='distance').toarray()
assert_array_equal(y, y_parallel)
assert_array_almost_equal(dist, dist_parallel)
assert_array_equal(ind, ind_parallel)
assert_array_almost_equal(graph, graph_parallel)
for algorithm in ALGORITHMS:
yield check_same_knn_parallel, algorithm
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
| bsd-3-clause |
OrkoHunter/networkx | examples/graph/atlas.py | 54 | 2609 | #!/usr/bin/env python
"""
Atlas of all graphs of 6 nodes or less.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2004 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.generators.atlas import *
from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic as isomorphic
import random
def atlas6():
""" Return the atlas of all connected graphs of 6 nodes or less.
        Attempt to check for isomorphisms and remove duplicate graphs.
"""
Atlas=graph_atlas_g()[0:208] # 208
# remove isolated nodes, only connected graphs are left
U=nx.Graph() # graph for union of all graphs in atlas
for G in Atlas:
zerodegree=[n for n in G if G.degree(n)==0]
for n in zerodegree:
G.remove_node(n)
U=nx.disjoint_union(U,G)
# list of graphs of all connected components
C=nx.connected_component_subgraphs(U)
UU=nx.Graph()
# do quick isomorphic-like check, not a true isomorphism checker
nlist=[] # list of nonisomorphic graphs
for G in C:
# check against all nonisomorphic graphs so far
if not iso(G,nlist):
nlist.append(G)
UU=nx.disjoint_union(UU,G) # union the nonisomorphic graphs
return UU
def iso(G1, glist):
"""Quick and dirty nonisomorphism checker used to check isomorphisms."""
for G2 in glist:
if isomorphic(G1,G2):
return True
return False
if __name__ == '__main__':
import networkx as nx
G=atlas6()
print("graph has %d nodes with %d edges"\
%(nx.number_of_nodes(G),nx.number_of_edges(G)))
print(nx.number_connected_components(G),"connected components")
try:
from networkx import graphviz_layout
except ImportError:
raise ImportError("This example needs Graphviz and either PyGraphviz or Pydot")
import matplotlib.pyplot as plt
plt.figure(1,figsize=(8,8))
# layout graphs with positions using graphviz neato
pos=nx.graphviz_layout(G,prog="neato")
# color nodes the same in each connected subgraph
C=nx.connected_component_subgraphs(G)
for g in C:
c=[random.random()]*nx.number_of_nodes(g) # random color...
nx.draw(g,
pos,
node_size=40,
node_color=c,
vmin=0.0,
vmax=1.0,
with_labels=False
)
plt.savefig("atlas.png",dpi=75)
| bsd-3-clause |
pv/scikit-learn | sklearn/utils/tests/test_validation.py | 133 | 18339 | """Tests for input validation functions"""
import warnings
from tempfile import NamedTemporaryFile
from itertools import product
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils import as_float_array, check_array, check_symmetric
from sklearn.utils import check_X_y
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.linear_model import ARDRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.datasets import make_blobs
from sklearn.utils.validation import (
NotFittedError,
has_fit_parameter,
check_is_fitted,
check_consistent_length,
DataConversionWarning,
)
from sklearn.utils.testing import assert_raise_message
def test_as_float_array():
# Test function for as_float_array
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
# Checks that the return type is ok
X2 = as_float_array(X, copy=False)
np.testing.assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
# Checking that the new type is ok
np.testing.assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
# Test the copy parameter with some matrices
matrices = [
np.matrix(np.arange(5)),
sp.csc_matrix(np.arange(5)).toarray(),
sparse_random_matrix(10, 10, density=0.10).toarray()
]
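    # Mutating the returned copy must leave the original matrix untouched.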
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert_false(np.isnan(M).any())
def test_np_matrix():
# Confirm that input validation code does not return np.matrix
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
def test_memmap():
# Confirm that input validation code doesn't copy memory mapped arrays
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=100, dtype=np.float32)
M[:] = 0
for f in (check_array, np.asarray, asflt):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M)
X[:] = 0
def test_ordering():
# Check that ordering is enforced correctly by validation utilities.
# We need to check each validation utility, because a 'copy' without
# 'order=K' will kill the ordering.
X = np.ones((10, 5))
for A in X, X.T:
for copy in (True, False):
B = check_array(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = check_array(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert_false(X.data.flags['C_CONTIGUOUS'])
def test_check_array():
# accept_sparse == None
# raise error on sparse inputs
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
assert_raises(TypeError, check_array, X_csr)
# ensure_2d
X_array = check_array([0, 1, 2])
assert_equal(X_array.ndim, 2)
X_array = check_array([0, 1, 2], ensure_2d=False)
assert_equal(X_array.ndim, 1)
# don't allow ndim > 3
X_ndim = np.arange(8).reshape(2, 2, 2)
assert_raises(ValueError, check_array, X_ndim)
check_array(X_ndim, allow_nd=True) # doesn't raise
# force_all_finite
X_inf = np.arange(4).reshape(2, 2).astype(np.float)
X_inf[0, 0] = np.inf
assert_raises(ValueError, check_array, X_inf)
check_array(X_inf, force_all_finite=False) # no raise
# nan check
X_nan = np.arange(4).reshape(2, 2).astype(np.float)
X_nan[0, 0] = np.nan
assert_raises(ValueError, check_array, X_nan)
check_array(X_inf, force_all_finite=False) # no raise
# dtype and order enforcement.
X_C = np.arange(4).reshape(2, 2).copy("C")
X_F = X_C.copy("F")
X_int = X_C.astype(np.int)
X_float = X_C.astype(np.float)
Xs = [X_C, X_F, X_int, X_float]
dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
orders = ['C', 'F', None]
copys = [True, False]
for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if order == 'C':
assert_true(X_checked.flags['C_CONTIGUOUS'])
assert_false(X_checked.flags['F_CONTIGUOUS'])
elif order == 'F':
assert_true(X_checked.flags['F_CONTIGUOUS'])
assert_false(X_checked.flags['C_CONTIGUOUS'])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and
X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
assert_true(X is X_checked)
# allowed sparse != None
X_csc = sp.csc_matrix(X_C)
X_coo = X_csc.tocoo()
X_dok = X_csc.todok()
X_int = X_csc.astype(np.int)
X_float = X_csc.astype(np.float)
Xs = [X_csc, X_coo, X_dok, X_int, X_float]
accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
copys):
with warnings.catch_warnings(record=True) as w:
X_checked = check_array(X, dtype=dtype,
accept_sparse=accept_sparse, copy=copy)
if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
message = str(w[0].message)
messages = ["object dtype is not supported by sparse matrices",
"Can't check dok sparse matrix for nan or inf."]
assert_true(message in messages)
else:
assert_equal(len(w), 0)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if X.format in accept_sparse:
# no change if allowed
assert_equal(X.format, X_checked.format)
else:
# got converted
assert_equal(X_checked.format, accept_sparse[0])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and X.format == X_checked.format):
assert_true(X is X_checked)
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert_true(isinstance(X_dense, np.ndarray))
# raise on too deep lists
assert_raises(ValueError, check_array, X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = NotAnArray(X_dense)
result = check_array(X_no_array)
assert_true(isinstance(result, np.ndarray))
def test_check_array_pandas_dtype_object_conversion():
# test that data-frame like objects with dtype object
# get converted
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.object)
X_df = MockDataFrame(X)
assert_equal(check_array(X_df).dtype.kind, "f")
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
# smoke-test against dataframes with column named "dtype"
X_df.dtype = "Hans"
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
def test_check_array_dtype_stability():
# test that lists with ints don't get converted to floats
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert_equal(check_array(X).dtype.kind, "i")
assert_equal(check_array(X, ensure_2d=False).dtype.kind, "i")
def test_check_array_dtype_warning():
X_int_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
X_float64 = np.asarray(X_int_list, dtype=np.float64)
X_float32 = np.asarray(X_int_list, dtype=np.float32)
X_int64 = np.asarray(X_int_list, dtype=np.int64)
X_csr_float64 = sp.csr_matrix(X_float64)
X_csr_float32 = sp.csr_matrix(X_float32)
X_csc_float32 = sp.csc_matrix(X_float32)
X_csc_int32 = sp.csc_matrix(X_int64, dtype=np.int32)
y = [0, 0, 1]
integer_data = [X_int64, X_csc_int32]
float64_data = [X_float64, X_csr_float64]
float32_data = [X_float32, X_csr_float32, X_csc_float32]
for X in integer_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_warns(DataConversionWarning, check_array, X,
dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
# Check that the warning message includes the name of the Estimator
X_checked = assert_warns_message(DataConversionWarning,
'SomeEstimator',
check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True,
warn_on_dtype=True,
estimator='SomeEstimator')
assert_equal(X_checked.dtype, np.float64)
X_checked, y_checked = assert_warns_message(
DataConversionWarning, 'KNeighborsClassifier',
check_X_y, X, y, dtype=np.float64, accept_sparse=True,
warn_on_dtype=True, estimator=KNeighborsClassifier())
assert_equal(X_checked.dtype, np.float64)
for X in float64_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=False)
assert_equal(X_checked.dtype, np.float64)
for X in float32_data:
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True)
assert_equal(X_checked.dtype, np.float32)
assert_true(X_checked is X)
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=True)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X)
X_checked = assert_no_warnings(check_array, X_csc_float32,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=False)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X_csc_float32)
assert_equal(X_checked.format, 'csr')
def test_check_array_min_samples_and_features_messages():
# empty list is considered 2D by default:
msg = "0 feature(s) (shape=(1, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [])
# If considered a 1D collection when ensure_2d=False, then the minimum
# number of samples will break:
msg = "0 sample(s) (shape=(0,)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [], ensure_2d=False)
# Invalid edge case when checking the default minimum sample of a scalar
msg = "Singleton array array(42) cannot be considered a valid collection."
assert_raise_message(TypeError, msg, check_array, 42, ensure_2d=False)
# But this works if the input data is forced to look like a 2 array with
# one sample and one feature:
X_checked = check_array(42, ensure_2d=True)
assert_array_equal(np.array([[42]]), X_checked)
# Simulate a model that would need at least 2 samples to be well defined
X = np.ones((1, 10))
y = np.ones(1)
msg = "1 sample(s) (shape=(1, 10)) while a minimum of 2 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2)
# The same message is raised if the data has 2 dimensions even if this is
# not mandatory
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2, ensure_2d=False)
# Simulate a model that would require at least 3 features (e.g. SelectKBest
# with k=3)
X = np.ones((10, 2))
y = np.ones(2)
msg = "2 feature(s) (shape=(10, 2)) while a minimum of 3 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3)
# Only the feature check is enabled whenever the number of dimensions is 2
# even if allow_nd is enabled:
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3, allow_nd=True)
# Simulate a case where a pipeline stage as trimmed all the features of a
# 2D dataset.
X = np.empty(0).reshape(10, 0)
y = np.ones(10)
msg = "0 feature(s) (shape=(10, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y)
# nd-data is not checked for any minimum number of features by default:
X = np.ones((10, 0, 28, 28))
y = np.ones(10)
X_checked, y_checked = check_X_y(X, y, allow_nd=True)
assert_array_equal(X, X_checked)
assert_array_equal(y, y_checked)
def test_has_fit_parameter():
assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
assert_true(has_fit_parameter(SVR, "sample_weight"))
assert_true(has_fit_parameter(SVR(), "sample_weight"))
def test_check_symmetric():
arr_sym = np.array([[0, 1], [1, 2]])
arr_bad = np.ones(2)
arr_asym = np.array([[0, 2], [0, 2]])
test_arrays = {'dense': arr_asym,
'dok': sp.dok_matrix(arr_asym),
'csr': sp.csr_matrix(arr_asym),
'csc': sp.csc_matrix(arr_asym),
'coo': sp.coo_matrix(arr_asym),
'lil': sp.lil_matrix(arr_asym),
'bsr': sp.bsr_matrix(arr_asym)}
# check error for bad inputs
assert_raises(ValueError, check_symmetric, arr_bad)
# check that asymmetric arrays are properly symmetrized
for arr_format, arr in test_arrays.items():
# Check for warnings and errors
assert_warns(UserWarning, check_symmetric, arr)
assert_raises(ValueError, check_symmetric, arr, raise_exception=True)
output = check_symmetric(arr, raise_warning=False)
if sp.issparse(output):
assert_equal(output.format, arr_format)
assert_array_equal(output.toarray(), arr_sym)
else:
assert_array_equal(output, arr_sym)
def test_check_is_fitted():
# Check is ValueError raised when non estimator instance passed
assert_raises(ValueError, check_is_fitted, ARDRegression, "coef_")
assert_raises(TypeError, check_is_fitted, "SVR", "support_")
ard = ARDRegression()
svr = SVR()
try:
assert_raises(NotFittedError, check_is_fitted, ard, "coef_")
assert_raises(NotFittedError, check_is_fitted, svr, "support_")
except ValueError:
assert False, "check_is_fitted failed with ValueError"
# NotFittedError is a subclass of both ValueError and AttributeError
try:
check_is_fitted(ard, "coef_", "Random message %(name)s, %(name)s")
except ValueError as e:
assert_equal(str(e), "Random message ARDRegression, ARDRegression")
try:
check_is_fitted(svr, "support_", "Another message %(name)s, %(name)s")
except AttributeError as e:
assert_equal(str(e), "Another message SVR, SVR")
ard.fit(*make_blobs())
svr.fit(*make_blobs())
assert_equal(None, check_is_fitted(ard, "coef_"))
assert_equal(None, check_is_fitted(svr, "support_"))
def test_check_consistent_length():
check_consistent_length([1], [2], [3], [4], [5])
check_consistent_length([[1, 2], [[1, 2]]], [1, 2], ['a', 'b'])
check_consistent_length([1], (2,), np.array([3]), sp.csr_matrix((1, 2)))
assert_raises_regexp(ValueError, 'inconsistent numbers of samples',
check_consistent_length, [1, 2], [1])
assert_raises_regexp(TypeError, 'got <\w+ \'int\'>',
check_consistent_length, [1, 2], 1)
assert_raises_regexp(TypeError, 'got <\w+ \'object\'>',
check_consistent_length, [1, 2], object())
assert_raises(TypeError, check_consistent_length, [1, 2], np.array(1))
# Despite ensembles having __len__ they must raise TypeError
assert_raises_regexp(TypeError, 'estimator', check_consistent_length,
[1, 2], RandomForestRegressor())
# XXX: We should have a test with a string, but what is correct behaviour?
| bsd-3-clause |
herilalaina/scikit-learn | examples/feature_selection/plot_f_test_vs_mi.py | 82 | 1671 | """
===========================================
Comparison of F-test and mutual information
===========================================
This example illustrates the differences between univariate F-test statistics
and mutual information.
We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1]; the
target depends on them as follows:
y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1), that is, the third feature is
completely irrelevant.
The code below plots the dependency of y against individual x_i and normalized
values of univariate F-test statistics and mutual information.
As F-test captures only linear dependency, it rates x_1 as the most
discriminative feature. On the other hand, mutual information can capture any
kind of dependency between variables and it rates x_2 as the most
discriminative feature, which probably agrees better with our intuitive
perception for this example. Both methods correctly mark x_3 as irrelevant.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import f_regression, mutual_info_regression
np.random.seed(0)
X = np.random.rand(1000, 3)
y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000)
f_test, _ = f_regression(X, y)
f_test /= np.max(f_test)
mi = mutual_info_regression(X, y)
mi /= np.max(mi)
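# Scaling both criteria by their maximum puts the per-feature scores on a
# common [0, 1] range, so the values shown in the subplot titles below are
# directly comparable between the two methods.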
plt.figure(figsize=(15, 5))
for i in range(3):
plt.subplot(1, 3, i + 1)
plt.scatter(X[:, i], y, edgecolor='black', s=20)
plt.xlabel("$x_{}$".format(i + 1), fontsize=14)
if i == 0:
plt.ylabel("$y$", fontsize=14)
plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]),
fontsize=16)
plt.show()
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/mpl_examples/misc/font_indexing.py | 4 | 1299 | """
A little example that shows how the various indexing into the font
tables relate to one another. Mainly for mpl developers....
"""
import matplotlib
from matplotlib.ft2font import FT2Font, KERNING_DEFAULT, KERNING_UNFITTED, KERNING_UNSCALED
#fname = '/usr/share/fonts/sfd/FreeSans.ttf'
fname = matplotlib.get_data_path() + '/fonts/ttf/Vera.ttf'
font = FT2Font(fname)
font.set_charmap(0)
codes = font.get_charmap().items()
#dsu = [(ccode, glyphind) for ccode, glyphind in codes]
#dsu.sort()
#for ccode, glyphind in dsu:
# try: name = font.get_glyph_name(glyphind)
# except RuntimeError: pass
# else: print '% 4d % 4d %s %s'%(glyphind, ccode, hex(int(ccode)), name)
# make a charname to charcode and glyphind dictionary
coded = {}
glyphd = {}
for ccode, glyphind in codes:
name = font.get_glyph_name(glyphind)
coded[name] = ccode
glyphd[name] = glyphind
code = coded['A']
glyph = font.load_char(code)
#print glyph.bbox
print glyphd['A'], glyphd['V'], coded['A'], coded['V']
print 'AV', font.get_kerning(glyphd['A'], glyphd['V'], KERNING_DEFAULT)
print 'AV', font.get_kerning(glyphd['A'], glyphd['V'], KERNING_UNFITTED)
print 'AV', font.get_kerning(glyphd['A'], glyphd['V'], KERNING_UNSCALED)
print 'AV', font.get_kerning(glyphd['A'], glyphd['T'], KERNING_UNSCALED)
| gpl-2.0 |
shikhardb/scikit-learn | sklearn/utils/random.py | 19 | 10413 | # Author: Hamzeh Alsalhi <ha258@cornell.edu>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
        If not given, the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
        if pop_size == 0:
raise ValueError("a must be non-empty")
    if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
        if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
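            # Rejection-style loop: repeatedly draw candidates from the
            # remaining probability mass, zero out whatever has already been
            # chosen, and stop once `size` distinct values have been found.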
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
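
    Examples
    --------
    A minimal usage sketch (illustrative; only the shape is deterministic,
    and each probability vector is chosen here to sum to exactly 1.0):

    >>> import numpy as np
    >>> from sklearn.utils.random import random_choice_csc
    >>> classes = [np.array([0, 1]), np.array([0, 1, 2])]
    >>> probs = [np.array([0.5, 0.5]), np.array([0.25, 0.25, 0.5])]
    >>> random_choice_csc(10, classes, probs, random_state=0).shape
    (10, 2)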
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = classes[j].astype(int)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if np.sum(class_prob_j) != 1.0:
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
            # Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
np.random.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
| bsd-3-clause |
napjon/moocs_solution | ml-udacity/tools/startup.py | 5 | 1048 | #!/usr/bin/python
print
print "checking for nltk"
try:
import nltk
except ImportError:
print "you should install nltk before continuing"
print "checking for numpy"
try:
import numpy
except ImportError:
print "you should install numpy before continuing"
print "checking for sklearn"
try:
import sklearn
except:
print "you should install sklearn before continuing"
print
print "downloading the Enron dataset (this may take a while)"
print "to check on progress, you can cd up one level, then execute <ls -lthr>"
print "Enron dataset should be last item on the list, along with its current size"
print "download will complete at about 423 MB"
import urllib
url = "https://www.cs.cmu.edu/~./enron/enron_mail_20110402.tgz"
urllib.urlretrieve(url, filename="../enron_mail_20110402.tgz")
print "download complete!"
print
print "unzipping Enron dataset (this may take a while)"
import tarfile
import os
os.chdir("..")
tfile = tarfile.open("enron_mail_20110402.tgz", "r:gz")
tfile.extractall(".")
print "you're ready to go!"
| mit |
scipy/scipy | scipy/odr/models.py | 19 | 7660 | """ Collection of Model instances for use with the odrpack fitting package.
"""
import numpy as np
from scipy.odr.odrpack import Model
__all__ = ['Model', 'exponential', 'multilinear', 'unilinear', 'quadratic',
'polynomial']
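# Naming convention for the private helpers below: *_fcn is the model
# function f(B, x), *_fjb / *_fjacb is its Jacobian with respect to the
# parameters B, *_fjd / *_fjacd is the Jacobian with respect to the data x,
# and *_est returns a rough initial parameter estimate for ODRPACK.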
def _lin_fcn(B, x):
a, b = B[0], B[1:]
b.shape = (b.shape[0], 1)
return a + (x*b).sum(axis=0)
def _lin_fjb(B, x):
a = np.ones(x.shape[-1], float)
res = np.concatenate((a, x.ravel()))
res.shape = (B.shape[-1], x.shape[-1])
return res
def _lin_fjd(B, x):
b = B[1:]
b = np.repeat(b, (x.shape[-1],)*b.shape[-1], axis=0)
b.shape = x.shape
return b
def _lin_est(data):
# Eh. The answer is analytical, so just return all ones.
# Don't return zeros since that will interfere with
# ODRPACK's auto-scaling procedures.
if len(data.x.shape) == 2:
m = data.x.shape[0]
else:
m = 1
return np.ones((m + 1,), float)
def _poly_fcn(B, x, powers):
a, b = B[0], B[1:]
b.shape = (b.shape[0], 1)
return a + np.sum(b * np.power(x, powers), axis=0)
def _poly_fjacb(B, x, powers):
res = np.concatenate((np.ones(x.shape[-1], float),
np.power(x, powers).flat))
res.shape = (B.shape[-1], x.shape[-1])
return res
def _poly_fjacd(B, x, powers):
b = B[1:]
b.shape = (b.shape[0], 1)
b = b * powers
return np.sum(b * np.power(x, powers-1), axis=0)
def _exp_fcn(B, x):
return B[0] + np.exp(B[1] * x)
def _exp_fjd(B, x):
return B[1] * np.exp(B[1] * x)
def _exp_fjb(B, x):
res = np.concatenate((np.ones(x.shape[-1], float), x * np.exp(B[1] * x)))
res.shape = (2, x.shape[-1])
return res
def _exp_est(data):
# Eh.
return np.array([1., 1.])
class _MultilinearModel(Model):
r"""
Arbitrary-dimensional linear model
This model is defined by :math:`y=\beta_0 + \sum_{i=1}^m \beta_i x_i`
Examples
--------
We can calculate orthogonal distance regression with an arbitrary
dimensional linear model:
>>> from scipy import odr
>>> x = np.linspace(0.0, 5.0)
>>> y = 10.0 + 5.0 * x
>>> data = odr.Data(x, y)
>>> odr_obj = odr.ODR(data, odr.multilinear)
>>> output = odr_obj.run()
>>> print(output.beta)
[10. 5.]
"""
def __init__(self):
super().__init__(
_lin_fcn, fjacb=_lin_fjb, fjacd=_lin_fjd, estimate=_lin_est,
meta={'name': 'Arbitrary-dimensional Linear',
'equ': 'y = B_0 + Sum[i=1..m, B_i * x_i]',
'TeXequ': r'$y=\beta_0 + \sum_{i=1}^m \beta_i x_i$'})
multilinear = _MultilinearModel()
def polynomial(order):
"""
Factory function for a general polynomial model.
Parameters
----------
order : int or sequence
If an integer, it becomes the order of the polynomial to fit. If
a sequence of numbers, then these are the explicit powers in the
polynomial.
A constant term (power 0) is always included, so don't include 0.
Thus, polynomial(n) is equivalent to polynomial(range(1, n+1)).
Returns
-------
polynomial : Model instance
Model instance.
Examples
--------
We can fit an input data using orthogonal distance regression (ODR) with
a polynomial model:
>>> import matplotlib.pyplot as plt
>>> from scipy import odr
>>> x = np.linspace(0.0, 5.0)
>>> y = np.sin(x)
>>> poly_model = odr.polynomial(3) # using third order polynomial model
>>> data = odr.Data(x, y)
>>> odr_obj = odr.ODR(data, poly_model)
>>> output = odr_obj.run() # running ODR fitting
>>> poly = np.poly1d(output.beta[::-1])
>>> poly_y = poly(x)
>>> plt.plot(x, y, label="input data")
>>> plt.plot(x, poly_y, label="polynomial ODR")
>>> plt.legend()
>>> plt.show()
"""
powers = np.asarray(order)
if powers.shape == ():
# Scalar.
powers = np.arange(1, powers + 1)
powers.shape = (len(powers), 1)
len_beta = len(powers) + 1
def _poly_est(data, len_beta=len_beta):
# Eh. Ignore data and return all ones.
return np.ones((len_beta,), float)
return Model(_poly_fcn, fjacd=_poly_fjacd, fjacb=_poly_fjacb,
estimate=_poly_est, extra_args=(powers,),
meta={'name': 'Sorta-general Polynomial',
'equ': 'y = B_0 + Sum[i=1..%s, B_i * (x**i)]' % (len_beta-1),
'TeXequ': r'$y=\beta_0 + \sum_{i=1}^{%s} \beta_i x^i$' %
(len_beta-1)})
class _ExponentialModel(Model):
r"""
Exponential model
This model is defined by :math:`y=\beta_0 + e^{\beta_1 x}`
Examples
--------
We can calculate orthogonal distance regression with an exponential model:
>>> from scipy import odr
>>> x = np.linspace(0.0, 5.0)
>>> y = -10.0 + np.exp(0.5*x)
>>> data = odr.Data(x, y)
>>> odr_obj = odr.ODR(data, odr.exponential)
>>> output = odr_obj.run()
>>> print(output.beta)
[-10. 0.5]
"""
def __init__(self):
super().__init__(_exp_fcn, fjacd=_exp_fjd, fjacb=_exp_fjb,
estimate=_exp_est,
meta={'name': 'Exponential',
'equ': 'y= B_0 + exp(B_1 * x)',
'TeXequ': r'$y=\beta_0 + e^{\beta_1 x}$'})
exponential = _ExponentialModel()
def _unilin(B, x):
return x*B[0] + B[1]
def _unilin_fjd(B, x):
return np.ones(x.shape, float) * B[0]
def _unilin_fjb(B, x):
_ret = np.concatenate((x, np.ones(x.shape, float)))
_ret.shape = (2,) + x.shape
return _ret
def _unilin_est(data):
return (1., 1.)
def _quadratic(B, x):
return x*(x*B[0] + B[1]) + B[2]
def _quad_fjd(B, x):
return 2*x*B[0] + B[1]
def _quad_fjb(B, x):
_ret = np.concatenate((x*x, x, np.ones(x.shape, float)))
_ret.shape = (3,) + x.shape
return _ret
def _quad_est(data):
return (1.,1.,1.)
class _UnilinearModel(Model):
r"""
Univariate linear model
This model is defined by :math:`y = \beta_0 x + \beta_1`
Examples
--------
We can calculate orthogonal distance regression with an unilinear model:
>>> from scipy import odr
>>> x = np.linspace(0.0, 5.0)
>>> y = 1.0 * x + 2.0
>>> data = odr.Data(x, y)
>>> odr_obj = odr.ODR(data, odr.unilinear)
>>> output = odr_obj.run()
>>> print(output.beta)
[1. 2.]
"""
def __init__(self):
super().__init__(_unilin, fjacd=_unilin_fjd, fjacb=_unilin_fjb,
estimate=_unilin_est,
meta={'name': 'Univariate Linear',
'equ': 'y = B_0 * x + B_1',
'TeXequ': '$y = \\beta_0 x + \\beta_1$'})
unilinear = _UnilinearModel()
class _QuadraticModel(Model):
r"""
Quadratic model
This model is defined by :math:`y = \beta_0 x^2 + \beta_1 x + \beta_2`
Examples
--------
We can calculate orthogonal distance regression with a quadratic model:
>>> from scipy import odr
>>> x = np.linspace(0.0, 5.0)
>>> y = 1.0 * x ** 2 + 2.0 * x + 3.0
>>> data = odr.Data(x, y)
>>> odr_obj = odr.ODR(data, odr.quadratic)
>>> output = odr_obj.run()
>>> print(output.beta)
[1. 2. 3.]
"""
def __init__(self):
super().__init__(
_quadratic, fjacd=_quad_fjd, fjacb=_quad_fjb, estimate=_quad_est,
meta={'name': 'Quadratic',
'equ': 'y = B_0*x**2 + B_1*x + B_2',
                  'TeXequ': '$y = \\beta_0 x^2 + \\beta_1 x + \\beta_2$'})
quadratic = _QuadraticModel()
| bsd-3-clause |
abhijeet-talaulikar/Automatic-Helmet-Detection | K-Fold/Logistic_Regression.py | 1 | 2663 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import KFold
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import *
from timeit import default_timer as timer
from random import randint
from sklearn.feature_selection import *
from sklearn.decomposition import PCA
helmet_data = np.genfromtxt ('helmet.csv', delimiter=",")
face_data = np.genfromtxt ('face.csv', delimiter=",")
data_full = np.concatenate((helmet_data, face_data), 0)
np.random.shuffle(data_full) #shuffle the tuples
#feature reduction (on HOG part)
#gain, j = mutual_info_classif(data_full[:, 8:-1], data_full[:, -1], discrete_features='auto', n_neighbors=3, copy=True, random_state=None), 0
#for i in np.arange(len(gain)):
# if gain[i] <= 0.001:
# data_full = np.delete(data_full, 8+i-j, 1)
# j += 1
#data = np.copy(data_full)
#principal component analysis
pca = PCA(n_components=150)
data = pca.fit_transform(data_full[:, 8:-1])
data = np.concatenate((data_full[:, 0:8], data, np.array([data_full[:, -1]]).T), axis=1)
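#columns 0-7 of the original data are kept as-is, the 150 PCA components of the
#HOG block follow, and the class label stays in the last column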
precision, recall, f1, accuracy, support, fn, roc_auc = 0, 0, 0, 0, 0, 0, 0
colors = ['cyan', 'indigo', 'seagreen', 'yellow', 'blue', 'darkorange']
k = 10
kf = KFold(n_splits = k)
start = timer()
for train, test in kf.split(data):
X_train, X_test = data[train, 0:-1], data[test, 0:-1]
y_train, y_test = data[train, -1], data[test, -1]
clf = LogisticRegression().fit(X_train, y_train)
y_pred = clf.predict(X_test)
#ROC curve
y_prob = clf.predict_proba(X_test)[:,1]
fpr, tpr, thresholds = roc_curve(y_test, y_prob, pos_label=1)
roc_auc += auc(fpr, tpr)
plt.plot(fpr, tpr, color=colors[randint(0, len(colors)-1)])
precision += precision_score(y_test, y_pred, average = 'macro')
recall += recall_score(y_test, y_pred, average = 'macro')
f1 += f1_score(y_test, y_pred, average = 'macro')
accuracy += accuracy_score(y_test, y_pred)
y = y_test - y_pred
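    #y is 1 exactly where the true label is 1 but the prediction is 0 (missed
    #positives), so summing the positive entries counts this fold's false negatives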
fn += sum(y[y > 0]) / len(y_test)
end = timer()
precision /= k
recall /= k
f1 /= k
accuracy /= k
fn /= k
print("Precision \t: %s" % round(precision, 4))
print("Recall \t\t: %s" % round(recall, 4))
print("F1 \t\t: %s" % round(f1, 4))
print("Accuracy \t: %s" % round(accuracy, 4))
print("False Neg \t: %s%%" % round(fn * 100, 4))
print("Mean AUC \t: %s" % round(roc_auc / k, 4))
print("\nExecution time: %s ms" % round((end - start) * 1000, 4))
#ROC curve
plt.title('Logistic Regression (mean AUC = %s)' % round(roc_auc / k, 4))
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.05,1.0])
plt.ylim([0.0,1.05])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
| gpl-3.0 |
kmike/scikit-learn | sklearn/utils/tests/test_validation.py | 5 | 5499 | """Tests for input validation functions"""
from tempfile import NamedTemporaryFile
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from sklearn.utils import (array2d, as_float_array, atleast2d_or_csr,
atleast2d_or_csc, check_arrays, safe_asarray)
def test_as_float_array():
"""Test function for as_float_array"""
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
# Checks that the return type is ok
X2 = as_float_array(X, copy=False)
np.testing.assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
# Checking that the new type is ok
np.testing.assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
def test_check_arrays_exceptions():
"""Check that invalid arguments raise appropriate exceptions"""
assert_raises(ValueError, check_arrays, [0], [0, 1])
assert_raises(TypeError, check_arrays, 0, [0, 1])
assert_raises(TypeError, check_arrays, [0], 0)
assert_raises(TypeError, check_arrays, [0, 1], [0, 1], meaning_of_life=42)
assert_raises(ValueError, check_arrays, [0], [0], sparse_format='fake')
def test_np_matrix():
"""Confirm that input validation code does not return np.matrix"""
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
assert_false(isinstance(atleast2d_or_csr(X), np.matrix))
assert_false(isinstance(atleast2d_or_csr(np.matrix(X)), np.matrix))
assert_false(isinstance(atleast2d_or_csr(sp.csc_matrix(X)), np.matrix))
assert_false(isinstance(atleast2d_or_csc(X), np.matrix))
assert_false(isinstance(atleast2d_or_csc(np.matrix(X)), np.matrix))
assert_false(isinstance(atleast2d_or_csc(sp.csr_matrix(X)), np.matrix))
assert_false(isinstance(safe_asarray(X), np.matrix))
assert_false(isinstance(safe_asarray(np.matrix(X)), np.matrix))
assert_false(isinstance(safe_asarray(sp.lil_matrix(X)), np.matrix))
assert_true(atleast2d_or_csr(X, copy=False) is X)
assert_false(atleast2d_or_csr(X, copy=True) is X)
assert_true(atleast2d_or_csc(X, copy=False) is X)
assert_false(atleast2d_or_csc(X, copy=True) is X)
def test_memmap():
"""Confirm that input validation code doesn't copy memory mapped arrays"""
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=100, dtype=np.float32)
M[:] = 0
for f in (array2d, np.asarray, asflt, safe_asarray):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M)
X[:] = 0
def test_ordering():
# Check that ordering is enforced correctly by the different
# validation utilities
# We need to check each validation utility, because a 'copy' without
# 'order=K' will kill the ordering
X = np.ones((10, 5))
for A in X, X.T:
for validator in (array2d, atleast2d_or_csr, atleast2d_or_csc):
for copy in (True, False):
B = validator(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = validator(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
def test_check_arrays():
# check that error is raised on different length inputs
X = [0, 1]
Y = np.arange(3)
assert_raises(ValueError, check_arrays, X, Y)
# check error for sparse matrix and array
X = sp.csc_matrix(np.arange(4))
assert_raises(ValueError, check_arrays, X, Y)
    # check the y=None pattern
X = [0, 1, 2]
X_, Y_, Z_ = check_arrays(X, Y, None)
assert_true(Z_ is None)
# check that lists are converted
X_, Y_ = check_arrays(X, Y)
assert_true(isinstance(X_, np.ndarray))
assert_true(isinstance(Y_, np.ndarray))
# check that Y was not copied:
assert_true(Y_ is Y)
# check copying
X_, Y_ = check_arrays(X, Y, copy=True)
assert_false(Y_ is Y)
# check forcing dtype
X_, Y_ = check_arrays(X, Y, dtype=np.int)
assert_equal(X_.dtype, np.int)
assert_equal(Y_.dtype, np.int)
X_, Y_ = check_arrays(X, Y, dtype=np.float)
assert_equal(X_.dtype, np.float)
assert_equal(Y_.dtype, np.float)
# test check_ccontiguous
Y = np.arange(6).reshape(3, 2).copy('F')
# if we don't specify it, it is not changed
X_, Y_ = check_arrays(X, Y)
assert_true(Y_.flags['F_CONTIGUOUS'])
assert_false(Y_.flags['C_CONTIGUOUS'])
X_, Y_ = check_arrays(X, Y, check_ccontiguous=True)
assert_true(Y_.flags['C_CONTIGUOUS'])
assert_false(Y_.flags['F_CONTIGUOUS'])
# check that lists are passed through if allow_lists is true
X_, Y_ = check_arrays(X, Y, allow_lists=True)
assert_true(isinstance(X_, list))
| bsd-3-clause |
kHarshit/DAT210x_Microsoft | Module2/assignment3.py | 1 | 1178 | import pandas as pd
# TODO: Load up the dataset Ensuring you set the appropriate header column names
df = pd.read_csv('Datasets/servo.data')
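# note: with the default header, read_csv consumes the first data row as column
# names; passing names=[...] or header=None would keep that row as data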
df.columns = ['motor', 'screw', 'pgain', 'vgain', 'class']
print(df.describe())
# TODO: Create a slice that contains all entries having a vgain equal to 5. Then print the length of (# of samples in) that slice:
k = df[df.iloc[:, 3] == 5]
print(k.describe())
print(len(k))
# TODO: Create a slice that contains all entries having a motor equal to E and screw equal
# to E. Then print the length of (# of samples in) that slice:
print(df[(df.iloc[:, 0] == 'E') & (df.iloc[:, 1] == 'E')])
l = df[(df['motor'] == 'E') & (df['screw'] == 'E')]
print(l.describe())
print(len(l)) # the answer should be 6; check out the read_csv() API documentation, which will fix your issue!
# TODO: Create a slice that contains all entries having a pgain equal to 4. Use one of the various methods of finding
# the mean vgain value for the samples in that slice. Once you've found it, print it:
m = df[df.pgain == 4]
print(m.mean())
print(m.vgain.mean())
# TODO: (Bonus) See what happens when you run the .dtypes method on your dataframe!
print(df.dtypes)
| mit |
msingh172/pylearn2 | pylearn2/models/independent_multiclass_logistic.py | 44 | 2491 | """
Multiclass-classification by taking the max over a set of one-against-rest
logistic classifiers.
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import logging
try:
from sklearn.linear_model import LogisticRegression
except ImportError:
LogisticRegression = None
import numpy as np
from theano.compat.six.moves import xrange
logger = logging.getLogger(__name__)
class IndependentMulticlassLogistic:
"""
Fits a separate logistic regression classifier for each class, makes
predictions based on the max output: during training, views a one-hot label
vector as a vector of independent binary labels, rather than correctly
modeling them as one-hot like softmax would do.
This is what Jia+Huang used to get state of the art on CIFAR-100
Parameters
----------
    C : float
        Inverse regularization strength passed to each underlying
        scikit-learn LogisticRegression; smaller values mean stronger
        regularization.
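
    Notes
    -----
    `fit` returns a separate `Classifier` object rather than ``self``, so the
    intended usage is roughly (``X_train`` / ``y_train`` are placeholders for
    a 2D feature array and a vector of integer class labels starting at 0)::

        clf = IndependentMulticlassLogistic(C=1.0).fit(X_train, y_train)
        y_pred = clf.predict(X_test)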
"""
def __init__(self, C):
self.C = C
def fit(self, X, y):
"""
Fits the model to the given training data.
Parameters
----------
X : ndarray
2D array, each row is one example
y : ndarray
vector of integer class labels
"""
if LogisticRegression is None:
raise RuntimeError("sklearn not available.")
min_y = y.min()
max_y = y.max()
assert min_y == 0
num_classes = max_y + 1
assert num_classes > 1
logistics = []
for c in xrange(num_classes):
logger.info('fitting class {0}'.format(c))
cur_y = (y == c).astype('int32')
logistics.append(LogisticRegression(C = self.C).fit(X,cur_y))
return Classifier(logistics)
class Classifier:
"""
    One-vs-rest linear classifier assembled from a list of fitted binary
    logistic regression models; predictions take the argmax over the
    per-class decision values.

    Parameters
    ----------
    logistics : list of sklearn.linear_model.LogisticRegression
        One fitted one-against-rest classifier per class, in class order.
"""
def __init__(self, logistics):
assert len(logistics) > 1
num_classes = len(logistics)
num_features = logistics[0].coef_.shape[1]
self.W = np.zeros((num_features, num_classes))
self.b = np.zeros((num_classes,))
for i in xrange(num_classes):
self.W[:,i] = logistics[i].coef_
self.b[i] = logistics[i].intercept_
def predict(self, X):
"""
        Return the predicted integer class label for each row of X.
"""
return np.argmax(self.b + np.dot(X,self.W), 1)
| bsd-3-clause |
aabadie/scikit-learn | benchmarks/bench_plot_neighbors.py | 101 | 6469 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
    X = get_data(N, D, dataset)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
plt.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = plt.subplot(sbplt, yscale='log')
plt.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
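        # `bottom` is the power of ten at or just below the fastest
        # construction time; using it as the bar baseline keeps both the
        # construction (red) and query (blue) segments visible on the log axis.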
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = plt.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = plt.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
plt.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
plt.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
plt.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
plt.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
plt.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
plt.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
plt.show()
| bsd-3-clause |
linebp/pandas | pandas/tests/series/test_indexing.py | 1 | 88099 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
import pandas._libs.index as _index
from pandas.core.dtypes.common import is_integer, is_scalar
from pandas import (Index, Series, DataFrame, isnull,
date_range, NaT, MultiIndex,
Timestamp, DatetimeIndex, Timedelta)
from pandas.core.indexing import IndexingError
from pandas.tseries.offsets import BDay
from pandas._libs import tslib, lib
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import (slow,
assert_series_equal,
assert_almost_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData):
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
assert result == expected
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
assert result == expected
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
assert result == 'Missing'
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
assert result == 3
result = vc.get(True, default='Missing')
assert result == 'Missing'
def test_get_nan(self):
# GH 8569
s = pd.Float64Index(range(10)).to_series()
assert s.get(np.nan) is None
assert s.get(np.nan, default='Missing') == 'Missing'
# ensure that fixing the above hasn't broken get
# with multiple elements
idx = [20, 30]
assert_series_equal(s.get(idx),
Series([np.nan] * 2, index=idx))
idx = [np.nan, np.nan]
assert_series_equal(s.get(idx),
Series([np.nan] * 2, index=idx))
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
pytest.raises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
assert (result == 5).all()
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
pytest.raises(IndexError, s.__getitem__, -11)
pytest.raises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
assert result == 4
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
assert self.series[idx1] == self.series.get(idx1)
assert self.objSeries[idx2] == self.objSeries.get(idx2)
assert self.series[idx1] == self.series[5]
assert self.objSeries[idx2] == self.objSeries[5]
assert self.series.get(-1) == self.series.get(self.series.index[-1])
assert self.series[5] == self.series.get(self.series.index[5])
# missing
d = self.ts.index[0] - BDay()
pytest.raises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
assert result is None
def test_iloc(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.loc[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
assert (s[1:3] == 0).all()
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iloc_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
assert s.iloc[2] == 2
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
assert (s == 0).all()
s[:-12] = 5
assert (s == 0).all()
def test_getitem_int64(self):
idx = np.int64(5)
assert self.ts[idx] == self.ts[5]
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
assert self.series.index[2] == slice1.index[1]
assert self.objSeries.index[2] == slice2.index[1]
assert self.series[2] == slice1[1]
assert self.objSeries[2] == slice2[1]
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
tm.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
assert s.index.name == 'index_name'
assert s.dtype == np.int64
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
pytest.raises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
pytest.raises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
pytest.raises(Exception, s.__getitem__, omask)
pytest.raises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
pytest.raises(Exception, ts.__getitem__, mask_shifted)
pytest.raises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
pytest.raises(Exception, ts.loc.__getitem__, mask_shifted)
pytest.raises(Exception, ts.loc.__setitem__, mask_shifted, 1)
# ts.loc[mask_shifted]
# ts.loc[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
assert (s[:4] == 0).all()
assert not (s[4:] == 0).any()
def test_getitem_setitem_datetime_tz_pytz(self):
from pytz import timezone as tz
from pandas import date_range
N = 50
# testing with timezone, GH #2785
rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')
ts = Series(np.random.randn(N), index=rng)
# also test Timestamp tz handling, GH #2789
result = ts.copy()
result["1990-01-01 09:00:00+00:00"] = 0
result["1990-01-01 09:00:00+00:00"] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
result["1990-01-01 03:00:00-06:00"] = 0
result["1990-01-01 03:00:00-06:00"] = ts[4]
assert_series_equal(result, ts)
# repeat with datetimes
result = ts.copy()
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
# comparison dates with datetime MUST be localized!
date = tz('US/Central').localize(datetime(1990, 1, 1, 3))
result[date] = 0
result[date] = ts[4]
assert_series_equal(result, ts)
def test_getitem_setitem_datetime_tz_dateutil(self):
from dateutil.tz import tzutc
from pandas._libs.tslib import _dateutil_gettz as gettz
tz = lambda x: tzutc() if x == 'UTC' else gettz(
x) # handle special case for utc in dateutil
from pandas import date_range
N = 50
# testing with timezone, GH #2785
rng = date_range('1/1/1990', periods=N, freq='H',
tz='America/New_York')
ts = Series(np.random.randn(N), index=rng)
# also test Timestamp tz handling, GH #2789
result = ts.copy()
result["1990-01-01 09:00:00+00:00"] = 0
result["1990-01-01 09:00:00+00:00"] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
result["1990-01-01 03:00:00-06:00"] = 0
result["1990-01-01 03:00:00-06:00"] = ts[4]
assert_series_equal(result, ts)
# repeat with datetimes
result = ts.copy()
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
result[datetime(1990, 1, 1, 3, tzinfo=tz('America/Chicago'))] = 0
result[datetime(1990, 1, 1, 3, tzinfo=tz('America/Chicago'))] = ts[4]
assert_series_equal(result, ts)
def test_getitem_setitem_datetimeindex(self):
N = 50
# testing with timezone, GH #2785
rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')
ts = Series(np.random.randn(N), index=rng)
result = ts["1990-01-01 04:00:00"]
expected = ts[4]
assert result == expected
result = ts.copy()
result["1990-01-01 04:00:00"] = 0
result["1990-01-01 04:00:00"] = ts[4]
assert_series_equal(result, ts)
result = ts["1990-01-01 04:00:00":"1990-01-01 07:00:00"]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = 0
result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = ts[4:8]
assert_series_equal(result, ts)
lb = "1990-01-01 04:00:00"
rb = "1990-01-01 07:00:00"
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
# repeat all the above with naive datetimes
result = ts[datetime(1990, 1, 1, 4)]
expected = ts[4]
assert result == expected
result = ts.copy()
result[datetime(1990, 1, 1, 4)] = 0
result[datetime(1990, 1, 1, 4)] = ts[4]
assert_series_equal(result, ts)
result = ts[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = 0
result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = ts[4:8]
assert_series_equal(result, ts)
lb = datetime(1990, 1, 1, 4)
rb = datetime(1990, 1, 1, 7)
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts[ts.index[4]]
expected = ts[4]
assert result == expected
result = ts[ts.index[4:8]]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result[ts.index[4:8]] = 0
result[4:8] = ts[4:8]
assert_series_equal(result, ts)
# also test partial date slicing
result = ts["1990-01-02"]
expected = ts[24:48]
assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-02"] = 0
result["1990-01-02"] = ts[24:48]
assert_series_equal(result, ts)
def test_getitem_setitem_periodindex(self):
from pandas import period_range
N = 50
rng = period_range('1/1/1990', periods=N, freq='H')
ts = Series(np.random.randn(N), index=rng)
result = ts["1990-01-01 04"]
expected = ts[4]
assert result == expected
result = ts.copy()
result["1990-01-01 04"] = 0
result["1990-01-01 04"] = ts[4]
assert_series_equal(result, ts)
result = ts["1990-01-01 04":"1990-01-01 07"]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-01 04":"1990-01-01 07"] = 0
result["1990-01-01 04":"1990-01-01 07"] = ts[4:8]
assert_series_equal(result, ts)
lb = "1990-01-01 04"
rb = "1990-01-01 07"
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
# GH 2782
result = ts[ts.index[4]]
expected = ts[4]
assert result == expected
result = ts[ts.index[4:8]]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result[ts.index[4:8]] = 0
result[4:8] = ts[4:8]
assert_series_equal(result, ts)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
pytest.raises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
pytest.raises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
assert s.iloc[0] == s['a']
s.iloc[0] = 5
tm.assert_almost_equal(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
assert isinstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
pytest.raises(KeyError, s.__getitem__, 1)
pytest.raises(KeyError, s.loc.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
assert is_scalar(obj['c'])
assert obj['c'] == 0
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .loc internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.loc[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
pytest.raises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
assert result == s.loc['A']
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.loc[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.loc[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
assert self.series.index[9] not in numSlice.index
assert self.objSeries.index[9] not in objSlice.index
assert len(numSlice) == len(numSlice.index)
assert self.series[numSlice.index[0]] == numSlice[numSlice.index[0]]
assert numSlice.index[1] == self.series.index[11]
assert tm.equalContents(numSliceEnd, np.array(self.series)[-10:])
# Test return view.
sl = self.series[10:20]
sl[:] = 0
assert (self.series[10:20] == 0).all()
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
pytest.raises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
pytest.raises(TypeError, f)
pytest.raises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
pytest.raises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
assert len(s.loc[12.0:]) == 8
assert len(s.loc[12.5:]) == 7
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
assert len(s.loc[12.0:]) == 8
assert len(s.loc[12.5:]) == 7
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
assert np.isnan(self.ts[6])
assert np.isnan(self.ts[2])
self.ts[np.isnan(self.ts)] = 5
assert not np.isnan(self.ts[2])
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
assert (series[::2] == 0).all()
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
assert res is self.ts
assert self.ts[idx] == 0
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
assert res is s
assert res.index[-1] == 'foobar'
assert res['foobar'] == 0
s = self.series.copy()
s.loc['foobar'] = 0
assert s.index[-1] == 'foobar'
assert s['foobar'] == 0
def test_setslice(self):
sl = self.ts[5:20]
assert len(sl) == len(sl.index)
assert sl.index.is_unique
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assert_raises_regex(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assert_raises_regex(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
pytest.raises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
pytest.raises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.loc[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
assert result == expected
result = s.iloc[0]
assert result == expected
result = s['a']
assert result == expected
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.loc[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.loc[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.loc[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.loc[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
pytest.raises(Exception, s.__setitem__, inds_notfound, 0)
pytest.raises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
assert result == expected
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
assert result == expected
s2 = s.copy()
s2['a'] = expected
result = s2['a']
assert result == expected
def test_loc_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.loc[inds], self.series.reindex(inds))
assert_series_equal(self.series.iloc[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.loc[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.loc[mask], self.series[mask])
# ask for index value
assert self.ts.loc[d1] == self.ts[d1]
assert self.ts.loc[d2] == self.ts[d2]
def test_loc_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
pytest.raises(KeyError, ts2.loc.__getitem__, slice(d1, d2))
pytest.raises(KeyError, ts2.loc.__setitem__, slice(d1, d2), 0)
def test_loc_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).all()
# so is this
cp = s.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = s.iloc[2:6]
result2 = s.loc[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
pytest.raises(KeyError, s2.loc.__getitem__, slice(3, 11))
pytest.raises(KeyError, s2.loc.__setitem__, slice(3, 11), 0)
def test_loc_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.loc[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
assert orig.dtype == 'datetime64[ns, {0}]'.format(tz)
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
assert vals.dtype == 'datetime64[ns, {0}]'.format(tz)
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
assert orig.dtype == 'datetime64[ns, {0}]'.format(tz)
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00-04:00', tz=tz),
pd.Timestamp('2011-01-01 00:00-05:00', tz=tz),
pd.Timestamp('2016-11-06 01:00-05:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
assert vals.dtype == 'datetime64[ns, {0}]'.format(tz)
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.iloc[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
pytest.raises(ValueError, s.where, 1)
pytest.raises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
pytest.raises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
pytest.raises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
assert s.dtype == expected.dtype
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
assert s.dtype == expected.dtype
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
pytest.raises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
assert s.dtype == expected.dtype
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
pytest.raises(ValueError, f)
def f():
s[mask] = [0] * 5
pytest.raises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
assert isnull(result)
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
def test_where_array_like(self):
# see gh-15414
s = Series([1, 2, 3])
cond = [False, True, True]
expected = Series([np.nan, 2, 3])
klasses = [list, tuple, np.array, Series]
for klass in klasses:
result = s.where(klass(cond))
assert_series_equal(result, expected)
def test_where_invalid_input(self):
# see gh-15414: only boolean arrays accepted
s = Series([1, 2, 3])
msg = "Boolean array expected for the condition"
conds = [
[1, 0, 1],
Series([2, 5, 7]),
["True", "False", "True"],
[Timestamp("2017-01-01"),
pd.NaT, Timestamp("2017-01-02")]
]
for cond in conds:
with tm.assert_raises_regex(ValueError, msg):
s.where(cond)
msg = "Array conditional must be same shape as self"
with tm.assert_raises_regex(ValueError, msg):
s.where([True])
def test_where_ndframe_align(self):
msg = "Array conditional must be same shape as self"
s = Series([1, 2, 3])
cond = [True]
with tm.assert_raises_regex(ValueError, msg):
s.where(cond)
expected = Series([1, np.nan, np.nan])
out = s.where(Series(cond))
tm.assert_series_equal(out, expected)
cond = np.array([False, True, False, True])
with tm.assert_raises_regex(ValueError, msg):
s.where(cond)
expected = Series([np.nan, 2, np.nan])
out = s.where(Series(cond))
tm.assert_series_equal(out, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
pytest.raises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
assert_series_equal(s.astype(np.int64), expected, )
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
pytest.raises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0, 'b', 1, 'd', 'e', 'f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
pytest.raises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a', 'b', 'c', 0, 1, 'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(27))
pytest.raises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(2))
pytest.raises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)), 'b', 'c'])
assert_series_equal(s, expected)
def test_where_broadcast(self):
# Test a variety of differently sized series
for size in range(2, 6):
# Test a variety of boolean indices
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
# Test a variety of different numbers as content
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
# Test numpy arrays, lists and tuples as the input to be
# broadcast
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
s[selection] = arr
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, arr)
assert_series_equal(result, expected)
def test_where_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan],
index=[0, 1, 2, 0, 1, 2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
def test_where_datetime(self):
s = Series(date_range('20130102', periods=2))
expected = Series([10, 10], dtype='datetime64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='datetime64[ns]')
assert_series_equal(rs, expected)
# GH 15701
timestamps = ['2016-12-31 12:00:04+00:00',
'2016-12-31 12:00:04.010000+00:00']
s = Series([pd.Timestamp(t) for t in timestamps])
rs = s.where(Series([False, True]))
expected = Series([pd.NaT, s[1]])
assert_series_equal(rs, expected)
def test_where_timedelta(self):
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10], dtype='timedelta64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='timedelta64[ns]')
assert_series_equal(rs, expected)
def test_mask(self):
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
assert_series_equal(rs, rs2)
pytest.raises(ValueError, s.mask, 1)
pytest.raises(ValueError, s.mask, cond[:3].values, -s)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.mask(s > 2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
assert_series_equal(result, expected)
def test_mask_broadcast(self):
# GH 8801
# copied from test_where_broadcast
for size in range(2, 6):
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
result = s.mask(selection, arr)
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(result, expected)
def test_mask_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.mask(cond, inplace=True)
assert_series_equal(rs.dropna(), s[~cond])
assert_series_equal(rs, s.mask(cond))
rs = s.copy()
rs.mask(cond, -s, inplace=True)
assert_series_equal(rs, s.mask(cond, -s))
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.loc[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.iloc[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.loc[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.loc[d1] = 4
self.series.loc[d2] = 6
assert self.series[d1] == 4
assert self.series[d2] == 6
def test_where_numeric_with_string(self):
# GH 9280
s = pd.Series([1, 2, 3])
w = s.where(s > 1, 'X')
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == 'object'
w = s.where(s > 1, ['X', 'Y', 'Z'])
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == 'object'
w = s.where(s > 1, np.array(['X', 'Y', 'Z']))
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == 'object'
def test_setitem_boolean(self):
mask = self.series > self.series.median()
# similarly indexed series
result = self.series.copy()
result[mask] = self.series * 2
expected = self.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = self.series.copy()
result[mask] = (self.series * 2)[0:5]
expected = (self.series * 2)[0:5].reindex_like(self.series)
expected[-mask] = self.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.loc[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.loc[inds] = 5
pytest.raises(Exception, self.series.loc.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.sort_values()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
# gets coerced to float, right?
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8,
9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_basic_indexing(self):
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
pytest.raises(IndexError, s.__getitem__, 5)
pytest.raises(IndexError, s.__setitem__, 5, 0)
pytest.raises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
pytest.raises(IndexError, s.__getitem__, 5)
pytest.raises(IndexError, s.__setitem__, 5, 0)
def test_int_indexing(self):
s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
pytest.raises(KeyError, s.__getitem__, 5)
pytest.raises(KeyError, s.__getitem__, 'c')
# not monotonic
s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
pytest.raises(KeyError, s.__getitem__, 5)
pytest.raises(KeyError, s.__getitem__, 'c')
def test_datetime_indexing(self):
from pandas import date_range
index = date_range('1/1/2000', '1/7/2000')
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp('1/8/2000')
pytest.raises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
assert s[stamp] == 0
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
pytest.raises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
assert s[stamp] == 0
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta('1 days'), index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s, Series(
[np.nan, Timedelta('1 days')], index=['A', 'B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'), index=['A', 'B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
# GH 14155
s = Series(10 * [np.timedelta64(10, 'm')])
s.loc[[1, 2, 3]] = np.timedelta64(20, 'm')
expected = pd.Series(10 * [np.timedelta64(10, 'm')])
expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, 'm'))
tm.assert_series_equal(s, expected)
def test_underlying_data_conversion(self):
# GH 4080
df = DataFrame(dict((c, [1, 2, 3]) for c in ['a', 'b', 'c']))
df.set_index(['a', 'b', 'c'], inplace=True)
s = Series([1], index=[(2, 2, 2)])
df['val'] = 0
df
df['val'].update(s)
expected = DataFrame(
dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0]))
expected.set_index(['a', 'b', 'c'], inplace=True)
tm.assert_frame_equal(df, expected)
# GH 3970
# these are chained assignments as well
pd.set_option('chained_assignment', None)
df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
df["cc"] = 0.0
ck = [True] * len(df)
df["bb"].iloc[0] = .13
# TODO: unused
df_tmp = df.iloc[ck] # noqa
df["bb"].iloc[0] = .15
assert df['bb'].iloc[0] == 0.15
pd.set_option('chained_assignment', 'raise')
# GH 3217
df = DataFrame(dict(a=[1, 3], b=[np.nan, 2]))
df['c'] = np.nan
df['c'].update(pd.Series(['foo'], index=[0]))
expected = DataFrame(dict(a=[1, 3], b=[np.nan, 2], c=['foo', np.nan]))
tm.assert_frame_equal(df, expected)
def test_preserveRefs(self):
seq = self.ts[[5, 10, 15]]
seq[1] = np.NaN
assert not np.isnan(self.ts[10])
def test_drop(self):
# unique
s = Series([1, 2], index=['one', 'two'])
expected = Series([1], index=['one'])
result = s.drop(['two'])
assert_series_equal(result, expected)
result = s.drop('two', axis='rows')
assert_series_equal(result, expected)
# non-unique
# GH 5248
s = Series([1, 1, 2], index=['one', 'two', 'one'])
expected = Series([1, 2], index=['one', 'one'])
result = s.drop(['two'], axis=0)
assert_series_equal(result, expected)
result = s.drop('two')
assert_series_equal(result, expected)
expected = Series([1], index=['two'])
result = s.drop(['one'])
assert_series_equal(result, expected)
result = s.drop('one')
assert_series_equal(result, expected)
# single string/tuple-like
s = Series(range(3), index=list('abc'))
pytest.raises(ValueError, s.drop, 'bc')
pytest.raises(ValueError, s.drop, ('a', ))
# errors='ignore'
s = Series(range(3), index=list('abc'))
result = s.drop('bc', errors='ignore')
assert_series_equal(result, s)
result = s.drop(['a', 'd'], errors='ignore')
expected = s.iloc[1:]
assert_series_equal(result, expected)
# bad axis
pytest.raises(ValueError, s.drop, 'one', axis='columns')
# GH 8522
s = Series([2, 3], index=[True, False])
assert s.index.is_object()
result = s.drop(True)
expected = Series([3], index=[False])
assert_series_equal(result, expected)
def test_align(self):
def _check_align(a, b, how='left', fill=None):
aa, ab = a.align(b, join=how, fill_value=fill)
join_index = a.index.join(b.index, how=how)
if fill is not None:
diff_a = aa.index.difference(join_index)
diff_b = ab.index.difference(join_index)
if len(diff_a) > 0:
assert (aa.reindex(diff_a) == fill).all()
if len(diff_b) > 0:
assert (ab.reindex(diff_b) == fill).all()
ea = a.reindex(join_index)
eb = b.reindex(join_index)
if fill is not None:
ea = ea.fillna(fill)
eb = eb.fillna(fill)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
assert aa.name == 'ts'
assert ea.name == 'ts'
assert ab.name == 'ts'
assert eb.name == 'ts'
for kind in JOIN_TYPES:
_check_align(self.ts[2:], self.ts[:-5], how=kind)
_check_align(self.ts[2:], self.ts[:-5], how=kind, fill=-1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind)
_check_align(self.ts[:0], self.ts[:-5], how=kind, fill=-1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind)
_check_align(self.ts[:-5], self.ts[:0], how=kind, fill=-1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind)
_check_align(self.ts[:0], self.ts[:0], how=kind, fill=-1)
def test_align_fill_method(self):
def _check_align(a, b, how='left', method='pad', limit=None):
aa, ab = a.align(b, join=how, method=method, limit=limit)
join_index = a.index.join(b.index, how=how)
ea = a.reindex(join_index)
eb = b.reindex(join_index)
ea = ea.fillna(method=method, limit=limit)
eb = eb.fillna(method=method, limit=limit)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
for kind in JOIN_TYPES:
for meth in ['pad', 'bfill']:
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth,
limit=1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth,
limit=1)
def test_align_nocopy(self):
b = self.ts[:5].copy()
# do copy
a = self.ts.copy()
ra, _ = a.align(b, join='left')
ra[:5] = 5
assert not (a[:5] == 5).any()
# do not copy
a = self.ts.copy()
ra, _ = a.align(b, join='left', copy=False)
ra[:5] = 5
assert (a[:5] == 5).all()
# do copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right')
rb[:3] = 5
assert not (b[:3] == 5).any()
# do not copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right', copy=False)
rb[:2] = 5
assert (b[:2] == 5).all()
def test_align_same_index(self):
a, b = self.ts.align(self.ts, copy=False)
assert a.index is self.ts.index
assert b.index is self.ts.index
a, b = self.ts.align(self.ts, copy=True)
assert a.index is not self.ts.index
assert b.index is not self.ts.index
def test_align_multiindex(self):
# GH 10665
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
s1 = pd.Series(np.arange(12, dtype='int64'), index=midx)
s2 = pd.Series(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = s1.align(s2, join='left')
res2l, res2r = s2.align(s1, join='right')
expl = s1
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
res1l, res1r = s1.align(s2, join='right')
res2l, res2r = s2.align(s1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1] * 2, index=exp_idx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
def test_reindex(self):
identity = self.series.reindex(self.series.index)
# __array_interface__ is not defined for older numpies
# and on some pythons
try:
assert np.may_share_memory(self.series.index, identity.index)
except AttributeError:
pass
assert identity.index.is_(self.series.index)
assert identity.index.identical(self.series.index)
subIndex = self.series.index[10:20]
subSeries = self.series.reindex(subIndex)
for idx, val in compat.iteritems(subSeries):
assert val == self.series[idx]
subIndex2 = self.ts.index[10:20]
subTS = self.ts.reindex(subIndex2)
for idx, val in compat.iteritems(subTS):
assert val == self.ts[idx]
stuffSeries = self.ts.reindex(subIndex)
assert np.isnan(stuffSeries).all()
# This is extremely important for the Cython code to not screw up
nonContigIndex = self.ts.index[::2]
subNonContig = self.ts.reindex(nonContigIndex)
for idx, val in compat.iteritems(subNonContig):
assert val == self.ts[idx]
# return a copy the same index here
result = self.ts.reindex()
assert not (result is self.ts)
def test_reindex_nan(self):
ts = Series([2, 3, 5, 7], index=[1, 4, nan, 8])
i, j = [nan, 1, nan, 8, 4, nan], [2, 0, 2, 3, 1, 2]
assert_series_equal(ts.reindex(i), ts.iloc[j])
ts.index = ts.index.astype('object')
# reindex coerces index.dtype to float, loc/iloc doesn't
assert_series_equal(ts.reindex(i), ts.iloc[j], check_index_type=False)
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
assert np.issubdtype(result.dtype, np.dtype('M8[ns]'))
mask = result.isnull()
assert mask[-5:].all()
assert not mask[:-5].any()
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_reindex_corner(self):
# (don't forget to fix this) I think it's fixed
self.empty.reindex(self.ts.index, method='pad') # it works
# corner case: pad empty series
reindexed = self.empty.reindex(self.ts.index, method='pad')
# pass non-Index
reindexed = self.ts.reindex(list(self.ts.index))
assert_series_equal(self.ts, reindexed)
# bad fill method
ts = self.ts[::2]
pytest.raises(Exception, ts.reindex, self.ts.index, method='foo')
def test_reindex_pad(self):
s = Series(np.arange(10), dtype='int64')
s2 = s[::2]
reindexed = s2.reindex(s.index, method='pad')
reindexed2 = s2.reindex(s.index, method='ffill')
assert_series_equal(reindexed, reindexed2)
expected = Series([0, 0, 2, 2, 4, 4, 6, 6, 8, 8], index=np.arange(10))
assert_series_equal(reindexed, expected)
# GH4604
s = Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 'd', 'e'])
new_index = ['a', 'g', 'c', 'f']
expected = Series([1, 1, 3, 3], index=new_index)
# this changes dtype because the ffill happens after
result = s.reindex(new_index).ffill()
assert_series_equal(result, expected.astype('float64'))
result = s.reindex(new_index).ffill(downcast='infer')
assert_series_equal(result, expected)
expected = Series([1, 5, 3, 5], index=new_index)
result = s.reindex(new_index, method='ffill')
assert_series_equal(result, expected)
# inference of new dtype
s = Series([True, False, False, True], index=list('abcd'))
new_index = 'agc'
result = s.reindex(list(new_index)).ffill()
expected = Series([True, True, False], index=list(new_index))
assert_series_equal(result, expected)
# GH4618 shifted series downcasting
s = Series(False, index=lrange(0, 5))
result = s.shift(1).fillna(method='bfill')
expected = Series(False, index=lrange(0, 5))
assert_series_equal(result, expected)
def test_reindex_nearest(self):
s = Series(np.arange(10, dtype='int64'))
target = [0.1, 0.9, 1.5, 2.0]
actual = s.reindex(target, method='nearest')
expected = Series(np.around(target).astype('int64'), target)
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest')
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest', tolerance=1)
assert_series_equal(expected, actual)
actual = s.reindex(target, method='nearest', tolerance=0.2)
expected = Series([0, 1, np.nan, 2], target)
assert_series_equal(expected, actual)
def test_reindex_backfill(self):
pass
def test_reindex_int(self):
ts = self.ts[::2]
int_ts = Series(np.zeros(len(ts), dtype=int), index=ts.index)
# this should work fine
reindexed_int = int_ts.reindex(self.ts.index)
# if NaNs introduced
assert reindexed_int.dtype == np.float_
# NO NaNs introduced
reindexed_int = int_ts.reindex(int_ts.index[::2])
assert reindexed_int.dtype == np.int_
def test_reindex_bool(self):
# A series other than float, int, string, or object
ts = self.ts[::2]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
# this should work fine
reindexed_bool = bool_ts.reindex(self.ts.index)
# if NaNs introduced
assert reindexed_bool.dtype == np.object_
# NO NaNs introduced
reindexed_bool = bool_ts.reindex(bool_ts.index[::2])
assert reindexed_bool.dtype == np.bool_
def test_reindex_bool_pad(self):
# fail
ts = self.ts[5:]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
filled_bool = bool_ts.reindex(self.ts.index, method='pad')
assert isnull(filled_bool[:5]).all()
def test_reindex_like(self):
other = self.ts[::2]
assert_series_equal(self.ts.reindex(other.index),
self.ts.reindex_like(other))
# GH 7179
day1 = datetime(2013, 3, 5)
day2 = datetime(2013, 5, 5)
day3 = datetime(2014, 3, 5)
series1 = Series([5, None, None], [day1, day2, day3])
series2 = Series([None, None], [day1, day3])
result = series1.reindex_like(series2, method='pad')
expected = Series([5, np.nan], index=[day1, day3])
assert_series_equal(result, expected)
def test_reindex_fill_value(self):
# -----------------------------------------------------------
# floats
floats = Series([1., 2., 3.])
result = floats.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
result = floats.reindex([1, 2, 3], fill_value=0)
expected = Series([2., 3., 0], index=[1, 2, 3])
assert_series_equal(result, expected)
# -----------------------------------------------------------
# ints
ints = Series([1, 2, 3])
result = ints.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
# don't upcast
result = ints.reindex([1, 2, 3], fill_value=0)
expected = Series([2, 3, 0], index=[1, 2, 3])
assert issubclass(result.dtype.type, np.integer)
assert_series_equal(result, expected)
# -----------------------------------------------------------
# objects
objects = Series([1, 2, 3], dtype=object)
result = objects.reindex([1, 2, 3])
expected = Series([2, 3, np.nan], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
result = objects.reindex([1, 2, 3], fill_value='foo')
expected = Series([2, 3, 'foo'], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
# ------------------------------------------------------------
# bools
bools = Series([True, False, True])
result = bools.reindex([1, 2, 3])
expected = Series([False, True, np.nan], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
result = bools.reindex([1, 2, 3], fill_value=False)
expected = Series([False, True, False], index=[1, 2, 3])
assert_series_equal(result, expected)
def test_select(self):
n = len(self.ts)
result = self.ts.select(lambda x: x >= self.ts.index[n // 2])
expected = self.ts.reindex(self.ts.index[n // 2:])
assert_series_equal(result, expected)
result = self.ts.select(lambda x: x.weekday() == 2)
expected = self.ts[self.ts.index.weekday == 2]
assert_series_equal(result, expected)
def test_cast_on_putmask(self):
# GH 2746
# need to upcast
s = Series([1, 2], index=[1, 2], dtype='int64')
s[[True, False]] = Series([0], index=[1], dtype='int64')
expected = Series([0, 2], index=[1, 2], dtype='int64')
assert_series_equal(s, expected)
def test_type_promote_putmask(self):
# GH8387: test that changing types does not break alignment
ts = Series(np.random.randn(100), index=np.arange(100, 0, -1)).round(5)
left, mask = ts.copy(), ts > 0
right = ts[mask].copy().map(str)
left[mask] = right
assert_series_equal(left, ts.map(lambda t: str(t) if t > 0 else t))
s = Series([0, 1, 2, 0])
mask = s > 0
s2 = s[mask].map(str)
s[mask] = s2
assert_series_equal(s, Series([0, '1', '2', 0]))
s = Series([0, 'foo', 'bar', 0])
mask = Series([False, True, True, False])
s2 = s[mask]
s[mask] = s2
assert_series_equal(s, Series([0, 'foo', 'bar', 0]))
def test_head_tail(self):
assert_series_equal(self.series.head(), self.series[:5])
assert_series_equal(self.series.head(0), self.series[0:0])
assert_series_equal(self.series.tail(), self.series[-5:])
assert_series_equal(self.series.tail(0), self.series[0:0])
def test_multilevel_preserve_name(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(np.random.randn(len(index)), index=index, name='sth')
result = s['foo']
result2 = s.loc['foo']
assert result.name == s.name
assert result2.name == s.name
def test_setitem_scalar_into_readonly_backing_data(self):
# GH14359: test that you cannot mutate a read only buffer
array = np.zeros(5)
array.flags.writeable = False # make the array immutable
series = Series(array)
for n in range(len(series)):
with pytest.raises(ValueError):
series[n] = 1
assert array[n] == 0
def test_setitem_slice_into_readonly_backing_data(self):
# GH14359: test that you cannot mutate a read only buffer
array = np.zeros(5)
array.flags.writeable = False # make the array immutable
series = Series(array)
with pytest.raises(ValueError):
series[1:3] = 1
assert not array.any()
class TestTimeSeriesDuplicates(object):
def setup_method(self, method):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
assert isinstance(self.dups, Series)
assert isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
assert not self.dups.index.is_unique
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
assert uniques.dtype == 'M8[ns]' # sanity
tm.assert_index_equal(uniques, expected)
assert self.dups.index.nunique() == 4
# #2563
assert isinstance(uniques, DatetimeIndex)
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, name='foo')
expected = expected.tz_localize('US/Eastern')
assert result.tz is not None
assert result.name == 'foo'
tm.assert_index_equal(result, expected)
# NaT, note this is excluded
arr = [1370745748 + t for t in range(20)] + [tslib.iNaT]
idx = DatetimeIndex(arr * 3)
tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))
assert idx.nunique() == 20
assert idx.nunique(dropna=False) == 21
arr = [Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t)
for t in range(20)] + [NaT]
idx = DatetimeIndex(arr * 3)
tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))
assert idx.nunique() == 20
assert idx.nunique(dropna=False) == 21
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
assert d in ix
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
pytest.raises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000, 1, 6)] = 0
assert ts[datetime(2000, 1, 6)] == 0
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
assert timestamp in df.index
# it works!
df.loc[timestamp]
assert len(df.loc[[timestamp]]) > 0
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(np.random.rand(len(rng)), index=rng)
ts2 = pd.concat([ts[0:4], ts[-4:], ts[4:-4]])
for t in ts.index:
# TODO: unused?
s = str(t) # noqa
expected = ts[t]
result = ts2[t]
assert expected == result
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result, expected)
compare(slice('2011-01-01', '2011-01-15'))
compare(slice('2010-12-30', '2011-01-15'))
compare(slice('2011-01-01', '2011-01-16'))
# partial ranges
compare(slice('2011-01-01', '2011-01-6'))
compare(slice('2011-01-06', '2011-01-8'))
compare(slice('2011-01-06', '2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result, expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
assert t.year == 2005
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)), index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
expected.name = 'A'
df = DataFrame(dict(A=ts))
result = df['2001']['A']
assert_series_equal(expected, result)
# setting
ts['2001'] = 1
expected = ts['2001']
expected.name = 'A'
df.loc['2001', 'A'] = 1
result = df['2001']['A']
assert_series_equal(expected, result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00',
freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected, ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59',
freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected, ts)
idx = [Timestamp('2013-05-31 00:00'),
Timestamp(datetime(2013, 5, 31, 23, 59, 59, 999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected, ts)
# GH14826, indexing with a seconds resolution string / datetime object
df = DataFrame(np.random.rand(5, 5),
columns=['open', 'high', 'low', 'close', 'volume'],
index=date_range('2012-01-02 18:01:00',
periods=5, tz='US/Central', freq='s'))
expected = df.loc[[df.index[2]]]
# this is a single date, so will raise
pytest.raises(KeyError, df.__getitem__, '2012-01-02 18:01:02', )
pytest.raises(KeyError, df.__getitem__, df.index[2], )
class TestDatetimeIndexing(object):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setup_method(self, method):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(np.random.rand(len(dti)), dti)
def test_fancy_getitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
assert s[48] == 48
assert s['1/2/2009'] == 48
assert s['2009-1-2'] == 48
assert s[datetime(2009, 1, 2)] == 48
assert s[lib.Timestamp(datetime(2009, 1, 2))] == 48
pytest.raises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
assert s[48] == -1
s['1/2/2009'] = -2
assert s[48] == -2
s['1/2/2009':'2009-06-05'] = -3
assert (s[48:54] == -3).all()
def test_dti_snap(self):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
res = dti.snap(freq='W-MON')
exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
exp = exp.repeat([3, 4])
assert (res == exp).all()
res = dti.snap(freq='B')
exp = date_range('1/1/2002', '1/7/2002', freq='b')
exp = exp.repeat([1, 1, 1, 2, 2])
assert (res == exp).all()
def test_dti_reset_index_round_trip(self):
dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D')
d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
assert d2.dtypes[0] == np.dtype('M8[ns]')
d3 = d2.set_index('index')
assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value'])
df = df.set_index('Date')
assert df.index[0] == stamp
assert df.reset_index()['Date'][0] == stamp
def test_series_set_value(self):
# #1561
dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]
index = DatetimeIndex(dates)
s = Series().set_value(dates[0], 1.)
s2 = s.set_value(dates[1], np.nan)
exp = Series([1., np.nan], index=index)
assert_series_equal(s2, exp)
# s = Series(index[:1], index[:1])
# s2 = s.set_value(dates[1], index[1])
# assert s2.values.dtype == 'M8[ns]'
@slow
def test_slice_locs_indexerror(self):
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10)
for i in range(100000)]
s = Series(lrange(100000), times)
s.loc[datetime(1900, 1, 1):datetime(2100, 1, 1)]
def test_slicing_datetimes(self):
# GH 7523
# unique
df = DataFrame(np.arange(4., dtype='float64'),
index=[datetime(2001, 1, i, 10, 00)
for i in [1, 2, 3, 4]])
result = df.loc[datetime(2001, 1, 1, 10):]
assert_frame_equal(result, df)
result = df.loc[:datetime(2001, 1, 4, 10)]
assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 10):datetime(2001, 1, 4, 10)]
assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 11):]
expected = df.iloc[1:]
assert_frame_equal(result, expected)
result = df.loc['20010101 11':]
assert_frame_equal(result, expected)
# duplicates
df = pd.DataFrame(np.arange(5., dtype='float64'),
index=[datetime(2001, 1, i, 10, 00)
for i in [1, 2, 2, 3, 4]])
result = df.loc[datetime(2001, 1, 1, 10):]
assert_frame_equal(result, df)
result = df.loc[:datetime(2001, 1, 4, 10)]
assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 10):datetime(2001, 1, 4, 10)]
assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 11):]
expected = df.iloc[1:]
assert_frame_equal(result, expected)
result = df.loc['20010101 11':]
assert_frame_equal(result, expected)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
assert (-result).all()
tst = DataFrame({'date': dates})
result = tst.duplicated()
assert (-result).all()
class TestNatIndexing(object):
def setup_method(self, method):
self.series = Series(date_range('1/1/2000', periods=10))
# ---------------------------------------------------------------------
# NaT support
def test_set_none_nan(self):
self.series[3] = None
assert self.series[3] is NaT
self.series[3:5] = None
assert self.series[4] is NaT
self.series[5] = np.nan
assert self.series[5] is NaT
self.series[5:7] = np.nan
assert self.series[6] is NaT
def test_nat_operations(self):
# GH 8617
s = Series([0, pd.NaT], dtype='m8[ns]')
exp = s[0]
assert s.median() == exp
assert s.min() == exp
assert s.max() == exp
def test_round_nat(self):
# GH14940
s = Series([pd.NaT])
expected = Series(pd.NaT)
for method in ["round", "floor", "ceil"]:
round_method = getattr(s.dt, method)
for freq in ["s", "5s", "min", "5min", "h", "5h"]:
assert_series_equal(round_method(freq), expected)
| bsd-3-clause |
arahuja/scikit-learn | benchmarks/bench_20newsgroups.py | 377 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
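# Example invocation (illustrative; run from this file's directory):
# python bench_20newsgroups.py -e logistic_regression naive_bayes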
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
| bsd-3-clause |
academicpages/academicpages.github.io | markdown_generator/publications.py | 197 | 3887 |
# coding: utf-8
# # Publications markdown generator for academicpages
#
# Takes a TSV of publications with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook, with the core Python code in publications.py. Run either the notebook or the script from the `markdown_generator` folder after replacing `publications.tsv` with one that fits your format.
#
# TODO: Make this work with BibTeX and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
#
# ## Data format
#
# The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top.
#
# - `excerpt` and `paper_url` can be blank, but the others must have values.
# - `pub_date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`
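# For illustration only, a made-up record matching the columns above (note that the
# loop further down also expects a `url_slug` column, which it uses to build the .md
# filename and permalink; all values below are hypothetical):
#
# pub_date: 2010-10-01
# title: Paper Title 1
# venue: Journal 1
# excerpt: One-sentence description of the paper.
# citation: Your Name. (2010). "Paper Title 1." Journal 1.
# url_slug: paper-title-1
# paper_url: http://example.com/files/paper1.pdf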
# ## Import pandas
#
# We are using the very handy pandas library for dataframes.
# In[2]:
import pandas as pd
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
publications
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML-encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in text)
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. If you don't want something to appear (like the "Recommended citation")
# In[5]:
import os
for row, item in publications.iterrows():
md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
html_filename = str(item.pub_date) + "-" + item.url_slug
year = item.pub_date[:4]
## YAML variables
md = "---\ntitle: \"" + item.title + '"\n'
md += """collection: publications"""
md += """\npermalink: /publication/""" + html_filename
if len(str(item.excerpt)) > 5:
md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"
md += "\ndate: " + str(item.pub_date)
md += "\nvenue: '" + html_escape(item.venue) + "'"
if len(str(item.paper_url)) > 5:
md += "\npaperurl: '" + item.paper_url + "'"
md += "\ncitation: '" + html_escape(item.citation) + "'"
md += "\n---"
## Markdown description for individual page
if len(str(item.paper_url)) > 5:
md += "\n\n<a href='" + item.paper_url + "'>Download paper here</a>\n"
if len(str(item.excerpt)) > 5:
md += "\n" + html_escape(item.excerpt) + "\n"
md += "\nRecommended citation: " + item.citation
md_filename = os.path.basename(md_filename)
with open("../_publications/" + md_filename, 'w') as f:
f.write(md)
| mit |
charman2/rsas | examples/unsteady.py | 1 | 5254 | # -*- coding: utf-8 -*-
"""Storage selection (SAS) functions: example with multiple fluxes out at steady state
Runs the rSAS model for a synthetic dataset with one flux in and
multiple fluxes out and steady state flow
Theory is presented in:
Harman, C. J. (2014), Time-variable transit time distributions and transport:
Theory and application to storage-dependent transport of chloride in a watershed,
Water Resour. Res., 51, doi:10.1002/2014WR015707.
"""
from __future__ import division
import rsas
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Initializes the random number generator so we always get the same result
np.random.seed(0)
# =====================================
# Load the input data
# =====================================
data = pd.read_csv('Q1.csv', index_col=0, parse_dates=[1])
# length of the dataset
N = len(data)
# The individual timeseries can be pulled out of the dataframe
S = data['S'].values
J = data['J'].values
Q = data['Q1'].values
C_J = data['C_J'].values-2
C_Q1 = data['C_Q1'].values
ST_min = data['ST_min'].values
ST_max = data['ST_max'].values
# =========================
# Parameters needed by rsas
# =========================
# The concentration of water older than the start of observations
C_old = ((J*C_J)[J>0]).sum()/((J)[J>0]).sum()
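# (i.e. the flux-weighted mean input concentration over all timesteps with nonzero inflow)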
# =========================
# Create the rsas functions
# =========================
S_dead = 10.
#lam = 0.
# Uniform
# Parameters for the rSAS function
Q_rSAS_fun_type = 'uniform'
ST_min = np.zeros(N)
ST_max = S + S_dead
Q_rSAS_fun_parameters = np.c_[ST_min, ST_max]
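# np.c_ stacks the two length-N timeseries column-wise, giving an (N, 2) array of
# per-timestep (ST_min, ST_max) parameters for the uniform rSAS function.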
rSAS_fun_Q1 = rsas.create_function(Q_rSAS_fun_type, Q_rSAS_fun_parameters)
rSAS_fun = [rSAS_fun_Q1]
# Kumaraswami
## Parameters for the rSAS function
#Q_rSAS_fun_type = 'kumaraswami'
#ST_min = np.ones(N) * 0.
#ST_max = S + S_dead
#a = np.maximum(0.01, 2. + lam * (S - S.mean())/S.std())
#b = np.ones(N) * 5.
#Q_rSAS_fun_parameters = np.c_[a, b, ST_min, ST_max]
#rSAS_fun_Q1 = rsas.create_function(Q_rSAS_fun_type, Q_rSAS_fun_parameters)
#rSAS_fun = [rSAS_fun_Q1]
# =================
# Initial condition
# =================
# Unknown initial age distribution, so just set this to zeros
ST_init = np.zeros(N + 1)
# =============
# Run the model
# =============
# Run it
outputs = rsas.solve(J, Q, rSAS_fun, ST_init=ST_init,
mode='RK4', dt = 1., n_substeps=3, C_J=C_J, C_old=[C_old], verbose=False, debug=False)
# Let's pull these out to make the outputs from rsas crystal clear
# State variables: age-ranked storage of water and solutes
# ROWS of ST, MS are T - ages
# COLUMNS of ST, MS are t - times
# LAYERS of MS are s - solutes
ST = outputs['ST']
MS = outputs['MS'][:,:,0]
# Timestep-averaged backwards TTD
# ROWS of PQ are T - ages
# COLUMNS of PQ are t - times
# LAYERS of PQ are q - fluxes
PQ1m = outputs['PQ'][:,:,0]
# Timestep-averaged outflow concentration
# ROWS of C_Q are t - times
# COLUMNS of PQ are q - fluxes
C_Q1m1 = outputs['C_Q'][:,0,0]
# Timestep averaged solute load out
# ROWS of MQ are T - ages
# COLUMNS of MQ are t - times
# LAYERS of MQ are q - fluxes
# Last dimension of MS are s - solutes
MQ1m = outputs['MQ'][:,:,0,0]
#%%
# ==================================
# Plot the rSAS function
# ==================================
STx = np.linspace(0,S.max()+S_dead,100)
Omega = np.r_[[rSAS_fun_Q1.cdf_i(STx,i) for i in range(N)]].T
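# Each column of Omega is the cumulative rSAS function evaluated on the STx grid
# for one timestep (this reading follows the axis labels of the plot below).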
import matplotlib.cm as cm
fig = plt.figure(0)
plt.clf()
for i in range(N):
plt.plot(STx, Omega[:,i], lw=1, color=cm.jet((S[i]-S.min())/S.ptp()))
plt.ylim((0,1))
plt.ylabel(r'$\Omega_Q(T)$')
plt.xlabel('age-ranked storage $S_T$')
plt.title('Cumulative rSAS function')
#%%
# ==================================
# Plot the transit time distribution
# ==================================
fig = plt.figure(1)
plt.clf()
plt.plot(PQ1m, lw=1)
plt.ylim((0,1))
plt.ylabel('$P_Q(T)$')
plt.xlabel('age $T$')
plt.title('Cumulative transit time distribution')
#%%
# =====================================================================
# Outflow concentration estimated using several different TTD
# =====================================================================
# Lets get the instantaneous value of the TTD at the end of each timestep
PQ1i = np.zeros((N+1, N+1))
PQ1i[:,0] = rSAS_fun_Q1.cdf_i(ST[:,0],0)
PQ1i[:,1:] = np.r_[[rSAS_fun_Q1.cdf_i(ST[:,i+1],i) for i in range(N)]].T
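# Column j of PQ1i is the cumulative TTD evaluated from the age-ranked storage at
# the end of timestep j; column 0 uses the initial (all-zero) storage state.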
# Use the transit time distribution and input timeseries to estimate
# the output timeseries for the instantaneous and timestep-averaged cases
C_Q1i, C_Q1i_raw, Q1i_observed_fraction = rsas.transport(PQ1i, C_J, C_old)
C_Q1m2, C_Q1m2_raw, Q1m2_observed_fraction = rsas.transport(PQ1m, C_J, C_old)
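# The internal estimate and the one rebuilt from the timestep-averaged TTD should
# agree closely; a quick check of their RMS difference (illustrative, not part of
# the original example) using only arrays already defined above:
rms_diff = np.sqrt(np.nanmean((C_Q1m1 - C_Q1m2)**2))
print("RMS difference, internal vs. rsas.transport (mean TTD): %.3g" % rms_diff)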
# Plot the results
fig = plt.figure(2)
plt.clf()
plt.step(data['datetime'], C_Q1m1, 'g', ls='--', label='mean rsas internal', lw=2, where='post')
plt.step(data['datetime'], C_Q1m2, 'b', ls=':', label='mean rsas.transport', lw=2, where='post')
plt.step(data['datetime'], C_Q1m2_raw, '0.5', ls=':', label='mean rsas.transport (obs part)', lw=2, where='post')
plt.plot(data['datetime'], C_Q1i, 'b:o', label='inst. rsas.transport', lw=1)
#plt.plot(data['datetime'], data['C_Q1'], 'r.', label='observed', lw=2)
plt.ylim((-2, 0))
plt.legend(loc=0)
plt.ylabel('Concentration [-]')
plt.xlabel('time')
plt.title('Outflow concentration')
plt.show()
| mit |
keflavich/pyspeckit | docs/conf.py | 4 | 12272 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_sphinx" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
import datetime
import os
import sys
try:
from sphinx_astropy.conf.v1 import * # noqa
except ImportError:
print('ERROR: the documentation requires the sphinx-astropy package to be'
' installed')
sys.exit(1)
# Get configuration information from setup.cfg
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
conf = ConfigParser()
conf.read([os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')])
setup_cfg = dict(conf.items('metadata'))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.2'
try:
import numpy
except ImportError:
print("Failed to import numpy")
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
rootpath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, rootpath)
#import numpydoc
#sys.path.insert(0, os.path.split(numpydoc.__file__)[0])
sys.path.insert(0, rootpath+"/docs/sphinxext/")
sys.path.append(os.path.abspath('sphinxext'))
sys.path.append(os.path.abspath('.'))
print("rootpath: ",rootpath)
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
#sys.path.insert(0, os.path.abspath('.'))
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions += ['edit_on_github', 'edit_on_bitbucket']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
html_sidebars = {'**':['globaltoc.html', 'localtoc.html', 'relations.html',
'sourcelink.html', 'searchbox.html']}
# General information about the project.
# This does not *have* to match the package name, but typically does
project = setup_cfg['package_name']
author = setup_cfg['author']
copyright = '{0}, {1}'.format(
    datetime.datetime.now().year, setup_cfg['author'])
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# read the docs mocks
__import__(setup_cfg['package_name'])
package = sys.modules[setup_cfg['package_name']]
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
return type(name, (), {})
else:
return Mock()
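# The Mock class above lets Sphinx import the package on machines (e.g. Read the
# Docs) where the heavy dependencies listed below are unavailable: calls return
# another Mock, and attribute lookups return either a dummy type (for capitalized
# names, so classes can still be referenced) or a further Mock.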
MOCK_MODULES = {'matplotlib', 'matplotlib.pyplot', 'matplotlib.figure',
                'matplotlib.widgets', 'matplotlib.cbook', 'pyfits', 'scipy',
                'scipy.interpolate', 'scipy.ndimage', 'pywcs', 'pytest',
                'h5py', 'atpy', 'progressbar'}
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = Mock()
# The short X.Y version.
#import pyspeckit
#version = pyspeckit.__version__
## The full version, including alpha/beta/rc tags.
#release = pyspeckit.__version__
#
# The short X.Y version.
version = package.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = package.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build','_static','_template']
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'agogo'
html_style = 'extra.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
html_theme_options = dict(
pagewidth = '1000px',
documentwidth = '760px',
sidebarwidth = '200px',
nosidebar=False,
headerbg="#666666",
headercolor1="#000000",
headercolor2="#000000",
headerlinkcolor="#FF9522",
linkcolor="#4a8f43",
textalign='left',
)
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "images/logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "images/logo.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static','_static/extra.css','_static/scipy.css','_static/astropy.css']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + ' Documentation',
                    author, 'manual')]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# Try to make autoclass include both __init__ and Class docstrings
autoclass_content = 'both'
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + ' Documentation',
[author], 1)]
## -- Options for the edit_on_github extension ----------------------------------------
if eval(setup_cfg.get('edit_on_github')):
extensions += ['edit_on_github']
versionmod = __import__(setup_cfg['package_name'] + '.version')
edit_on_github_project = setup_cfg['github_project']
if versionmod.version.release:
edit_on_github_branch = "v" + versionmod.version.version
else:
edit_on_github_branch = "master"
edit_on_github_source_root = ""
edit_on_github_doc_root = "docs"
edit_on_bitbucket_project = "pyspeckit/pyspeckit"
edit_on_bitbucket_source_root = ""
edit_on_bitbucket_doc_root = "doc"
| mit |
cauchycui/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise in the observations causes
large variance, as shown in the first plot. The slope of
every fitted line can vary considerably from one
realization to the next because of that noise.
Ridge regression essentially minimizes a penalised
version of the least-squares objective. The penalty
`shrinks` the values of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable, and the variance
of the fitted line itself is greatly reduced compared
with standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
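# For reference (standard ridge formulation, not specific to this script): Ridge
# minimizes ||y - Xw||^2 + alpha * ||w||^2, so alpha=.1 applies a mild shrinkage
# to the coefficients relative to ordinary least squares (alpha=0).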
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |