repo_name | path | copies | size | content | license
---|---|---|---|---|---|
flightgong/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 12 | 2020 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
This example fits a Gaussian Process model to the diabetes dataset.
The correlation parameters are determined by means of maximum likelihood
estimation (MLE). An anisotropic squared exponential correlation model and a
constant regression model are assumed. We also use a nugget = 1e-2 in order to
account for the (strong) noise in the targets.
We then compute a cross-validation estimate of the coefficient of
determination (R2) without re-performing MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instantiate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination
# using the cross_validation module (n_jobs controls how many CPUs are used)
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2))
| bsd-3-clause |
grigorisg9gr/menpo | menpo/image/base.py | 1 | 128607 | from __future__ import division
from warnings import warn
from collections import Iterable
import numpy as np
import PIL.Image as PILImage
from menpo.compatibility import basestring
from menpo.base import (Vectorizable, MenpoDeprecationWarning,
copy_landmarks_and_path)
from menpo.shape import PointCloud, bounding_box
from menpo.landmark import Landmarkable
from menpo.transform import (Translation, NonUniformScale, Rotation,
AlignmentUniformScale, Affine, scale_about_centre,
transform_about_centre)
from menpo.visualize.base import ImageViewer, LandmarkableViewable, Viewable
from .interpolation import scipy_interpolation, cython_interpolation
from .patches import extract_patches, set_patches
# Cache the greyscale luminosity coefficients as they are invariant.
_greyscale_luminosity_coef = None
class ImageBoundaryError(ValueError):
r"""
Exception that is thrown when an attempt is made to crop an image beyond
the edge of its boundary.
Parameters
----------
requested_min : ``(d,)`` `ndarray`
The per-dimension minimum index requested for the crop
requested_max : ``(d,)`` `ndarray`
The per-dimension maximum index requested for the crop
snapped_min : ``(d,)`` `ndarray`
The per-dimension minimum index that could be used if the crop was
constrained to the image boundaries.
snapped_max : ``(d,)`` `ndarray`
The per-dimension maximum index that could be used if the crop was
constrained to the image boundaries.
"""
def __init__(self, requested_min, requested_max, snapped_min,
snapped_max):
super(ImageBoundaryError, self).__init__()
self.requested_min = requested_min
self.requested_max = requested_max
self.snapped_min = snapped_min
self.snapped_max = snapped_max
def indices_for_image_of_shape(shape):
r"""
The indices of all pixels in an image with a given shape (without
channel information).
Parameters
----------
shape : `tuple`
The shape of the image.
Returns
-------
indices : ``(n_pixels, n_dims)`` `ndarray`
The indices of all the pixels in the image.
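Examples
--------
A minimal illustrative case for a 2 x 2 image:
>>> indices_for_image_of_shape((2, 2))
array([[0, 0],
       [0, 1],
       [1, 0],
       [1, 1]])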
"""
return np.indices(shape).reshape([len(shape), -1]).T
def normalize_pixels_range(pixels, error_on_unknown_type=True):
r"""
Normalize the given pixels to the Menpo valid floating point range, [0, 1].
This is a single place to handle normalising pixel ranges. At the moment
the supported types are uint8 and uint16.
Parameters
----------
pixels : `ndarray`
The pixels to normalize in the floating point range.
error_on_unknown_type : `bool`, optional
If ``True``, this method throws a ``ValueError`` if the given pixels
array is an unknown type. If ``False``, this method performs no
operation.
Returns
-------
normalized_pixels : `ndarray`
The normalized pixels in the range [0, 1].
Raises
------
ValueError
If ``pixels`` is an unknown type and ``error_on_unknown_type==True``
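Examples
--------
A minimal sketch of the uint8 case handled below (values chosen for
illustration):
>>> import numpy as np
>>> normalize_pixels_range(np.array([0, 51, 255], dtype=np.uint8))  # doctest: +SKIP
array([ 0. ,  0.2,  1. ])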
"""
dtype = pixels.dtype
if dtype == np.uint8:
max_range = 255.0
elif dtype == np.uint16:
max_range = 65535.0
else:
if error_on_unknown_type:
raise ValueError('Unexpected dtype ({}) - normalisation range '
'is unknown'.format(dtype))
else:
# Do nothing
return pixels
# This multiplication is quite a bit faster than just dividing - will
# automatically cast it up to float64
return pixels * (1.0 / max_range)
def denormalize_pixels_range(pixels, out_dtype):
"""
Denormalize the given pixels array into the range of the given out dtype.
If the given pixels are floating point or boolean then the values
are scaled appropriately and cast to the output dtype. If the pixels
are already the correct dtype they are immediately returned.
Floating point pixels must be in the range [0, 1].
Currently uint8 and uint16 output dtypes are supported.
Parameters
----------
pixels : `ndarray`
The pixels to denormalize.
out_dtype : `np.dtype`
The numpy data type to output and scale the values into.
Returns
-------
out_pixels : `ndarray`
Will be in the correct range and will have type ``out_dtype``.
Raises
------
ValueError
Pixels are floating point and range outside [0, 1]
ValueError
Input pixels dtype not in the set {float32, float64, bool}.
ValueError
Output dtype not in the set {uint8, uint16}
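Examples
--------
A minimal sketch of the float to uint8 path implemented below:
>>> import numpy as np
>>> denormalize_pixels_range(np.array([0.0, 0.5, 1.0]), np.uint8)  # doctest: +SKIP
array([  0, 127, 255], dtype=uint8)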
"""
in_dtype = pixels.dtype
if in_dtype == out_dtype:
return pixels
if np.issubclass_(in_dtype.type, np.floating) or in_dtype == np.float:
if np.issubclass_(out_dtype, np.floating) or out_dtype == np.float:
return pixels.astype(out_dtype)
else:
p_min = pixels.min()
p_max = pixels.max()
if p_min < 0.0 or p_max > 1.0:
raise ValueError('Unexpected input range [{}, {}] - pixels '
'must be in the range [0, 1]'.format(p_min,
p_max))
elif in_dtype != np.bool:
raise ValueError('Unexpected input dtype ({}) - only float32, float64 '
'and bool supported'.format(in_dtype))
if out_dtype == np.uint8:
max_range = 255.0
elif out_dtype == np.uint16:
max_range = 65535.0
else:
raise ValueError('Unexpected output dtype ({}) - normalisation range '
'is unknown'.format(out_dtype))
return (pixels * max_range).astype(out_dtype)
def channels_to_back(pixels):
r"""
Roll the channels from the front to the back for an image. If the image
that is passed is already a numpy array, then that is also fine.
Always returns a numpy array because our :map:`Image` containers do not
support channels at the back.
Parameters
----------
pixels : `ndarray`
The pixels or image to roll the channel back for.
Returns
-------
rolled_pixels : `ndarray`
The numpy array of pixels with the channels on the last axis.
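Examples
--------
For example, a ``(3, H, W)`` pixels array becomes ``(H, W, 3)``:
>>> import numpy as np
>>> channels_to_back(np.zeros((3, 10, 20))).shape
(10, 20, 3)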
"""
return np.require(np.rollaxis(pixels, 0, pixels.ndim), dtype=pixels.dtype,
requirements=['C'])
def channels_to_front(pixels):
r"""
Convert the given pixels array (channels assumed to be at the last axis
as is common in other imaging packages) into a numpy array.
Parameters
----------
pixels : ``(H, W, C)`` `buffer`
The pixels to convert to the Menpo channels at axis 0.
Returns
-------
pixels : ``(C, H, W)`` `ndarray`
Numpy array, channels as axis 0.
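Examples
--------
For example, an ``(H, W, 3)`` array becomes ``(3, H, W)``:
>>> import numpy as np
>>> channels_to_front(np.zeros((10, 20, 3))).shape
(3, 10, 20)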
"""
if not isinstance(pixels, np.ndarray):
pixels = np.array(pixels)
return np.require(np.rollaxis(pixels, -1), dtype=pixels.dtype,
requirements=['C'])
class Image(Vectorizable, Landmarkable, Viewable, LandmarkableViewable):
r"""
An n-dimensional image.
Images are n-dimensional homogeneous regular arrays of data. Each
spatially distinct location in the array is referred to as a `pixel`.
At a pixel, ``k`` distinct pieces of information can be stored. Each
datum at a pixel is referred to as being in a `channel`. All pixels in
the image have the same number of channels, and all channels have the
same data-type (`float64`).
Parameters
----------
image_data : ``(C, M, N ..., Q)`` `ndarray`
Array representing the image pixels, with the first axis being
channels.
copy : `bool`, optional
If ``False``, the ``image_data`` will not be copied on assignment.
Note that this will miss out on additional checks. Further note that we
still demand that the array is C-contiguous - if it isn't, a copy will
be generated anyway.
In general, this should only be used if you know what you are doing.
Raises
------
Warning
If ``copy=False`` cannot be honoured
ValueError
If the pixel array is malformed
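Examples
--------
A minimal sketch of construction from a channels-first array:
>>> import numpy as np
>>> im = Image(np.zeros((3, 120, 80)))
>>> im.n_channels, im.shape
(3, (120, 80))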
"""
def __init__(self, image_data, copy=True):
super(Image, self).__init__()
if not copy:
if not image_data.flags.c_contiguous:
image_data = np.array(image_data, copy=True, order='C')
warn('The copy flag was NOT honoured. A copy HAS been made. '
'Please ensure the data you pass is C-contiguous.')
else:
image_data = np.array(image_data, copy=True, order='C')
# Degenerate case whereby we can just put the extra axis
# on ourselves
if image_data.ndim == 2:
# Ensures that the data STAYS C-contiguous
image_data = image_data.reshape((1,) + image_data.shape)
if image_data.ndim < 2:
raise ValueError(
"Pixel array has to be 2D (implicitly 1 channel, "
"2D shape) or 3D+ (n_channels, 2D+ shape) "
" - a {}D array "
"was provided".format(image_data.ndim))
self.pixels = image_data
@classmethod
def init_blank(cls, shape, n_channels=1, fill=0, dtype=np.float):
r"""
Returns a blank image.
Parameters
----------
shape : `tuple` or `list`
The shape of the image. Any floating point values are rounded up
to the nearest integer.
n_channels : `int`, optional
The number of channels to create the image with.
fill : `int`, optional
The value to fill all pixels with.
dtype : numpy data type, optional
The data type of the image.
Returns
-------
blank_image : :map:`Image`
A new image of the requested size.
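Examples
--------
A minimal sketch:
>>> im = Image.init_blank((120, 80), n_channels=3, fill=0.5)
>>> im.shape, im.n_channels
((120, 80), 3)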
"""
# Ensure that the '+' operator means concatenate tuples
shape = tuple(np.ceil(shape).astype(np.int))
if fill == 0:
pixels = np.zeros((n_channels,) + shape, dtype=dtype)
else:
pixels = np.ones((n_channels,) + shape, dtype=dtype) * fill
# We know there is no need to copy...
return cls(pixels, copy=False)
@classmethod
def init_from_rolled_channels(cls, pixels):
r"""
Deprecated - please use the equivalent ``init_from_channels_at_back`` method.
"""
warn('This method is no longer supported and will be removed in a '
'future version of Menpo. '
'Use .init_from_channels_at_back instead.',
MenpoDeprecationWarning)
return cls.init_from_channels_at_back(pixels)
@classmethod
def init_from_channels_at_back(cls, pixels):
r"""
Create an Image from a set of pixels where the channels axis is on
the last axis (the back). This is common in other frameworks, and
therefore this method provides a convenient means of creating a menpo
Image from such data. Note that a copy is always created due to the
need to rearrange the data.
Parameters
----------
pixels : ``(M, N ..., Q, C)`` `ndarray`
Array representing the image pixels, with the last axis being
channels.
Returns
-------
image : :map:`Image`
A new image from the given pixels, with the FIRST axis as the
channels.
Raises
------
ValueError
If image is not at least 2D, i.e. has at least 2 dimensions plus
the channels at the end.
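Examples
--------
For example, for pixels stored ``(H, W, C)`` as in most imaging libraries:
>>> import numpy as np
>>> im = Image.init_from_channels_at_back(np.zeros((50, 60, 3)))
>>> im.pixels.shape
(3, 50, 60)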
"""
if pixels.ndim == 2:
pixels = pixels[..., None]
if pixels.ndim < 2:
raise ValueError(
"Pixel array has to be 2D "
"(2D shape, implicitly 1 channel) "
"or 3D+ (2D+ shape, n_channels) "
" - a {}D array "
"was provided".format(pixels.ndim))
return cls(channels_to_front(pixels))
@classmethod
def init_from_pointcloud(cls, pointcloud, group=None, boundary=0,
n_channels=1, fill=0, dtype=np.float,
return_transform=False):
r"""
Create an Image that is big enough to contain the given pointcloud.
The pointcloud will be translated to the origin and then translated
according to its bounds in order to fit inside the new image.
An optional boundary can be provided in order to increase the space
around the boundary of the pointcloud. The boundary will be added
to *all sides of the image* and so a boundary of 5 provides 10 pixels
of boundary total for each dimension.
Parameters
----------
pointcloud : :map:`PointCloud`
Pointcloud to place inside the newly created image.
group : `str`, optional
If ``None``, the pointcloud will only be used to create the image.
If a `str` then the pointcloud will be attached as a landmark
group to the image, with the given string as key.
boundary : `float`
An optional padding distance that is added to the pointcloud bounds.
Default is ``0``, meaning the tightest possible containing image is
returned.
n_channels : `int`, optional
The number of channels to create the image with.
fill : `int`, optional
The value to fill all pixels with.
dtype : numpy data type, optional
The data type of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
adjust the PointCloud in order to build the image, is returned.
Returns
-------
image : ``type(cls)`` Image or subclass
A new image with the same size as the given pointcloud, optionally
with the pointcloud attached as landmarks.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
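Examples
--------
An illustrative sketch for a simple 2D pointcloud (the resulting shape is
the pointcloud range plus the boundary on every side):
>>> import numpy as np
>>> from menpo.shape import PointCloud
>>> pc = PointCloud(np.array([[10., 20.], [50., 70.]]))
>>> im = Image.init_from_pointcloud(pc, boundary=5)
>>> im.shape  # doctest: +SKIP
(50, 60)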
"""
# Translate pointcloud to the origin
minimum = pointcloud.bounds(boundary=boundary)[0]
tr = Translation(-minimum)
origin_pc = tr.apply(pointcloud)
image_shape = origin_pc.range(boundary=boundary)
new_image = cls.init_blank(image_shape, n_channels=n_channels,
fill=fill, dtype=dtype)
if group is not None:
new_image.landmarks[group] = origin_pc
if return_transform:
return new_image, tr
else:
return new_image
def as_masked(self, mask=None, copy=True):
r"""
Return a copy of this image with an attached mask behavior.
A custom mask may be provided, or ``None``. See the :map:`MaskedImage`
constructor for details of how the kwargs will be handled.
Parameters
----------
mask : ``(self.shape)`` `ndarray` or :map:`BooleanImage`
A mask to attach to the newly generated masked image.
copy : `bool`, optional
If ``False``, the produced :map:`MaskedImage` will share pixels with
``self``. Only suggested to be used for performance.
Returns
-------
masked_image : :map:`MaskedImage`
An image with the same pixels and landmarks as this one, but with
a mask.
"""
from menpo.image import MaskedImage
return copy_landmarks_and_path(self,
MaskedImage(self.pixels,
mask=mask, copy=copy))
@property
def n_dims(self):
r"""
The number of dimensions in the image. The minimum possible ``n_dims``
is 2.
:type: `int`
"""
return len(self.shape)
@property
def n_pixels(self):
r"""
Total number of pixels in the image ``(prod(shape),)``
:type: `int`
"""
return self.pixels[0, ...].size
@property
def n_elements(self):
r"""
Total number of data points in the image
(``prod(shape) * n_channels``)
:type: `int`
"""
return self.pixels.size
@property
def n_channels(self):
"""
The number of channels on each pixel in the image.
:type: `int`
"""
return self.pixels.shape[0]
@property
def width(self):
r"""
The width of the image.
This is the width according to image semantics, and is thus the size
of the **last** dimension.
:type: `int`
"""
return self.pixels.shape[-1]
@property
def height(self):
r"""
The height of the image.
This is the height according to image semantics, and is thus the size
of the **second to last** dimension.
:type: `int`
"""
return self.pixels.shape[-2]
@property
def shape(self):
r"""
The shape of the image
(with ``n_channel`` values at each point).
:type: `tuple`
"""
return self.pixels.shape[1:]
def bounds(self):
r"""
The bounds of the image, minimum is always (0, 0). The maximum is
the maximum **index** that can be used to index into the image for each
dimension. Therefore, bounds will be of the form:
((0, 0), (self.height - 1, self.width - 1)) for a 2D image.
Note that this is akin to supporting a nearest neighbour interpolation.
Although the *actual* maximum subpixel value would be something
like ``self.height - eps`` where ``eps`` is some value arbitrarily
close to 0, this value at least allows sampling without worrying about
floating point error.
:type: `tuple`
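Examples
--------
For example, for a 100 x 200 2D image:
>>> Image.init_blank((100, 200)).bounds()
((0, 0), (99, 199))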
"""
return (0,) * self.n_dims, tuple(s - 1 for s in self.shape)
def diagonal(self):
r"""
The diagonal size of this image
:type: `float`
"""
return np.sqrt(np.sum(np.array(self.shape) ** 2))
def centre(self):
r"""
The geometric centre of the Image - the subpixel that is in the
middle.
Useful for aligning shapes and images.
:type: (``n_dims``,) `ndarray`
"""
return np.array(self.shape, dtype=np.double) / 2
def _str_shape(self):
if self.n_dims > 2:
return ' x '.join(str(dim) for dim in self.shape)
elif self.n_dims == 2:
return '{}W x {}H'.format(self.width, self.height)
def indices(self):
r"""
Return the indices of all pixels in this image.
:type: (``n_pixels``, ``n_dims``) `ndarray`
"""
return indices_for_image_of_shape(self.shape)
def _as_vector(self, keep_channels=False):
r"""
The vectorized form of this image.
Parameters
----------
keep_channels : `bool`, optional
========== =============================
Value Return shape
========== =============================
`False` ``(n_channels * n_pixels,)``
`True` ``(n_channels, n_pixels)``
========== =============================
Returns
-------
vec : (See ``keep_channels`` above) `ndarray`
Flattened representation of this image, containing all pixel
and channel information.
"""
if keep_channels:
return self.pixels.reshape([self.n_channels, -1])
else:
return self.pixels.ravel()
def from_vector(self, vector, n_channels=None, copy=True):
r"""
Takes a flattened vector and returns a new image formed by reshaping
the vector to the correct pixels and channels.
The `n_channels` argument is useful for when we want to add an extra
channel to an image but maintain the shape. For example, when
calculating the gradient.
Note that landmarks are transferred in the process.
Parameters
----------
vector : ``(n_parameters,)`` `ndarray`
A flattened vector of all pixels and channels of an image.
n_channels : `int`, optional
If given, will assume that vector is the same shape as this image,
but with a possibly different number of channels.
copy : `bool`, optional
If ``False``, the vector will not be copied in creating the new
image.
Returns
-------
image : :map:`Image`
New image of same shape as this image and the number of
specified channels.
Raises
------
Warning
If the ``copy=False`` flag cannot be honored
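Examples
--------
A round-trip sketch through the vector representation:
>>> im = Image.init_blank((10, 10), n_channels=2)
>>> vec = im.as_vector()
>>> vec.shape
(200,)
>>> im.from_vector(vec).shape
(10, 10)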
"""
# This is useful for when we want to add an extra channel to an image
# but maintain the shape. For example, when calculating the gradient
n_channels = self.n_channels if n_channels is None else n_channels
image_data = vector.reshape((n_channels,) + self.shape)
new_image = Image(image_data, copy=copy)
new_image.landmarks = self.landmarks
return new_image
def _from_vector_inplace(self, vector, copy=True):
r"""
Takes a flattened vector and update this image by
reshaping the vector to the correct dimensions.
Parameters
----------
vector : ``(n_pixels,)`` `bool ndarray`
A vector of all the pixels of a :map:`BooleanImage`.
copy: `bool`, optional
If ``False``, the vector will be set as the pixels. If ``True``, a
copy of the vector is taken.
Raises
------
Warning
If ``copy=False`` flag cannot be honored
Note
----
For :map:`BooleanImage` this is rebuilding a boolean image **itself**
from boolean values. The mask is in no way interpreted in performing
the operation, in contrast to :map:`MaskedImage`, where only the masked
region is used in :meth:`from_vector_inplace` and :meth:`as_vector`.
"""
image_data = vector.reshape(self.pixels.shape)
if not copy:
if not image_data.flags.c_contiguous:
warn('The copy flag was NOT honoured. A copy HAS been made. '
'Please ensure the data you pass is C-contiguous.')
image_data = np.array(image_data, copy=True, order='C',
dtype=image_data.dtype)
else:
image_data = np.array(image_data, copy=True, order='C',
dtype=image_data.dtype)
self.pixels = image_data
def extract_channels(self, channels):
r"""
A copy of this image with only the specified channels.
Parameters
----------
channels : `int` or `[int]`
The channel index or `list` of channel indices to retain.
Returns
-------
image : `type(self)`
A copy of this image with only the channels requested.
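Examples
--------
A minimal sketch:
>>> im = Image.init_blank((5, 5), n_channels=3)
>>> im.extract_channels([0, 2]).n_channels
2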
"""
copy = self.copy()
if not isinstance(channels, list):
channels = [channels] # ensure we don't remove the channel axis
copy.pixels = self.pixels[channels]
return copy
def as_histogram(self, keep_channels=True, bins='unique'):
r"""
Histogram binning of the values of this image.
Parameters
----------
keep_channels : `bool`, optional
If set to ``False``, it returns a single histogram for all the
channels of the image. If set to ``True``, it returns a `list` of
histograms, one for each channel.
bins : ``{unique}``, positive `int` or sequence of scalars, optional
If set equal to ``'unique'``, the bins of the histograms are centred
on the unique values of each channel. If set equal to a positive
`int`, then this is the number of bins. If set equal to a
sequence of scalars, these will be used as bins centres.
Returns
-------
hist : `ndarray` or `list` with ``n_channels`` `ndarrays` inside
The histogram(s). If ``keep_channels=False``, then hist is an
`ndarray`. If ``keep_channels=True``, then hist is a `list` with
``len(hist)=n_channels``.
bin_edges : `ndarray` or `list` with `n_channels` `ndarrays` inside
An array or a list of arrays corresponding to the above histograms
that store the bins' edges.
Raises
------
ValueError
Bins can be either 'unique', positive int or a sequence of scalars.
Examples
--------
Visualizing the histogram when a list of array bin edges is provided:
>>> hist, bin_edges = image.as_histogram()
>>> for k in range(len(hist)):
>>> plt.subplot(1, len(hist), k + 1)
>>> width = 0.7 * (bin_edges[k][1] - bin_edges[k][0])
>>> centre = (bin_edges[k][:-1] + bin_edges[k][1:]) / 2
>>> plt.bar(centre, hist[k], align='center', width=width)
"""
# parse options
if isinstance(bins, basestring):
if bins == 'unique':
bins = 0
else:
raise ValueError("Bins can be either 'unique', positive int or"
"a sequence of scalars.")
elif isinstance(bins, int) and bins < 1:
raise ValueError("Bins can be either 'unique', positive int or a "
"sequence of scalars.")
# compute histogram
vec = self.as_vector(keep_channels=keep_channels)
if len(vec.shape) == 1 or vec.shape[0] == 1:
if bins == 0:
bins = np.unique(vec)
hist, bin_edges = np.histogram(vec, bins=bins)
else:
hist = []
bin_edges = []
num_bins = bins
for ch in range(vec.shape[0]):
if bins == 0:
num_bins = np.unique(vec[ch, :])
h_tmp, c_tmp = np.histogram(vec[ch, :], bins=num_bins)
hist.append(h_tmp)
bin_edges.append(c_tmp)
return hist, bin_edges
def _view_2d(self, figure_id=None, new_figure=False, channels=None,
interpolation='bilinear', cmap_name=None, alpha=1.,
render_axes=False, axes_font_name='sans-serif',
axes_font_size=10, axes_font_style='normal',
axes_font_weight='normal', axes_x_limits=None,
axes_y_limits=None, axes_x_ticks=None, axes_y_ticks=None,
figure_size=(10, 8)):
r"""
View the image using the default image viewer. This method will appear
on the Image as ``view`` if the Image is 2D.
Parameters
----------
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
channels : `int` or `list` of `int` or ``all`` or ``None``
If `int` or `list` of `int`, the specified channel(s) will be
rendered. If ``all``, all the channels will be rendered in subplots.
If ``None`` and the image is RGB, it will be rendered in RGB mode.
If ``None`` and the image is not RGB, it is equivalent to ``all``.
interpolation : See Below, optional
The interpolation used to render the image. For example, if
``bilinear``, the image will be smooth and if ``nearest``, the
image will be pixelated.
Example options ::
{none, nearest, bilinear, bicubic, spline16, spline36,
hanning, hamming, hermite, kaiser, quadric, catrom, gaussian,
bessel, mitchell, sinc, lanczos}
cmap_name: `str`, optional,
If ``None``, single channel and three channel images default
to greyscale and rgb colormaps respectively.
alpha : `float`, optional
The alpha blending value, between 0 (transparent) and 1 (opaque).
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See Below, optional
The font of the axes.
Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : {``normal``, ``italic``, ``oblique``}, optional
The font style of the axes.
axes_font_weight : See Below, optional
The font weight of the axes.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the x axis. If `float`, then it sets padding on the
right and left of the Image as a percentage of the Image's width. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then
the limits are set automatically.
axes_y_limits : (`float`, `float`) `tuple` or ``None``, optional
The limits of the y axis. If `float`, then it sets padding on the
top and bottom of the Image as a percentage of the Image's height. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then
the limits are set automatically.
axes_x_ticks : `list` or `tuple` or ``None``, optional
The ticks of the x axis.
axes_y_ticks : `list` or `tuple` or ``None``, optional
The ticks of the y axis.
figure_size : (`float`, `float`) `tuple` or ``None``, optional
The size of the figure in inches.
Returns
-------
viewer : `ImageViewer`
The image viewing object.
"""
return ImageViewer(figure_id, new_figure, self.n_dims,
self.pixels, channels=channels).render(
interpolation=interpolation, cmap_name=cmap_name, alpha=alpha,
render_axes=render_axes, axes_font_name=axes_font_name,
axes_font_size=axes_font_size, axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight, axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits, axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks, figure_size=figure_size)
def view_widget(self, browser_style='buttons', figure_size=(10, 8),
style='coloured'):
r"""
Visualizes the image object using an interactive widget. Currently
only supports the rendering of 2D images.
Parameters
----------
browser_style : {``'buttons'``, ``'slider'``}, optional
It defines whether the selector of the images will have the form of
plus/minus buttons or a slider.
figure_size : (`int`, `int`), optional
The initial size of the rendered figure.
style : {``'coloured'``, ``'minimal'``}, optional
If ``'coloured'``, then the style of the widget will be coloured. If
``minimal``, then the style is simple using black and white colours.
"""
try:
from menpowidgets import visualize_images
visualize_images(self, figure_size=figure_size, style=style,
browser_style=browser_style)
except ImportError:
from menpo.visualize.base import MenpowidgetsMissingError
raise MenpowidgetsMissingError()
def _view_landmarks_2d(self, channels=None, group=None,
with_labels=None, without_labels=None,
figure_id=None, new_figure=False,
interpolation='bilinear', cmap_name=None, alpha=1.,
render_lines=True, line_colour=None, line_style='-',
line_width=1, render_markers=True, marker_style='o',
marker_size=5, marker_face_colour=None,
marker_edge_colour=None, marker_edge_width=1.,
render_numbering=False,
numbers_horizontal_align='center',
numbers_vertical_align='bottom',
numbers_font_name='sans-serif', numbers_font_size=10,
numbers_font_style='normal',
numbers_font_weight='normal',
numbers_font_colour='k', render_legend=False,
legend_title='', legend_font_name='sans-serif',
legend_font_style='normal', legend_font_size=10,
legend_font_weight='normal',
legend_marker_scale=None,
legend_location=2, legend_bbox_to_anchor=(1.05, 1.),
legend_border_axes_pad=None, legend_n_columns=1,
legend_horizontal_spacing=None,
legend_vertical_spacing=None, legend_border=True,
legend_border_padding=None, legend_shadow=False,
legend_rounded_corners=False, render_axes=False,
axes_font_name='sans-serif', axes_font_size=10,
axes_font_style='normal', axes_font_weight='normal',
axes_x_limits=None, axes_y_limits=None,
axes_x_ticks=None, axes_y_ticks=None,
figure_size=(10, 8)):
"""
Visualize the landmarks. This method will appear on the Image as
``view_landmarks`` if the Image is 2D.
Parameters
----------
channels : `int` or `list` of `int` or ``all`` or ``None``
If `int` or `list` of `int`, the specified channel(s) will be
rendered. If ``all``, all the channels will be rendered in subplots.
If ``None`` and the image is RGB, it will be rendered in RGB mode.
If ``None`` and the image is not RGB, it is equivalent to ``all``.
group : `str` or ``None``, optional
The landmark group to be visualized. If ``None`` and there are more
than one landmark groups, an error is raised.
with_labels : ``None`` or `str` or `list` of `str`, optional
If not ``None``, only show the given label(s). Should **not** be
used with the ``without_labels`` kwarg.
without_labels : ``None`` or `str` or `list` of `str`, optional
If not ``None``, show all except the given label(s). Should **not**
be used with the ``with_labels`` kwarg.
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
interpolation : See Below, optional
The interpolation used to render the image. For example, if
``bilinear``, the image will be smooth and if ``nearest``, the
image will be pixelated. Example options ::
{none, nearest, bilinear, bicubic, spline16, spline36, hanning,
hamming, hermite, kaiser, quadric, catrom, gaussian, bessel,
mitchell, sinc, lanczos}
cmap_name: `str`, optional,
If ``None``, single channel and three channel images default
to greyscale and rgb colormaps respectively.
alpha : `float`, optional
The alpha blending value, between 0 (transparent) and 1 (opaque).
render_lines : `bool`, optional
If ``True``, the edges will be rendered.
line_colour : See Below, optional
The colour of the lines.
Example options::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
line_style : ``{-, --, -., :}``, optional
The style of the lines.
line_width : `float`, optional
The width of the lines.
render_markers : `bool`, optional
If ``True``, the markers will be rendered.
marker_style : See Below, optional
The style of the markers. Example options ::
{., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8}
marker_size : `int`, optional
The size of the markers in points.
marker_face_colour : See Below, optional
The face (filling) colour of the markers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_colour : See Below, optional
The edge colour of the markers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_width : `float`, optional
The width of the markers' edge.
render_numbering : `bool`, optional
If ``True``, the landmarks will be numbered.
numbers_horizontal_align : ``{center, right, left}``, optional
The horizontal alignment of the numbers' texts.
numbers_vertical_align : ``{center, top, bottom, baseline}``, optional
The vertical alignment of the numbers' texts.
numbers_font_name : See Below, optional
The font of the numbers. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
numbers_font_size : `int`, optional
The font size of the numbers.
numbers_font_style : ``{normal, italic, oblique}``, optional
The font style of the numbers.
numbers_font_weight : See Below, optional
The font weight of the numbers.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
numbers_font_colour : See Below, optional
The font colour of the numbers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
render_legend : `bool`, optional
If ``True``, the legend will be rendered.
legend_title : `str`, optional
The title of the legend.
legend_font_name : See below, optional
The font of the legend. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
legend_font_style : ``{normal, italic, oblique}``, optional
The font style of the legend.
legend_font_size : `int`, optional
The font size of the legend.
legend_font_weight : See Below, optional
The font weight of the legend.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
legend_marker_scale : `float`, optional
The relative size of the legend markers with respect to the originally drawn markers.
legend_location : `int`, optional
The location of the legend. The predefined values are:
=============== ==
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== ==
legend_bbox_to_anchor : (`float`, `float`) `tuple`, optional
The bbox that the legend will be anchored to.
legend_border_axes_pad : `float`, optional
The pad between the axes and legend border.
legend_n_columns : `int`, optional
The number of the legend's columns.
legend_horizontal_spacing : `float`, optional
The spacing between the columns.
legend_vertical_spacing : `float`, optional
The vertical space between the legend entries.
legend_border : `bool`, optional
If ``True``, a frame will be drawn around the legend.
legend_border_padding : `float`, optional
The fractional whitespace inside the legend border.
legend_shadow : `bool`, optional
If ``True``, a shadow will be drawn behind legend.
legend_rounded_corners : `bool`, optional
If ``True``, the frame's corners will be rounded (fancybox).
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See Below, optional
The font of the axes. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : ``{normal, italic, oblique}``, optional
The font style of the axes.
axes_font_weight : See Below, optional
The font weight of the axes.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the x axis. If `float`, then it sets padding on the
right and left of the Image as a percentage of the Image's width. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then
the limits are set automatically.
axes_y_limits : (`float`, `float`) `tuple` or ``None``, optional
The limits of the y axis. If `float`, then it sets padding on the
top and bottom of the Image as a percentage of the Image's height. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then
the limits are set automatically.
axes_x_ticks : `list` or `tuple` or ``None``, optional
The ticks of the x axis.
axes_y_ticks : `list` or `tuple` or ``None``, optional
The ticks of the y axis.
figure_size : (`float`, `float`) `tuple` or ``None``, optional
The size of the figure in inches.
Raises
------
ValueError
If both ``with_labels`` and ``without_labels`` are passed.
ValueError
If the landmark manager doesn't contain the provided group label.
"""
from menpo.visualize import view_image_landmarks
return view_image_landmarks(
self, channels, False, group, with_labels, without_labels,
figure_id, new_figure, interpolation, cmap_name, alpha,
render_lines, line_colour, line_style, line_width,
render_markers, marker_style, marker_size, marker_face_colour,
marker_edge_colour, marker_edge_width, render_numbering,
numbers_horizontal_align, numbers_vertical_align,
numbers_font_name, numbers_font_size, numbers_font_style,
numbers_font_weight, numbers_font_colour, render_legend,
legend_title, legend_font_name, legend_font_style,
legend_font_size, legend_font_weight, legend_marker_scale,
legend_location, legend_bbox_to_anchor, legend_border_axes_pad,
legend_n_columns, legend_horizontal_spacing,
legend_vertical_spacing, legend_border, legend_border_padding,
legend_shadow, legend_rounded_corners, render_axes, axes_font_name,
axes_font_size, axes_font_style, axes_font_weight, axes_x_limits,
axes_y_limits, axes_x_ticks, axes_y_ticks, figure_size)
def crop(self, min_indices, max_indices, constrain_to_boundary=False,
return_transform=False):
r"""
Return a cropped copy of this image using the given minimum and
maximum indices. Landmarks are correctly adjusted so they maintain
their position relative to the newly cropped image.
Parameters
----------
min_indices : ``(n_dims,)`` `ndarray`
The minimum index over each dimension.
max_indices : ``(n_dims,)`` `ndarray`
The maximum index over each dimension.
constrain_to_boundary : `bool`, optional
If ``True`` the crop will be snapped to not go beyond this image's
boundary. If ``False``, an :map:`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
cropped_image : `type(self)`
A new instance of self, but cropped.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ValueError
``min_indices`` and ``max_indices`` both have to be of length
``n_dims``. All ``max_indices`` must be greater than
``min_indices``.
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
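Examples
--------
An illustrative sketch (the crop spans ``max_indices - min_indices``
pixels per dimension):
>>> im = Image.init_blank((100, 100))
>>> im.crop((10, 10), (50, 60)).shape  # doctest: +SKIP
(40, 50)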
"""
min_indices = np.floor(min_indices)
max_indices = np.ceil(max_indices)
if not (min_indices.size == max_indices.size == self.n_dims):
raise ValueError(
"Both min and max indices should be 1D numpy arrays of"
" length n_dims ({})".format(self.n_dims))
elif not np.all(max_indices > min_indices):
raise ValueError("All max indices must be greater that the min "
"indices")
min_bounded = self.constrain_points_to_bounds(min_indices)
max_bounded = self.constrain_points_to_bounds(max_indices)
all_max_bounded = np.all(min_bounded == min_indices)
all_min_bounded = np.all(max_bounded == max_indices)
if not (constrain_to_boundary or all_max_bounded or all_min_bounded):
# points have been constrained and the user didn't want this -
raise ImageBoundaryError(min_indices, max_indices,
min_bounded, max_bounded)
new_shape = (max_bounded - min_bounded).astype(np.int)
return self.warp_to_shape(new_shape, Translation(min_bounded), order=0,
warp_landmarks=True,
return_transform=return_transform)
def crop_to_pointcloud(self, pointcloud, boundary=0,
constrain_to_boundary=True,
return_transform=False):
r"""
Return a copy of this image cropped so that it is bounded around a
pointcloud with an optional ``n_pixel`` boundary.
Parameters
----------
pointcloud : :map:`PointCloud`
The pointcloud to crop around.
boundary : `int`, optional
An extra padding to be added all around the landmarks bounds.
constrain_to_boundary : `bool`, optional
If ``True`` the crop will be snapped to not go beyond this image's
boundary. If ``False``, an :map:`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
image : :map:`Image`
A copy of this image cropped to the bounds of the pointcloud.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
"""
min_indices, max_indices = pointcloud.bounds(boundary=boundary)
return self.crop(min_indices, max_indices,
constrain_to_boundary=constrain_to_boundary,
return_transform=return_transform)
def crop_to_landmarks(self, group=None, boundary=0,
constrain_to_boundary=True,
return_transform=False):
r"""
Return a copy of this image cropped so that it is bounded around a set
of landmarks with an optional ``n_pixel`` boundary
Parameters
----------
group : `str`, optional
The key of the landmark set that should be used. If ``None``
and if there is only one set of landmarks, this set will be used.
boundary : `int`, optional
An extra padding to be added all around the landmarks bounds.
constrain_to_boundary : `bool`, optional
If ``True`` the crop will be snapped to not go beyond this image's
boundary. If ``False``, an :map:`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
image : :map:`Image`
A copy of this image cropped to its landmarks.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
"""
pc = self.landmarks[group]
return self.crop_to_pointcloud(
pc, boundary=boundary, constrain_to_boundary=constrain_to_boundary,
return_transform=return_transform)
def crop_to_pointcloud_proportion(self, pointcloud, boundary_proportion,
minimum=True,
constrain_to_boundary=True,
return_transform=False):
r"""
Return a copy of this image cropped so that it is bounded around a
pointcloud with an optional ``n_pixel`` boundary.
Parameters
----------
pointcloud : :map:`PointCloud`
The pointcloud to crop around.
boundary_proportion : `float`
Additional padding to be added all around the landmarks
bounds defined as a proportion of the landmarks range. See
the minimum parameter for a definition of how the range is
calculated.
minimum : `bool`, optional
If ``True`` the specified proportion is relative to the minimum
value of the pointcloud's per-dimension range; if ``False`` w.r.t.
the maximum value of the pointcloud's per-dimension range.
constrain_to_boundary : `bool`, optional
If ``True``, the crop will be snapped to not go beyond this image's
boundary. If ``False``, an :map:`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
image : :map:`Image`
A copy of this image cropped to the border proportional to
the pointcloud spread or range.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
"""
if minimum:
boundary = boundary_proportion * np.min(pointcloud.range())
else:
boundary = boundary_proportion * np.max(pointcloud.range())
return self.crop_to_pointcloud(
pointcloud, boundary=boundary,
constrain_to_boundary=constrain_to_boundary,
return_transform=return_transform)
def crop_to_landmarks_proportion(self, boundary_proportion,
group=None, minimum=True,
constrain_to_boundary=True,
return_transform=False):
r"""
Crop this image to be bounded around a set of landmarks with a
border proportional to the landmark spread or range.
Parameters
----------
boundary_proportion : `float`
Additional padding to be added all around the landmarks
bounds defined as a proportion of the landmarks range. See
the minimum parameter for a definition of how the range is
calculated.
group : `str`, optional
The key of the landmark set that should be used. If ``None``
and if there is only one set of landmarks, this set will be used.
minimum : `bool`, optional
If ``True`` the specified proportion is relative to the minimum
value of the landmarks' per-dimension range; if ``False`` w.r.t. the
maximum value of the landmarks' per-dimension range.
constrain_to_boundary : `bool`, optional
If ``True``, the crop will be snapped to not go beyond this image's
boundary. If ``False``, an :map:`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
image : :map:`Image`
This image, cropped to its landmarks with a border proportional to
the landmark spread or range.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
"""
pc = self.landmarks[group]
return self.crop_to_pointcloud_proportion(
pc, boundary_proportion, minimum=minimum,
constrain_to_boundary=constrain_to_boundary,
return_transform=return_transform)
def constrain_points_to_bounds(self, points):
r"""
Constrains the points provided to be within the bounds of this image.
Parameters
----------
points : ``(d,)`` `ndarray`
Points to be snapped to the image boundaries.
Returns
-------
bounded_points : ``(d,)`` `ndarray`
Points snapped to not stray outside the image edges.
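Examples
--------
A minimal sketch for a 10 x 10 image:
>>> import numpy as np
>>> im = Image.init_blank((10, 10))
>>> im.constrain_points_to_bounds(np.array([-3., 12.]))  # doctest: +SKIP
array([  0.,  10.])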
"""
bounded_points = points.copy()
# check we don't stray under any edges
bounded_points[bounded_points < 0] = 0
# check we don't stray over any edges
shape = np.array(self.shape)
over_image = (shape - bounded_points) < 0
bounded_points[over_image] = shape[over_image]
return bounded_points
def extract_patches(self, patch_centers, patch_shape=(16, 16),
sample_offsets=None, as_single_array=True):
r"""
Extract a set of patches from an image. Given a set of patch centers
and a patch size, patches are extracted from within the image, centred
on the given coordinates. Sample offsets denote a set of offsets to
extract from within a patch. This is very useful if you want to extract
a dense set of features around a set of landmarks and simply sample the
same grid of patches around the landmarks.
If sample offsets are used, the offsets for each patch are accessed by
slicing the resulting `list`. So for 2 offsets, the offset patches for
the first centre would be ``patches[:2]``.
Currently only 2D images are supported.
Parameters
----------
patch_centers : :map:`PointCloud`
The centers to extract patches around.
patch_shape : ``(1, n_dims)`` `tuple` or `ndarray`, optional
The size of the patch to extract
sample_offsets : ``(n_offsets, n_dims)`` `ndarray` or ``None``, optional
The offsets to sample from within a patch. So ``(0, 0)`` is the
centre of the patch (no offset) and ``(1, 0)`` would be sampling the
patch from 1 pixel up the first axis away from the centre.
If ``None``, then no offsets are applied.
as_single_array : `bool`, optional
If ``True``, an ``(n_center, n_offset, n_channels, patch_shape)``
`ndarray`, thus a single numpy array is returned containing each
patch. If ``False``, a `list` of ``n_center * n_offset``
:map:`Image` objects is returned representing each patch.
Returns
-------
patches : `list` or `ndarray`
Returns the extracted patches. Returns an `ndarray` if
``as_single_array=True`` and a `list` if
``as_single_array=False``.
Raises
------
ValueError
If image is not 2D
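Examples
--------
An illustrative sketch of the returned array layout for two centres and
the default single (zero) offset:
>>> import numpy as np
>>> from menpo.shape import PointCloud
>>> im = Image.init_blank((100, 100), n_channels=3)
>>> centres = PointCloud(np.array([[50., 50.], [25., 75.]]))
>>> im.extract_patches(centres, patch_shape=(16, 16)).shape  # doctest: +SKIP
(2, 1, 3, 16, 16)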
"""
if self.n_dims != 2:
raise ValueError('Only two dimensional patch extraction is '
'currently supported.')
if sample_offsets is None:
sample_offsets = np.zeros([1, 2], dtype=np.intp)
else:
sample_offsets = np.require(sample_offsets, dtype=np.intp)
patch_centers = np.require(patch_centers.points, dtype=np.float,
requirements=['C'])
single_array = extract_patches(self.pixels, patch_centers,
np.asarray(patch_shape, dtype=np.intp),
sample_offsets)
if as_single_array:
return single_array
else:
return [Image(o, copy=False) for p in single_array for o in p]
def extract_patches_around_landmarks(
self, group=None, patch_shape=(16, 16),
sample_offsets=None, as_single_array=True):
r"""
Extract patches around landmarks existing on this image. Provided the
group label and optionally the landmark label extract a set of patches.
See `extract_patches` for more information.
Currently only 2D images are supported.
Parameters
----------
group : `str` or ``None``, optional
The landmark group to use as patch centres.
patch_shape : `tuple` or `ndarray`, optional
The size of the patch to extract
sample_offsets : ``(n_offsets, n_dims)`` `ndarray` or ``None``, optional
The offsets to sample from within a patch. So ``(0, 0)`` is the
centre of the patch (no offset) and ``(1, 0)`` would be sampling the
patch from 1 pixel up the first axis away from the centre.
If ``None``, then no offsets are applied.
as_single_array : `bool`, optional
If ``True``, an ``(n_center, n_offset, n_channels, patch_shape)``
`ndarray`, thus a single numpy array is returned containing each
patch. If ``False``, a `list` of ``n_center * n_offset``
:map:`Image` objects is returned representing each patch.
Returns
-------
patches : `list` or `ndarray`
Returns the extracted patches. Returns an `ndarray` if
``as_single_array=True`` and a `list` if
``as_single_array=False``.
Raises
------
ValueError
If image is not 2D
"""
return self.extract_patches(self.landmarks[group],
patch_shape=patch_shape,
sample_offsets=sample_offsets,
as_single_array=as_single_array)
def set_patches(self, patches, patch_centers, offset=None,
offset_index=None):
r"""
Set the values of a group of patches into the correct regions of a copy
of this image. Given an array of patches and a set of patch centers,
the patches' values are copied in the regions of the image that are
centred on the coordinates of the given centers.
The patches argument can have any of the two formats that are returned
from the `extract_patches()` and `extract_patches_around_landmarks()`
methods. Specifically it can be:
1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray`
2. `list` of ``n_center * n_offset`` :map:`Image` objects
Currently only 2D images are supported.
Parameters
----------
patches : `ndarray` or `list`
The values of the patches. It can have any of the two formats that
are returned from the `extract_patches()` and
`extract_patches_around_landmarks()` methods. Specifically, it can
either be an ``(n_center, n_offset, self.n_channels, patch_shape)``
`ndarray` or a `list` of ``n_center * n_offset`` :map:`Image`
objects.
patch_centers : :map:`PointCloud`
The centers to set the patches around.
offset : `list` or `tuple` or ``(1, 2)`` `ndarray` or ``None``, optional
The offset to apply on the patch centers within the image.
If ``None``, then ``(0, 0)`` is used.
offset_index : `int` or ``None``, optional
The offset index within the provided `patches` argument, thus the
index of the second dimension from which to sample. If ``None``,
then ``0`` is used.
Raises
------
ValueError
If image is not 2D
ValueError
If offset does not have shape (1, 2)
"""
# parse arguments
if self.n_dims != 2:
raise ValueError('Only two dimensional patch insertion is '
'currently supported.')
if offset is None:
offset = np.zeros([1, 2], dtype=np.intp)
elif isinstance(offset, tuple) or isinstance(offset, list):
offset = np.asarray([offset])
offset = np.require(offset, dtype=np.intp)
if not offset.shape == (1, 2):
raise ValueError('The offset must be a tuple, a list or a '
'numpy.array with shape (1, 2).')
if offset_index is None:
offset_index = 0
# if patches is a list, convert it to array
if isinstance(patches, list):
patches = _convert_patches_list_to_single_array(
patches, patch_centers.n_points)
copy = self.copy()
# set patches
set_patches(patches, copy.pixels, patch_centers.points, offset,
offset_index)
return copy
def set_patches_around_landmarks(self, patches, group=None,
offset=None, offset_index=None):
r"""
Set the values of a group of patches around the landmarks existing in a
copy of this image. Given an array of patches, a group and a label, the
patches' values are copied in the regions of the image that are
centred on the coordinates of corresponding landmarks.
The patches argument can have any of the two formats that are returned
from the `extract_patches()` and `extract_patches_around_landmarks()`
methods. Specifically it can be:
1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray`
2. `list` of ``n_center * n_offset`` :map:`Image` objects
Currently only 2D images are supported.
Parameters
----------
patches : `ndarray` or `list`
The values of the patches. It can have any of the two formats that
are returned from the `extract_patches()` and
`extract_patches_around_landmarks()` methods. Specifically, it can
either be an ``(n_center, n_offset, self.n_channels, patch_shape)``
`ndarray` or a `list` of ``n_center * n_offset`` :map:`Image`
objects.
group : `str` or ``None`` optional
The landmark group to use as patch centres.
offset : `list` or `tuple` or ``(1, 2)`` `ndarray` or ``None``, optional
The offset to apply on the patch centers within the image.
If ``None``, then ``(0, 0)`` is used.
offset_index : `int` or ``None``, optional
The offset index within the provided `patches` argument, thus the
index of the second dimension from which to sample. If ``None``,
then ``0`` is used.
Raises
------
ValueError
If image is not 2D
ValueError
If offset does not have shape (1, 2)
"""
return self.set_patches(patches, self.landmarks[group],
offset=offset, offset_index=offset_index)
def warp_to_mask(self, template_mask, transform, warp_landmarks=True,
order=1, mode='constant', cval=0.0, batch_size=None,
return_transform=False):
r"""
Return a copy of this image warped into a different reference space.
Note that warping into a mask is slower than warping into a full image.
If you don't need a non-linear mask, consider :meth:`warp_to_shape`
instead.
Parameters
----------
template_mask : :map:`BooleanImage`
Defines the shape of the result, and what pixels should be sampled.
transform : :map:`Transform`
Transform **from the template space back to this image**.
Defines, for each pixel location on the template, which pixel
location should be sampled from on this image.
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as ``self``, but with each landmark updated to the warped position.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= =====================
Order Interpolation
========= =====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= =====================
mode : ``{constant, nearest, reflect, wrap}``, optional
Points outside the boundaries of the input are filled according
to the given mode.
cval : `float`, optional
Used in conjunction with mode ``constant``, the value outside
the image boundaries.
batch_size : `int` or ``None``, optional
This should only be considered for large images. Setting this
value can cause warping to become much slower, particularly for
cached warps such as Piecewise Affine. This size indicates
how many points in the image should be warped at a time, which
keeps memory usage low. If ``None``, no batching is used and all
points are warped at once.
return_transform : `bool`, optional
This argument is for internal use only. If ``True``, then the
:map:`Transform` object is also returned.
Returns
-------
warped_image : :map:`MaskedImage`
A copy of this image, warped.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
"""
if self.n_dims != transform.n_dims:
raise ValueError(
"Trying to warp a {}D image with a {}D transform "
"(they must match)".format(self.n_dims, transform.n_dims))
template_points = template_mask.true_indices()
points_to_sample = transform.apply(template_points,
batch_size=batch_size)
sampled = self.sample(points_to_sample,
order=order, mode=mode, cval=cval)
# set any nan values to 0
sampled[np.isnan(sampled)] = 0
# build a warped version of the image
warped_image = self._build_warp_to_mask(template_mask, sampled)
if warp_landmarks and self.has_landmarks:
warped_image.landmarks = self.landmarks
transform.pseudoinverse()._apply_inplace(warped_image.landmarks)
if hasattr(self, 'path'):
warped_image.path = self.path
# optionally return the transform
if return_transform:
return warped_image, transform
else:
return warped_image
def _build_warp_to_mask(self, template_mask, sampled_pixel_values):
r"""
Builds the warped image from the template mask and sampled pixel values.
Overridden for :map:`BooleanImage` as we can't use the usual
:meth:`from_vector_inplace` method. All other :map:`Image` classes
share the :map:`Image` implementation.
Parameters
----------
template_mask : :map:`BooleanImage` or 2D `bool ndarray`
Mask for warping.
sampled_pixel_values : ``(n_true_pixels_in_mask,)`` `ndarray`
Sampled value to rebuild the masked image from.
"""
from menpo.image import MaskedImage
warped_image = MaskedImage.init_blank(template_mask.shape,
n_channels=self.n_channels,
mask=template_mask)
warped_image._from_vector_inplace(sampled_pixel_values.ravel())
return warped_image
def sample(self, points_to_sample, order=1, mode='constant', cval=0.0):
r"""
Sample this image at the given sub-pixel accurate points. The input
PointCloud should have the same number of dimensions as the image e.g.
        a 2D PointCloud for a 2D multi-channel image. A numpy array will be
        returned that has the values for every given point across each channel
of the image.
Parameters
----------
points_to_sample : :map:`PointCloud`
Array of points to sample from the image. Should be
`(n_points, n_dims)`
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5].
See warp_to_shape for more information.
mode : ``{constant, nearest, reflect, wrap}``, optional
Points outside the boundaries of the input are filled according
to the given mode.
cval : `float`, optional
Used in conjunction with mode ``constant``, the value outside
the image boundaries.
Returns
-------
sampled_pixels : (`n_points`, `n_channels`) `ndarray`
The interpolated values taken across every channel of the image.
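        Examples
        --------
        A minimal sketch of sampling two sub-pixel locations (the asset and
        coordinates are illustrative only) ::
            import numpy as np
            import menpo.io as mio
            from menpo.shape import PointCloud
            im = mio.import_builtin_asset.lenna_png()
            pts = PointCloud(np.array([[10.5, 20.25],
                                       [100.0, 50.75]]))
            # Bi-linear interpolation (order=1) at both points, for every
            # channel of the image
            values = im.sample(pts)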
"""
# The public interface is a PointCloud, but when this is used internally
# a numpy array is passed. So let's just treat the PointCloud as a
# 'special case' and not document the ndarray ability.
if isinstance(points_to_sample, PointCloud):
points_to_sample = points_to_sample.points
return scipy_interpolation(self.pixels, points_to_sample,
order=order, mode=mode, cval=cval)
def warp_to_shape(self, template_shape, transform, warp_landmarks=True,
order=1, mode='constant', cval=0.0, batch_size=None,
return_transform=False):
"""
Return a copy of this image warped into a different reference space.
Parameters
----------
template_shape : `tuple` or `ndarray`
Defines the shape of the result, and what pixel indices should be
sampled (all of them).
transform : :map:`Transform`
Transform **from the template_shape space back to this image**.
Defines, for each index on template_shape, which pixel location
should be sampled from on this image.
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
mode : ``{constant, nearest, reflect, wrap}``, optional
Points outside the boundaries of the input are filled according
to the given mode.
cval : `float`, optional
Used in conjunction with mode ``constant``, the value outside
the image boundaries.
batch_size : `int` or ``None``, optional
This should only be considered for large images. Setting this
            value can cause warping to become much slower, particularly for
cached warps such as Piecewise Affine. This size indicates
how many points in the image should be warped at a time, which
keeps memory usage low. If ``None``, no batching is used and all
points are warped at once.
return_transform : `bool`, optional
This argument is for internal use only. If ``True``, then the
:map:`Transform` object is also returned.
Returns
-------
warped_image : `type(self)`
A copy of this image, warped.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
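        Examples
        --------
        A minimal sketch that crops a 100 x 100 region whose top-left corner
        sits at (50, 50) in this image (the asset and offsets are
        illustrative only) ::
            import menpo.io as mio
            from menpo.transform import Translation
            im = mio.import_builtin_asset.lenna_png()
            # The transform maps template indices back onto this image
            crop = im.warp_to_shape((100, 100), Translation([50, 50]))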
"""
template_shape = np.array(template_shape, dtype=np.int)
if (isinstance(transform, Affine) and order in range(4) and
self.n_dims == 2):
# we are going to be able to go fast.
if isinstance(transform, Translation) and order == 0:
# an integer translation (e.g. a crop) If this lies entirely
# in the bounds then we can just do a copy. We need to match
# the behavior of cython_interpolation exactly, which means
# matching its rounding behavior too:
t = transform.translation_component.copy()
pos_t = t > 0.0
t[pos_t] += 0.5
t[~pos_t] -= 0.5
min_ = t.astype(np.int)
max_ = template_shape + min_
if np.all(max_ <= np.array(self.shape)) and np.all(min_ >= 0):
# we have a crop - slice the pixels.
warped_pixels = self.pixels[:,
int(min_[0]):int(max_[0]),
int(min_[1]):int(max_[1])].copy()
return self._build_warp_to_shape(warped_pixels, transform,
warp_landmarks,
return_transform)
# we couldn't do the crop, but skimage has an optimised Cython
# interpolation for 2D affine warps - let's use that
sampled = cython_interpolation(self.pixels, template_shape,
transform, order=order,
mode=mode, cval=cval)
else:
template_points = indices_for_image_of_shape(template_shape)
points_to_sample = transform.apply(template_points,
batch_size=batch_size)
sampled = self.sample(points_to_sample,
order=order, mode=mode, cval=cval)
# set any nan values to 0
sampled[np.isnan(sampled)] = 0
# build a warped version of the image
warped_pixels = sampled.reshape(
(self.n_channels,) + tuple(template_shape))
return self._build_warp_to_shape(warped_pixels, transform,
warp_landmarks, return_transform)
def _build_warp_to_shape(self, warped_pixels, transform, warp_landmarks,
return_transform):
# factored out common logic from the different paths we can take in
# warp_to_shape. Rebuilds an image post-warp, adjusting landmarks
# as necessary.
warped_image = Image(warped_pixels, copy=False)
# warp landmarks if requested.
if warp_landmarks and self.has_landmarks:
warped_image.landmarks = self.landmarks
transform.pseudoinverse()._apply_inplace(warped_image.landmarks)
if hasattr(self, 'path'):
warped_image.path = self.path
# optionally return the transform
if return_transform:
return warped_image, transform
else:
return warped_image
def rescale(self, scale, round='ceil', order=1,
return_transform=False):
r"""
Return a copy of this image, rescaled by a given factor.
Landmarks are rescaled appropriately.
Parameters
----------
scale : `float` or `tuple` of `floats`
The scale factor. If a tuple, the scale to apply to each dimension.
If a single `float`, the scale will be applied uniformly across
each dimension.
round: ``{ceil, floor, round}``, optional
Rounding function to be applied to floating point shapes.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the rescale is also returned.
Returns
-------
rescaled_image : ``type(self)``
A copy of this image, rescaled.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ValueError:
If less scales than dimensions are provided.
If any scale is less than or equal to 0.
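        Examples
        --------
        A minimal sketch of uniform and per-dimension rescaling (the asset
        is illustrative only) ::
            import menpo.io as mio
            im = mio.import_builtin_asset.lenna_png()
            # Halve the size in every dimension
            small = im.rescale(0.5)
            # Halve the height, double the width
            stretched = im.rescale((0.5, 2.0))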
"""
# Pythonic way of converting to list if we are passed a single float
try:
if len(scale) < self.n_dims:
raise ValueError(
'Must provide a scale per dimension.'
'{} scales were provided, {} were expected.'.format(
len(scale), self.n_dims
)
)
except TypeError: # Thrown when len() is called on a float
scale = [scale] * self.n_dims
# Make sure we have a numpy array
scale = np.asarray(scale)
for s in scale:
if s <= 0:
raise ValueError('Scales must be positive floats.')
transform = NonUniformScale(scale)
# use the scale factor to make the template mask bigger
        # while respecting the user's rounding preference.
template_shape = round_image_shape(transform.apply(self.shape),
round)
# due to image indexing, we can't just apply the pseudoinverse
# transform to achieve the scaling we want though!
# Consider a 3x rescale on a 2x4 image. Looking at each dimension:
# H 2 -> 6 so [0-1] -> [0-5] = 5/1 = 5x
# W 4 -> 12 [0-3] -> [0-11] = 11/3 = 3.67x
# => need to make the correct scale per dimension!
shape = np.array(self.shape, dtype=np.float)
# scale factors = max_index_after / current_max_index
# (note that max_index = length - 1, as 0 based)
scale_factors = (scale * shape - 1) / (shape - 1)
inverse_transform = NonUniformScale(scale_factors).pseudoinverse()
# for rescaling we enforce that mode is nearest to avoid num. errors
return self.warp_to_shape(template_shape, inverse_transform,
warp_landmarks=True, order=order,
mode='nearest',
return_transform=return_transform)
def rescale_to_diagonal(self, diagonal, round='ceil',
return_transform=False):
r"""
        Return a copy of this image, rescaled so that its diagonal is a
new size.
Parameters
----------
diagonal: `int`
The diagonal size of the new image.
round: ``{ceil, floor, round}``, optional
Rounding function to be applied to floating point shapes.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the rescale is also returned.
Returns
-------
rescaled_image : type(self)
A copy of this image, rescaled.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
"""
return self.rescale(diagonal / self.diagonal(), round=round,
return_transform=return_transform)
def rescale_to_pointcloud(self, pointcloud, group=None,
round='ceil', order=1,
return_transform=False):
r"""
Return a copy of this image, rescaled so that the scale of a
particular group of landmarks matches the scale of the passed
reference pointcloud.
Parameters
----------
pointcloud: :map:`PointCloud`
The reference pointcloud to which the landmarks specified by
``group`` will be scaled to match.
group : `str`, optional
The key of the landmark set that should be used. If ``None``,
and if there is only one set of landmarks, this set will be used.
round: ``{ceil, floor, round}``, optional
Rounding function to be applied to floating point shapes.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the rescale is also returned.
Returns
-------
rescaled_image : ``type(self)``
A copy of this image, rescaled.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
"""
pc = self.landmarks[group]
scale = AlignmentUniformScale(pc, pointcloud).as_vector().copy()
return self.rescale(scale, round=round, order=order,
return_transform=return_transform)
def rescale_landmarks_to_diagonal_range(self, diagonal_range, group=None,
round='ceil', order=1,
return_transform=False):
r"""
        Return a copy of this image, rescaled so that the diagonal of the
        bounding box containing its landmarks matches the specified
        ``diagonal_range``.
Parameters
----------
diagonal_range: ``(n_dims,)`` `ndarray`
            The diagonal range that we want the bounding box of the landmarks
            of the returned image to have.
group : `str`, optional
The key of the landmark set that should be used. If ``None``
and if there is only one set of landmarks, this set will be used.
round : ``{ceil, floor, round}``, optional
Rounding function to be applied to floating point shapes.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= =====================
Order Interpolation
========= =====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= =====================
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the rescale is also returned.
Returns
-------
rescaled_image : ``type(self)``
A copy of this image, rescaled.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
"""
x, y = self.landmarks[group].range()
scale = diagonal_range / np.sqrt(x ** 2 + y ** 2)
return self.rescale(scale, round=round, order=order,
return_transform=return_transform)
def resize(self, shape, order=1, return_transform=False):
r"""
Return a copy of this image, resized to a particular shape.
All image information (landmarks, and mask in the case of
:map:`MaskedImage`) is resized appropriately.
Parameters
----------
shape : `tuple`
The new shape to resize to.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= =====================
Order Interpolation
========= =====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= =====================
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the resize is also returned.
Returns
-------
resized_image : ``type(self)``
A copy of this image, resized.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ValueError:
If the number of dimensions of the new shape does not match
the number of dimensions of the image.
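        Examples
        --------
        A minimal sketch of resizing to an exact shape (the asset is
        illustrative only) ::
            import menpo.io as mio
            im = mio.import_builtin_asset.lenna_png()
            # Landmarks (and the mask, for a MaskedImage) are resized too
            im_250 = im.resize((250, 250))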
"""
shape = np.asarray(shape, dtype=np.float)
if len(shape) != self.n_dims:
raise ValueError(
'Dimensions must match.'
'{} dimensions provided, {} were expected.'.format(
shape.shape, self.n_dims))
scales = shape / self.shape
# Have to round the shape when scaling to deal with floating point
# errors. For example, if we want (250, 250), we need to ensure that
# we get (250, 250) even if the number we obtain is 250 to some
# floating point inaccuracy.
return self.rescale(scales, round='round', order=order,
return_transform=return_transform)
def zoom(self, scale, cval=0.0, return_transform=False):
r"""
Return a copy of this image, zoomed about the centre point. ``scale``
values greater than 1.0 denote zooming **in** to the image and values
less than 1.0 denote zooming **out** of the image. The size of the
        image will not change; if you wish to scale an image, please see
:meth:`rescale`.
Parameters
----------
scale : `float`
``scale > 1.0`` denotes zooming in. Thus the image will appear
larger and areas at the edge of the zoom will be 'cropped' out.
``scale < 1.0`` denotes zooming out. The image will be padded
by the value of ``cval``.
cval : ``float``, optional
The value to be set outside the zoomed image boundaries.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the zooming is also returned.
Returns
-------
zoomed_image : ``type(self)``
A copy of this image, zoomed.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
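        Examples
        --------
        A minimal sketch of zooming in and out about the centre (the asset
        is illustrative only) ::
            import menpo.io as mio
            im = mio.import_builtin_asset.lenna_png()
            # Zoom in - the edges are cropped away, the shape is unchanged
            zoomed_in = im.zoom(2.0)
            # Zoom out - the image is padded with cval, the shape is unchanged
            zoomed_out = im.zoom(0.5, cval=0.0)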
"""
t = scale_about_centre(self, 1.0 / scale)
return self.warp_to_shape(self.shape, t, cval=cval,
return_transform=return_transform)
def rotate_ccw_about_centre(self, theta, degrees=True, retain_shape=False,
cval=0.0, round='round', order=1,
return_transform=False):
r"""
Return a copy of this image, rotated counter-clockwise about its centre.
Note that the `retain_shape` argument defines the shape of the rotated
image. If ``retain_shape=True``, then the shape of the rotated image
will be the same as the one of current image, so some regions will
probably be cropped. If ``retain_shape=False``, then the returned image
has the correct size so that the whole area of the current image is
included.
Parameters
----------
theta : `float`
The angle of rotation about the centre.
degrees : `bool`, optional
If ``True``, `theta` is interpreted in degrees. If ``False``,
``theta`` is interpreted as radians.
retain_shape : `bool`, optional
If ``True``, then the shape of the rotated image will be the same as
            that of the current image, so some regions will probably be cropped.
If ``False``, then the returned image has the correct size so that
the whole area of the current image is included.
cval : `float`, optional
The value to be set outside the rotated image boundaries.
round : ``{'ceil', 'floor', 'round'}``, optional
Rounding function to be applied to floating point shapes. This is
only used in case ``retain_shape=True``.
order : `int`, optional
The order of interpolation. The order has to be in the range
``[0,5]``. This is only used in case ``retain_shape=True``.
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the rotation is also returned.
Returns
-------
rotated_image : ``type(self)``
The rotated image.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ValueError
Image rotation is presently only supported on 2D images
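        Examples
        --------
        A minimal sketch of rotating by 30 degrees counter-clockwise (the
        asset is illustrative only) ::
            import menpo.io as mio
            im = mio.import_builtin_asset.lenna_png()
            # The output is enlarged so that the whole rotated image fits
            rotated = im.rotate_ccw_about_centre(30)
            # Keep the original shape instead, cropping the corners
            rotated_same = im.rotate_ccw_about_centre(30, retain_shape=True)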
"""
if self.n_dims != 2:
raise ValueError('Image rotation is presently only supported on '
'2D images')
rotation = Rotation.init_from_2d_ccw_angle(theta, degrees=degrees)
return self.transform_about_centre(rotation, retain_shape=retain_shape,
cval=cval, round=round, order=order,
return_transform=return_transform)
def transform_about_centre(self, transform, retain_shape=False,
cval=0.0, round='round', order=1,
return_transform=False):
r"""
Return a copy of this image, transformed about its centre.
Note that the `retain_shape` argument defines the shape of the
transformed image. If ``retain_shape=True``, then the shape of the
        transformed image will be the same as that of the current image, so some
regions will probably be cropped. If ``retain_shape=False``, then the
returned image has the correct size so that the whole area of the
current image is included.
.. note::
This method will not work for transforms that result in a transform
chain as :map:`TransformChain` is not invertible.
.. note::
            Be careful when defining transforms for warping images. All pixel
locations must fall within a valid range as expected by the
transform. Therefore, your transformation must accept 'negative'
pixel locations as the pixel locations provided to your transform
will have the object centre subtracted from them.
Parameters
----------
transform : :map:`ComposableTransform` and :map:`VInvertible` type
A composable transform. ``pseudoinverse`` will be invoked on the
resulting transform so it must implement a valid inverse.
retain_shape : `bool`, optional
            If ``True``, then the shape of the transformed image will be the same
            as that of the current image, so some regions will probably be cropped.
If ``False``, then the returned image has the correct size so that
the whole area of the current image is included.
cval : `float`, optional
            The value to be set outside the transformed image boundaries.
round : ``{'ceil', 'floor', 'round'}``, optional
Rounding function to be applied to floating point shapes. This is
only used in case ``retain_shape=True``.
order : `int`, optional
The order of interpolation. The order has to be in the range
``[0,5]``. This is only used in case ``retain_shape=True``.
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
            perform the transformation is also returned.
Returns
-------
transformed_image : ``type(self)``
The transformed image.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Examples
--------
This is an example for rotating an image about its center. Let's
first load an image, create the rotation transform and then apply it ::
import matplotlib.pyplot as plt
import menpo.io as mio
from menpo.transform import Rotation
# Load image
im = mio.import_builtin_asset.lenna_png()
            # Create rotation transform
rot_tr = Rotation.init_from_2d_ccw_angle(45)
# Render original image
plt.subplot(131)
im.view_landmarks()
plt.title('Original')
# Render rotated image
plt.subplot(132)
im.transform_about_centre(rot_tr).view_landmarks()
plt.title('Rotated')
# Render rotated image that has shape equal as original image
plt.subplot(133)
im.transform_about_centre(rot_tr, retain_shape=True).view_landmarks()
plt.title('Rotated (Retain original shape)')
Similarly, in order to apply a shear transform ::
import matplotlib.pyplot as plt
import menpo.io as mio
from menpo.transform import Affine
# Load image
im = mio.import_builtin_asset.lenna_png()
# Create shearing transform
shear_tr = Affine.init_from_2d_shear(25, 10)
# Render original image
plt.subplot(131)
im.view_landmarks()
plt.title('Original')
# Render sheared image
plt.subplot(132)
im.transform_about_centre(shear_tr).view_landmarks()
plt.title('Sheared')
# Render sheared image that has shape equal as original image
plt.subplot(133)
im.transform_about_centre(shear_tr,
retain_shape=True).view_landmarks()
plt.title('Sheared (Retain original shape)')
"""
if retain_shape:
shape = self.shape
applied_transform = transform_about_centre(self, transform)
else:
# Get image's bounding box coordinates
original_bbox = bounding_box((0, 0),
np.array(self.shape) - 1)
# Translate to origin and apply transform
trans = Translation(-self.centre(),
skip_checks=True).compose_before(transform)
transformed_bbox = trans.apply(original_bbox)
# Create new translation so that min bbox values go to 0
t = Translation(-transformed_bbox.bounds()[0])
applied_transform = trans.compose_before(t)
transformed_bbox = trans.apply(original_bbox)
            # Output image's shape is the range of the transformed bounding box
            # while respecting the user's rounding preference.
shape = round_image_shape(transformed_bbox.range() + 1, round)
# Warp image
return self.warp_to_shape(
shape, applied_transform.pseudoinverse(), order=order,
warp_landmarks=True, cval=cval, return_transform=return_transform)
def mirror(self, axis=1, return_transform=False):
r"""
Return a copy of this image, mirrored/flipped about a certain axis.
Parameters
----------
axis : `int`, optional
The axis about which to mirror the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the mirroring is also returned.
Returns
-------
mirrored_image : ``type(self)``
The mirrored image.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ValueError
axis cannot be negative
ValueError
axis={} but the image has {} dimensions
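        Examples
        --------
        A minimal sketch of mirroring about each axis (the asset is
        illustrative only) ::
            import menpo.io as mio
            im = mio.import_builtin_asset.lenna_png()
            flipped_lr = im.mirror()        # axis=1, flip left-right
            flipped_ud = im.mirror(axis=0)  # flip up-down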
"""
# Check axis argument
if axis < 0:
raise ValueError('axis cannot be negative')
elif axis >= self.n_dims:
raise ValueError("axis={} but the image has {} "
"dimensions".format(axis, self.n_dims))
# Create transform that includes ...
# ... flipping about the selected axis ...
rot_matrix = np.eye(self.n_dims)
rot_matrix[axis, axis] = -1
# ... and translating back to the image's bbox
tr_matrix = np.zeros(self.n_dims)
tr_matrix[axis] = self.shape[axis] - 1
# Create transform object
trans = Rotation(rot_matrix, skip_checks=True).compose_before(
Translation(tr_matrix, skip_checks=True))
# Warp image
return self.warp_to_shape(self.shape, trans.pseudoinverse(),
warp_landmarks=True,
return_transform=return_transform)
def pyramid(self, n_levels=3, downscale=2):
r"""
Return a rescaled pyramid of this image. The first image of the
pyramid will be a copy of the original, unmodified, image, and counts
as level 1.
Parameters
----------
n_levels : `int`, optional
Total number of levels in the pyramid, including the original
unmodified image
downscale : `float`, optional
Downscale factor.
Yields
------
image_pyramid: `generator`
Generator yielding pyramid layers as :map:`Image` objects.
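        Examples
        --------
        A minimal sketch of materialising a 3 level pyramid (the asset is
        illustrative only) ::
            import menpo.io as mio
            im = mio.import_builtin_asset.lenna_png()
            # Level 1 is the original image, each further level halves it
            levels = list(im.pyramid(n_levels=3, downscale=2))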
"""
image = self.copy()
yield image
for _ in range(n_levels - 1):
image = image.rescale(1.0 / downscale)
yield image
def gaussian_pyramid(self, n_levels=3, downscale=2, sigma=None):
r"""
Return the gaussian pyramid of this image. The first image of the
pyramid will be a copy of the original, unmodified, image, and counts
as level 1.
Parameters
----------
n_levels : `int`, optional
Total number of levels in the pyramid, including the original
unmodified image
downscale : `float`, optional
Downscale factor.
sigma : `float`, optional
Sigma for gaussian filter. Default is ``downscale / 3.`` which
corresponds to a filter mask twice the size of the scale factor
that covers more than 99% of the gaussian distribution.
Yields
------
image_pyramid: `generator`
Generator yielding pyramid layers as :map:`Image` objects.
"""
from menpo.feature import gaussian_filter
if sigma is None:
sigma = downscale / 3.
image = self.copy()
yield image
for level in range(n_levels - 1):
image = gaussian_filter(image, sigma).rescale(1.0 / downscale)
yield image
def as_greyscale(self, mode='luminosity', channel=None):
r"""
Returns a greyscale version of the image. If the image does *not*
represent a 2D RGB image, then the ``luminosity`` mode will fail.
Parameters
----------
mode : ``{average, luminosity, channel}``, optional
============== =====================================================
mode Greyscale Algorithm
============== =====================================================
average Equal average of all channels
luminosity Calculates the luminance using the CCIR 601 formula:
| .. math:: Y' = 0.2989 R' + 0.5870 G' + 0.1140 B'
channel A specific channel is chosen as the intensity value.
============== =====================================================
channel: `int`, optional
The channel to be taken. Only used if mode is ``channel``.
Returns
-------
greyscale_image : :map:`MaskedImage`
A copy of this image in greyscale.
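        Examples
        --------
        A minimal sketch of the different greyscale modes (the asset is
        illustrative only and is assumed to be a 2D RGB image) ::
            import menpo.io as mio
            im = mio.import_builtin_asset.lenna_png()
            grey = im.as_greyscale()  # CCIR 601 luminosity
            grey_avg = im.as_greyscale(mode='average')
            grey_ch0 = im.as_greyscale(mode='channel', channel=0)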
"""
greyscale = self.copy()
if mode == 'luminosity':
if self.n_dims != 2:
                raise ValueError("The 'luminosity' mode only works on 2D RGB "
"images. {} dimensions found, "
"2 expected.".format(self.n_dims))
elif self.n_channels != 3:
                raise ValueError("The 'luminosity' mode only works on RGB "
"images. {} channels found, "
"3 expected.".format(self.n_channels))
# Only compute the coefficients once.
global _greyscale_luminosity_coef
if _greyscale_luminosity_coef is None:
_greyscale_luminosity_coef = np.linalg.inv(
np.array([[1.0, 0.956, 0.621],
[1.0, -0.272, -0.647],
[1.0, -1.106, 1.703]]))[0, :]
# Compute greyscale via dot product
pixels = np.dot(_greyscale_luminosity_coef,
greyscale.pixels.reshape(3, -1))
# Reshape image back to original shape (with 1 channel)
pixels = pixels.reshape(greyscale.shape)
elif mode == 'average':
pixels = np.mean(greyscale.pixels, axis=0)
elif mode == 'channel':
if channel is None:
raise ValueError("For the 'channel' mode you have to provide"
" a channel index")
pixels = greyscale.pixels[channel]
else:
raise ValueError("Unknown mode {} - expected 'luminosity', "
"'average' or 'channel'.".format(mode))
# Set new pixels - ensure channel axis and maintain
greyscale.pixels = pixels[None, ...].astype(greyscale.pixels.dtype,
copy=False)
return greyscale
def as_PILImage(self, out_dtype=np.uint8):
r"""
Return a PIL copy of the image scaled and cast to the correct
values for the provided ``out_dtype``.
Image must only have 1 or 3 channels and be 2 dimensional.
Non `uint8` floating point images must be in the range ``[0, 1]`` to be
converted.
Parameters
----------
out_dtype : `np.dtype`, optional
The dtype the output array should be.
Returns
-------
pil_image : `PILImage`
PIL copy of image
Raises
------
ValueError
            If image is not 2D or does not have 1 or 3 channels.
ValueError
If pixels data type is `float32` or `float64` and the pixel
range is outside of ``[0, 1]``
ValueError
If the output dtype is unsupported. Currently uint8 is supported.
"""
if self.n_dims != 2 or (self.n_channels != 1 and self.n_channels != 3):
raise ValueError(
'Can only convert greyscale or RGB 2D images. '
'Received a {} channel {}D image.'.format(self.n_channels,
self.n_dims))
# Slice off the channel for greyscale images
if self.n_channels == 1:
pixels = self.pixels[0]
else:
pixels = channels_to_back(self.pixels)
pixels = denormalize_pixels_range(pixels, out_dtype)
return PILImage.fromarray(pixels)
def as_imageio(self, out_dtype=np.uint8):
r"""
Return an Imageio copy of the image scaled and cast to the correct
values for the provided ``out_dtype``.
Image must only have 1 or 3 channels and be 2 dimensional.
Non `uint8` floating point images must be in the range ``[0, 1]`` to be
converted.
Parameters
----------
out_dtype : `np.dtype`, optional
The dtype the output array should be.
Returns
-------
imageio_image : `ndarray`
Imageio image (which is just a numpy ndarray with the channels
as the last axis).
Raises
------
ValueError
            If image is not 2D or does not have 1 or 3 channels.
ValueError
If pixels data type is `float32` or `float64` and the pixel
range is outside of ``[0, 1]``
ValueError
If the output dtype is unsupported. Currently uint8 and uint16
are supported.
"""
warn('This method is no longer supported and will be removed in a '
'future version of Menpo. '
'Use .pixels_with_channels_at_back instead.',
MenpoDeprecationWarning)
if self.n_dims != 2 or (self.n_channels != 1 and self.n_channels != 3):
raise ValueError(
'Can only convert greyscale or RGB 2D images. '
'Received a {} channel {}D image.'.format(self.n_channels,
self.n_dims))
# Slice off the channel for greyscale images
if self.n_channels == 1:
pixels = self.pixels[0]
else:
pixels = channels_to_back(self.pixels)
return denormalize_pixels_range(pixels, out_dtype)
def pixels_range(self):
r"""
The range of the pixel values (min and max pixel values).
Returns
-------
min_max : ``(dtype, dtype)``
The minimum and maximum value of the pixels array.
"""
return self.pixels.min(), self.pixels.max()
def rolled_channels(self):
r"""
Deprecated - please use the equivalent ``pixels_with_channels_at_back`` method.
"""
warn('This method is no longer supported and will be removed in a '
'future version of Menpo. '
'Use .pixels_with_channels_at_back() instead.',
MenpoDeprecationWarning)
return self.pixels_with_channels_at_back()
def pixels_with_channels_at_back(self, out_dtype=None):
r"""
Returns the pixels matrix, with the channels rolled to the back axis.
This may be required for interacting with external code bases that
require images to have channels as the last axis, rather than the
Menpo convention of channels as the first axis.
If this image is single channel, the final axis is dropped.
Parameters
----------
out_dtype : `np.dtype`, optional
The dtype the output array should be.
Returns
-------
rolled_channels : `ndarray`
Pixels with channels as the back (last) axis. If single channel,
the last axis will be dropped.
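        Examples
        --------
        A minimal sketch of handing pixels to channels-last code such as
        matplotlib (the asset is illustrative only) ::
            import numpy as np
            import menpo.io as mio
            im = mio.import_builtin_asset.lenna_png()
            # A (height, width, 3) array for an RGB image
            rgb = im.pixels_with_channels_at_back()
            # Optionally convert the range/dtype at the same time
            rgb_uint8 = im.pixels_with_channels_at_back(out_dtype=np.uint8)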
"""
p = channels_to_back(self.pixels)
if out_dtype is not None:
p = denormalize_pixels_range(p, out_dtype=out_dtype)
return np.squeeze(p)
def __str__(self):
return ('{} {}D Image with {} channel{}'.format(
self._str_shape(), self.n_dims, self.n_channels,
's' * (self.n_channels > 1)))
def has_landmarks_outside_bounds(self):
"""
Indicates whether there are landmarks located outside the image bounds.
:type: `bool`
"""
if self.has_landmarks:
for l_group in self.landmarks:
pc = self.landmarks[l_group].points
if np.any(np.logical_or(self.shape - pc < 1, pc < 0)):
return True
return False
def constrain_landmarks_to_bounds(self):
r"""
Deprecated - please use the equivalent ``constrain_to_bounds`` method
now on PointCloud, in conjunction with the new Image ``bounds()``
method. For example:
>>> im.constrain_landmarks_to_bounds() # Equivalent to below
>>> im.landmarks['test'] = im.landmarks['test'].constrain_to_bounds(im.bounds())
"""
warn('This method is no longer supported and will be removed in a '
'future version of Menpo. '
'Use .constrain_to_bounds() instead (on PointCloud).',
MenpoDeprecationWarning)
for l_group in self.landmarks:
l = self.landmarks[l_group]
for k in range(l.points.shape[1]):
tmp = l.points[:, k]
tmp[tmp < 0] = 0
tmp[tmp > self.shape[k] - 1] = self.shape[k] - 1
l.points[:, k] = tmp
self.landmarks[l_group] = l
def normalize_std(self, mode='all', **kwargs):
r"""
Returns a copy of this image normalized such that its
pixel values have zero mean and unit variance.
Parameters
----------
mode : ``{all, per_channel}``, optional
If ``all``, the normalization is over all channels. If
``per_channel``, each channel individually is mean centred and
normalized in variance.
Returns
-------
image : ``type(self)``
A copy of this image, normalized.
"""
warn('This method is no longer supported and will be removed in a '
'future version of Menpo. '
'Use .normalize_std() instead (features package).',
MenpoDeprecationWarning)
return self._normalize(np.std, mode=mode)
def normalize_norm(self, mode='all', **kwargs):
r"""
Returns a copy of this image normalized such that its pixel values
have zero mean and its norm equals 1.
Parameters
----------
mode : ``{all, per_channel}``, optional
If ``all``, the normalization is over all channels. If
``per_channel``, each channel individually is mean centred and
unit norm.
Returns
-------
image : ``type(self)``
A copy of this image, normalized.
"""
warn('This method is no longer supported and will be removed in a '
'future version of Menpo. '
'Use .normalize_norm() instead (features package).',
MenpoDeprecationWarning)
def scale_func(pixels, axis=None):
return np.linalg.norm(pixels, axis=axis, **kwargs)
return self._normalize(scale_func, mode=mode)
def _normalize(self, scale_func, mode='all'):
from menpo.feature import normalize
return normalize(self, scale_func=scale_func, mode=mode)
def rescale_pixels(self, minimum, maximum, per_channel=True):
r"""A copy of this image with pixels linearly rescaled to fit a range.
Note that the only pixels that will be considered and rescaled are those
that feature in the vectorized form of this image. If you want to use
this routine on all the pixels in a :map:`MaskedImage`, consider
using `as_unmasked()` prior to this call.
Parameters
----------
minimum: `float`
The minimal value of the rescaled pixels
maximum: `float`
The maximal value of the rescaled pixels
per_channel: `boolean`, optional
If ``True``, each channel will be rescaled independently. If
``False``, the scaling will be over all channels.
Returns
-------
rescaled_image: ``type(self)``
A copy of this image with pixels linearly rescaled to fit in the
range provided.
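        Examples
        --------
        A minimal sketch of stretching pixel values to ``[0, 1]`` (the asset
        is illustrative only) ::
            import menpo.io as mio
            im = mio.import_builtin_asset.lenna_png()
            # Each channel is independently rescaled to span exactly [0, 1]
            stretched = im.rescale_pixels(0.0, 1.0)
            # A single global rescaling over all channels
            stretched_global = im.rescale_pixels(0.0, 1.0, per_channel=False)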
"""
v = self.as_vector(keep_channels=True).T
if per_channel:
min_, max_ = v.min(axis=0), v.max(axis=0)
else:
min_, max_ = v.min(), v.max()
sf = ((maximum - minimum) * 1.0) / (max_ - min_)
v_new = ((v - min_) * sf) + minimum
return self.from_vector(v_new.T.ravel())
def clip_pixels(self, minimum=None, maximum=None):
r"""A copy of this image with pixels linearly clipped to fit a range.
Parameters
----------
minimum: `float`, optional
The minimal value of the clipped pixels. If None is provided, the
default value will be 0.
maximum: `float`, optional
The maximal value of the clipped pixels. If None is provided, the
default value will depend on the dtype.
Returns
-------
rescaled_image: ``type(self)``
A copy of this image with pixels linearly rescaled to fit in the
range provided.
"""
if minimum is None:
minimum = 0
if maximum is None:
dtype = self.pixels.dtype
if dtype == np.uint8:
maximum = 255
elif dtype == np.uint16:
maximum = 65535
elif dtype in [np.float32, np.float64]:
maximum = 1.0
else:
m1 = 'Could not recognise the dtype ({}) to set the maximum.'
raise ValueError(m1.format(dtype))
copy = self.copy()
copy.pixels = copy.pixels.clip(min=minimum, max=maximum)
return copy
def rasterize_landmarks(self, group=None, render_lines=True, line_style='-',
line_colour='b', line_width=1, render_markers=True,
marker_style='o', marker_size=1,
marker_face_colour='b', marker_edge_colour='b',
marker_edge_width=1, backend='matplotlib'):
r"""
This method provides the ability to rasterize 2D landmarks onto the
image. The returned image has the specified landmark groups rasterized
onto the image - which is useful for things like creating result
examples or rendering videos with annotations.
Since multiple landmark groups can be specified, all arguments can take
lists of parameters that map to the provided groups list. Therefore, the
parameters must be lists of the correct length or a single parameter to
apply to every landmark group.
Multiple backends are provided, all with different strengths. The
'pillow' backend is very fast, but not very flexible. The `matplotlib`
backend should be feature compatible with other Menpo rendering methods,
but is much slower due to the overhead of creating a figure to render
into.
Parameters
----------
group : `str` or `list` of `str`, optional
The landmark group key, or a list of keys.
render_lines : `bool`, optional
If ``True``, and the provided landmark group is a
:map:`PointDirectedGraph`, the edges are rendered.
line_style : `str`, optional
The style of the edge line. Not all backends support this argument.
line_colour : `str` or `tuple`, optional
A Matplotlib style colour or a backend dependant colour.
line_width : `int`, optional
The width of the line to rasterize.
render_markers : `bool`, optional
If ``True``, render markers at the coordinates of each landmark.
marker_style : `str`, optional
A Matplotlib marker style. Not all backends support all marker
styles.
marker_size : `int`, optional
The size of the marker - different backends use different scale
            spaces so consistent output may be difficult.
marker_face_colour : `str`, optional
A Matplotlib style colour or a backend dependant colour.
marker_edge_colour : `str`, optional
A Matplotlib style colour or a backend dependant colour.
marker_edge_width : `int`, optional
The width of the marker edge. Not all backends support this.
backend : {'matplotlib', 'pillow'}, optional
The backend to use.
Returns
-------
rasterized_image : :map:`Image`
The image with the landmarks rasterized directly into the pixels.
Raises
------
ValueError
Only 2D images are supported.
ValueError
Only RGB (3-channel) or Greyscale (1-channel) images are supported.
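        Examples
        --------
        A minimal sketch of burning landmarks into the pixels (the asset is
        illustrative only and is assumed to carry a single landmark group) ::
            import menpo.io as mio
            im = mio.import_builtin_asset.lenna_png()
            rasterized = im.rasterize_landmarks(marker_size=3,
                                                backend='matplotlib')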
"""
from .rasterize import rasterize_landmarks_2d
return rasterize_landmarks_2d(
self, group=group, render_lines=render_lines,
line_style=line_style, line_colour=line_colour,
line_width=line_width, render_markers=render_markers,
marker_style=marker_style, marker_size=marker_size,
marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width, backend=backend)
def round_image_shape(shape, round):
if round not in ['ceil', 'round', 'floor']:
raise ValueError('round must be either ceil, round or floor')
# Ensure that the '+' operator means concatenate tuples
return tuple(getattr(np, round)(shape).astype(np.int))
def _convert_patches_list_to_single_array(patches_list, n_center):
r"""
Converts patches from a `list` of :map:`Image` objects to a single `ndarray`
with shape ``(n_center, n_offset, self.n_channels, patch_shape)``.
Note that these two are the formats returned by the `extract_patches()`
and `extract_patches_around_landmarks()` methods of :map:`Image` class.
Parameters
----------
patches_list : `list` of `n_center * n_offset` :map:`Image` objects
A `list` that contains all the patches as :map:`Image` objects.
n_center : `int`
The number of centers from which the patches are extracted.
Returns
-------
patches_array : `ndarray` ``(n_center, n_offset, n_channels, patch_shape)``
The numpy array that contains all the patches.
"""
n_offsets = np.int(len(patches_list) / n_center)
n_channels = patches_list[0].n_channels
height = patches_list[0].height
width = patches_list[0].width
patches_array = np.empty((n_center, n_offsets, n_channels, height, width),
dtype=patches_list[0].pixels.dtype)
total_index = 0
for p in range(n_center):
for o in range(n_offsets):
patches_array[p, o, ...] = patches_list[total_index].pixels
total_index += 1
return patches_array
def _create_patches_image(patches, patch_centers, patches_indices=None,
offset_index=None, background='black'):
r"""
Creates an :map:`Image` object in which the patches are located on the
correct regions based on the centers. Thus, the image is a block-sparse
matrix. It has also two attached :map:`PointCloud` objects. The
`all_patch_centers` one contains all the patch centers, while the
`selected_patch_centers` one contains only the centers that correspond to
the patches that the user selected to set.
The patches argument can have any of the two formats that are returned
from the `extract_patches()` and `extract_patches_around_landmarks()`
methods of the :map:`Image` class. Specifically it can be:
1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray`
2. `list` of ``n_center * n_offset`` :map:`Image` objects
Parameters
----------
patches : `ndarray` or `list`
The values of the patches. It can have any of the two formats that are
returned from the `extract_patches()` and
`extract_patches_around_landmarks()` methods. Specifically, it can
either be an ``(n_center, n_offset, self.n_channels, patch_shape)``
`ndarray` or a `list` of ``n_center * n_offset`` :map:`Image` objects.
patch_centers : :map:`PointCloud`
The centers to set the patches around.
patches_indices : `int` or `list` of `int` or ``None``, optional
Defines the patches that will be set (copied) to the image. If ``None``,
then all the patches are copied.
offset_index : `int` or ``None``, optional
The offset index within the provided `patches` argument, thus the index
of the second dimension from which to sample. If ``None``, then ``0`` is
used.
background : ``{'black', 'white'}``, optional
If ``'black'``, then the background is set equal to the minimum value
of `patches`. If ``'white'``, then the background is set equal to the
maximum value of `patches`.
Returns
-------
patches_image : :map:`Image`
The output patches image object.
Raises
------
ValueError
        Background must be either 'black' or 'white'.
"""
# If patches is a list, convert it to array
if isinstance(patches, list):
patches = _convert_patches_list_to_single_array(patches,
patch_centers.n_points)
# Parse inputs
if offset_index is None:
offset_index = 0
if patches_indices is None:
patches_indices = np.arange(patches.shape[0])
elif not isinstance(patches_indices, Iterable):
patches_indices = [patches_indices]
# Compute patches image's shape
n_channels = patches.shape[2]
patch_shape0 = patches.shape[3]
patch_shape1 = patches.shape[4]
top, left = np.min(patch_centers.points, 0)
bottom, right = np.max(patch_centers.points, 0)
min_0 = np.floor(top - patch_shape0)
min_1 = np.floor(left - patch_shape1)
max_0 = np.ceil(bottom + patch_shape0)
max_1 = np.ceil(right + patch_shape1)
height = max_0 - min_0 + 1
width = max_1 - min_1 + 1
# Translate the patch centers to fit in the new image
new_patch_centers = patch_centers.copy()
new_patch_centers.points = patch_centers.points - np.array([[min_0, min_1]])
# Create temporary pointcloud with the selected patch centers
tmp_centers = PointCloud(new_patch_centers.points[patches_indices])
# Create new image with the correct background values
if background == 'black':
patches_image = Image.init_blank(
(height, width), n_channels,
fill=np.min(patches[patches_indices]),
dtype=patches.dtype)
elif background == 'white':
patches_image = Image.init_blank(
(height, width), n_channels,
fill=np.max(patches[patches_indices]),
dtype=patches.dtype)
else:
        raise ValueError("Background must be either 'black' or 'white'.")
# Attach the corrected patch centers
patches_image.landmarks['all_patch_centers'] = new_patch_centers
patches_image.landmarks['selected_patch_centers'] = tmp_centers
# Set the patches
return patches_image.set_patches_around_landmarks(
patches[patches_indices], group='selected_patch_centers',
offset_index=offset_index)
| bsd-3-clause |
tuos/FlowAndCorrelations | healpy/isotropic/analysis_iso.py | 1 | 2120 |
import healpy as hp
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import scipy.special as spc
import math
import matplotlib as mpl
from scipy.special import lpmn
import scipy.integrate as integrate
from scipy.integrate import quad
from numpy import sin, cos
from matplotlib.cm import ScalarMappable
import random
# Set the number of sources and the coordinates for the input
nsources = int(199999)
nside = 32
npix = hp.nside2npix(nside)
SIZE = 400
DPI = 100
# Coordinates and the density field f
#thetas = np.random.random(nsources) * np.pi
#phis = np.random.random(nsources) * np.pi * 2.
#fs = np.random.randn(nsources)
'''
with open("eventFileFullTot_iso.txt") as inputFile2:
lines2 = inputFile2.readlines()
thetas2 = []
phis2 = []
for i in range(nsources):
thetas2.append(float(lines2[i+1].split()[1]))
phis2.append(float(lines2[i+1].split()[2]))
indices2 = hp.ang2pix(nside, thetas2, phis2)
'''
hpxmap2 = np.zeros(npix, dtype = np.float)
events = 8000
mult = 2500
for i in range(events):
for k in range(mult):
ipix = random.randint(0, npix-1)
#hpxmap2[indices2[i]] += 1.0
hpxmap2[ipix] += 1
#hpxmap2[1] = 50
#hpxmap2[npix-2] = 150
#hp_smoothed = hp.sphtfunc.smoothing(hpxmap2, fwhm=np.radians(1), iter = 1)
hp.mollview(hpxmap2, cmap = cm.jet, xsize = SIZE, min = events*mult/npix*0.9, max = events*mult/npix*1.1, title='Isotropic randomised')
#fig = plt.gcf()
#ax = plt.gca()
#norm = mpl.colors.Normalize(vmin = 50, vmax = 150)
#image = ax.get_images()[0]
#cmap = fig.colorbar(image, norm = norm, orientation = 'horizontal')
hp.graticule()
plt.savefig("map_iso.png", dpi = DPI)
'''
cl = []
for l in range(24):
cl.append(0)
i = 0
events = 100
for x in range(events):
i += 1
j = x+1
phis = []
thetas = []
#print(lines[i+1].split()[0])
thetas.append(float(lines[i].split()[1]))
phis.append(float(lines[i].split()[2]))
i+=1
indices = hp.ang2pix(nside, thetas, phis)
hpxmap = np.zeros(npix, dtype = np.float)
for k in range(len(thetas)):
hpxmap[indices[k]] += 1.0
'''
| mit |
DerThorsten/seglib | seglibpython/seglib/histogram/__init__.py | 1 | 7483 | from _histogram import *
import numpy
import vigra
from scipy.ndimage.filters import gaussian_filter1d as GaussianFilter1d
"""
def gaussianSmoothing1d(histograms,sigma):
nHist = histograms.shape[0]
nBins = histograms.shape[1]
kernel=vigra.filters.Kernel1D()
kernel.initDiscreteGaussian(sigma)
smoothedHistograms=vigra.filters.convolveOneDimension(histograms, dim=1, kernel=kernel)
return smoothedHistograms
"""
from scipy.ndimage.filters import gaussian_filter as GaussainFilter
def histogram(image,dmin=None,dmax=None,bins=32,r=3,sigma=[2.0,1.0],out=None):
img = numpy.require(image,dtype=numpy.float32)
nChannels = img.shape[2]
flat = img.reshape([-1,nChannels])
if dmin is None :
dmin = numpy.require(numpy.min(flat,axis=0),dtype=numpy.float32)
if dmax is None :
dmax = numpy.require(numpy.max(flat,axis=0),dtype=numpy.float32)
# computet the actual histogram
rawHist = _histogram._batchHistogram_( img=img,dmin=dmin,dmax=dmax,bins=bins,r=r,out=out)
if sigma is None :
return rawHist
else :
if isinstance(sigma,(float,int,long)):
sigmas = [sigma]
else :
sigmas = sigma
assert len(sigmas)<=2
if len(sigmas)==1 :
# only smooth bins
for c in range(nChannels):
cHist = rawHist[:,:,c,:]
kernel=vigra.filters.Kernel1D()
kernel.initDiscreteGaussian(float(sigmas[0]))
#kernel.setBorderTreatment()
smoothedHistograms=vigra.filters.convolveOneDimension(cHist, dim=2, kernel=kernel)
rawHist[:,:,c,:] = smoothedHistograms[:,:,:]
return rawHist
if len(sigmas)==2 :
# smooth bins ans spatial
for c in range(nChannels):
cHist = rawHist[:,:,c,:]
s = [sigmas[1]]*2 + [sigmas[0]]
smoothedHistograms = GaussainFilter(cHist,sigma=s,order=0)#,mode='constant',cval=0.0)
rawHist[:,:,c,:] = smoothedHistograms[:,:,:]
return rawHist
def batchJointHistogram(img,r=1,bins=5,sigma=[1.0,1.0]):
nCp = img.shape[2]/3
outShape = [img.shape[0],img.shape[1],nCp,bins,bins,bins]
out = numpy.zeros(outShape,dtype=numpy.float32)
for cp in range(nCp):
inputImg = img[:,:,cp*3:(cp+1)*3]
cOut = out[:,:,cp,:,:,:]
cOut = jointHistogram(image=inputImg,bins=bins,r=r,sigma=sigma,out=cOut)
return out
def jointHistogram(image,dmin=None,dmax=None,bins=5.0,r=1,sigma=[1.0,1.0],out=None):
#img = numpy.require(image,dtype=numpy.float32)
img = image
nChannels = img.shape[2]
flat = img.reshape([-1,nChannels])
#print "flatshape",flat.shape
assert nChannels == 3
if dmin is None :
dmin = numpy.require(numpy.min(flat,axis=0),dtype=numpy.float32)
dmin = [float(dmin[x]) for x in range(3)]
if dmax is None :
dmax = numpy.require(numpy.max(flat,axis=0),dtype=numpy.float32)
dmax = [float(dmax[x]) for x in range(3)]
b = (bins,bins,bins)
#print dmin
#print dmax
imgHist = _histogram._jointColorHistogram_(img=img,dmin=dmin,dmax=dmax,bins=b,r=r,out=out)
if sigma is not None :
s = sigma[1]*2 + sigma[0]*3
imgHist = GaussainFilter(imgHist,sigma=s,order=0)
return imgHist
def labelHistogram(img,nLabels,r=1,sigma=1.0,out=None,visu=False):
nInputLabelings = img.shape[2]
labels = numpy.require(img,dtype=numpy.uint64)
labelHist = _histogram._label_histogram_(img=labels,nLabels=long(nLabels),r=long(r),out=out)
# convolce along x and y axis ( 0 and 1 )
labelHistTmp = labelHist.copy()
labelHistTmp = GaussianFilter1d(labelHist, sigma=sigma, axis=0, order=0, output=None, mode='reflect', cval=0.0)
labelHist = GaussianFilter1d(labelHistTmp, sigma=sigma, axis=1 , order=0, output=None, mode='reflect', cval=0.0)
#print "difference",numpy.sum(numpy.abs(labelHistTmp-labelHist))
if visu :
import pylab
import matplotlib
cmap = matplotlib.colors.ListedColormap ( numpy.random.rand ( 256,3))
for i in range(nInputLabelings):
for l in range(nLabels):
limg = labelHist[:,:,i,l]
pylab.imshow ( numpy.swapaxes(limg,0,1), cmap = "jet")
pylab.show()
return labelHist
def labelHistogramNew(img,nLabels,labelSim,r=1,sigma=1.0,out=None,visu=False):
dx,dy = img.shape[0:2]
ndim = img.ndim
# a single labeling
if ndim == 2 :
assert labelSim.ndim==2
assert labelSim.shape[0]==nLabels
assert labelSim.shape[1]==nLabels
img = img.reshape([dx,dy,1])
labelSim=labelSim.reshape([nLabels,nLabels,1])
# multiple labelings
elif ndim == 3:
assert labelSim.ndim==3
assert labelSim.shape[0]==img.shape[2]
assert labelSim.shape[1]==nLabels
assert labelSim.shape[2]==nLabels
else :
raise RuntimeError("""img.ndim must be either 2 (a single labeling) or 3 (multiple labelings).
the axis ordering must be (x,y) or (x,y,bins)
""");
img = numpy.require(img,dtype=numpy.uint64)
labelSim = numpy.require(labelSim,dtype=numpy.float32)
print img.shape,labelSim.shape
print labelSim[1,2,0],labelSim[2,1,0]
"""
labelSim[:]=0.0
for l in range(nLabels ):
labelSim[l,l,0]=1.0
"""
labelHist=_histogram._label_sim_histogram_(
img=img.copy(),
labelSim=labelSim.copy(),
nLabels=nLabels,
r=r
)
if sigma is not None and sigma >=0.05:
# convolce along x and y axis ( 0 and 1 )
labelHistTmp = labelHist.copy()
labelHistTmp = GaussianFilter1d(labelHist, sigma=sigma, axis=0, order=0, output=None, mode='reflect', cval=0.0)
labelHist = GaussianFilter1d(labelHistTmp, sigma=sigma, axis=1 , order=0, output=None, mode='reflect', cval=0.0)
if visu :
import pylab
import matplotlib
cmap = matplotlib.colors.ListedColormap ( numpy.random.rand ( 256,3))
for i in range(img.shape[2]):
for l in range(nLabels):
limg = labelHist[:,:,i,l]
print "mima",limg.min(),limg.max()
pylab.imshow ( numpy.swapaxes(limg,0,1), cmap = "jet")
pylab.show()
return labelHist
def centerDistToBinSimilarity(centers,norm,gamma,truncate=0.000001):
c = centers.copy()
c-=c.min()
c/=c.max()
k = centers.shape[0]
f = centers.shape[1]
    # print "k", k, "f", f
diffarray = numpy.zeros([k,k],dtype=numpy.float32)
for k1 in range(k-1):
for k2 in range(k1+1,k):
d = numpy.sum(numpy.abs(centers[k1,:]-centers[k2,:])**norm)
#print k1,k2,"diffssss",d
diffarray[k1,k2]=d
diffarray[k2,k1]=d
r = numpy.exp(-gamma*diffarray)
for kk in range(k):
r[kk,kk]=1.0
for kk in range(k):
r[kk,:]=r[kk,:]/numpy.sum(r[kk,:])
    # print r
r[r<truncate]=0.0
for kk in range(k):
r[kk,:]=r[kk,:]/numpy.sum(r[kk,:])
#print r
#for k1 in range(k-1):
# print k1,k1,"diffssss",r[k1,k1],"d",diffarray[k1,k1]
# for k2 in range(k1+1,k):
# print k1,k2,"diffssss",r[k1,k2],"d",diffarray[k1,k2]
return r | mit |
maniksingh92/crowdsource-platform | crowdsourcing/models.py | 2 | 22798 | from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
from oauth2client.django_orm import FlowField, CredentialsField
from crowdsourcing.utils import get_delimiter
import pandas as pd
import os
class RegistrationModel(models.Model):
user = models.OneToOneField(User)
activation_key = models.CharField(max_length=40)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class PasswordResetModel(models.Model):
user = models.OneToOneField(User)
reset_key = models.CharField(max_length=40)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Region(models.Model):
name = models.CharField(max_length=64, error_messages={'required': 'Please specify the region!', })
code = models.CharField(max_length=16, error_messages={'required': 'Please specify the region code!', })
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Country(models.Model):
name = models.CharField(max_length=64, error_messages={'required': 'Please specify the country!', })
code = models.CharField(max_length=8, error_messages={'required': 'Please specify the country code!', })
region = models.ForeignKey(Region)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
def __unicode__(self):
return u'%s' % (self.name)
class City(models.Model):
name = models.CharField(max_length=64, error_messages={'required': 'Please specify the city!', })
country = models.ForeignKey(Country)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
def __unicode__(self):
return u'%s' % (self.name)
class Address(models.Model):
street = models.CharField(max_length=128, error_messages={'required': 'Please specify the street name!', })
country = models.ForeignKey(Country)
city = models.ForeignKey(City)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
def __unicode__(self):
return u'%s, %s, %s' % (self.street, self.city, self.country)
class Role(models.Model):
name = models.CharField(max_length=32, unique=True, error_messages={'required': 'Please specify the role name!',
'unique': 'The role %(value)r already exists. Please provide another name!'})
is_active = models.BooleanField(default=True)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Language(models.Model):
name = models.CharField(max_length=64, error_messages={'required': 'Please specify the language!'})
iso_code = models.CharField(max_length=8)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class UserProfile(models.Model):
user = models.OneToOneField(User)
gender_choices = (('M', 'Male'), ('F', 'Female'))
gender = models.CharField(max_length=1, choices=gender_choices)
address = models.ForeignKey(Address, null=True)
birthday = models.DateField(null=True, error_messages={'invalid': "Please enter a correct date format"})
nationality = models.ManyToManyField(Country, through='UserCountry')
verified = models.BooleanField(default=False)
picture = models.BinaryField(null=True)
friends = models.ManyToManyField('self', through='Friendship',
symmetrical=False)
roles = models.ManyToManyField(Role, through='UserRole')
deleted = models.BooleanField(default=False)
languages = models.ManyToManyField(Language, through='UserLanguage')
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class UserCountry(models.Model):
country = models.ForeignKey(Country)
user = models.ForeignKey(UserProfile)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Skill(models.Model):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the skill name!"})
description = models.CharField(max_length=512, error_messages={'required': "Please enter the skill description!"})
verified = models.BooleanField(default=False)
parent = models.ForeignKey('self', null=True)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Worker(models.Model):
profile = models.OneToOneField(UserProfile)
skills = models.ManyToManyField(Skill, through='WorkerSkill')
deleted = models.BooleanField(default=False)
alias = models.CharField(max_length=32, error_messages={'required': "Please enter an alias!"})
class WorkerSkill(models.Model):
worker = models.ForeignKey(Worker)
skill = models.ForeignKey(Skill)
level = models.IntegerField(null=True)
verified = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('worker', 'skill')
class Requester(models.Model):
profile = models.OneToOneField(UserProfile)
alias = models.CharField(max_length=32, error_messages={'required': "Please enter an alias!"})
class UserRole(models.Model):
user_profile = models.ForeignKey(UserProfile)
role = models.ForeignKey(Role)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Friendship(models.Model):
user_source = models.ForeignKey(UserProfile, related_name='user_source')
user_target = models.ForeignKey(UserProfile, related_name='user_target')
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Category(models.Model):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the category name!"})
parent = models.ForeignKey('self', null=True)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Project(models.Model):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the project name!"})
start_date = models.DateTimeField(auto_now_add=True, auto_now=False)
end_date = models.DateTimeField(auto_now_add=True, auto_now=False)
owner = models.ForeignKey(Requester, related_name='project_owner')
description = models.CharField(max_length=1024, default='')
collaborators = models.ManyToManyField(Requester, through='ProjectRequester')
keywords = models.TextField(null=True)
save_to_drive = models.BooleanField(default=False)
deleted = models.BooleanField(default=False)
categories = models.ManyToManyField(Category, through='ProjectCategory')
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class ProjectRequester(models.Model):
"""
Tracks the list of requesters that collaborate on a specific project
"""
requester = models.ForeignKey(Requester)
project = models.ForeignKey(Project)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('requester', 'project')
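# Illustrative sketch only (not part of the original models): adding a
# collaborator through the intermediary model documented above. The helper
# name and its arguments are assumptions for the example; get_or_create
# respects the ('requester', 'project') unique_together constraint.
def _example_add_collaborator(project, requester):
    collaboration, created = ProjectRequester.objects.get_or_create(
        project=project, requester=requester)
    return collaboration, created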
class Template(models.Model):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the template name!"})
owner = models.ForeignKey(UserProfile)
source_html = models.TextField(default=None, null=True)
price = models.FloatField(default=0)
share_with_others = models.BooleanField(default=False)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Module(models.Model):
"""
    aka Milestone.
    A module is a group of similar tasks of the same kind.
    Fields
        - repetition: the number of times each task needs to be performed
"""
name = models.CharField(max_length=128, error_messages={'required': "Please enter the module name!"})
description = models.TextField(error_messages={'required': "Please enter the module description!"})
owner = models.ForeignKey(Requester)
project = models.ForeignKey(Project, related_name='modules')
categories = models.ManyToManyField(Category, through='ModuleCategory')
keywords = models.TextField(null=True)
# TODO: To be refined
statuses = ((1, "Created"),
(2, 'In Review'),
(3, 'In Progress'),
(4, 'Completed')
)
permission_types = ((1, "Others:Read+Write::Workers:Read+Write"),
(2, 'Others:Read::Workers:Read+Write'),
(3, 'Others:Read::Workers:Read'),
(4, 'Others:None::Workers:Read')
)
status = models.IntegerField(choices=statuses, default=1)
price = models.FloatField()
repetition = models.IntegerField(default=1)
module_timeout = models.IntegerField(default=0)
has_data_set = models.BooleanField(default=False)
data_set_location = models.CharField(max_length=256, default='No data set', null=True)
task_time = models.FloatField(default=0) # in minutes
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
template = models.ManyToManyField(Template, through='ModuleTemplate')
is_micro = models.BooleanField(default=True)
is_prototype = models.BooleanField(default=False)
min_rating = models.FloatField(default=0)
allow_feedback = models.BooleanField(default=True)
feedback_permissions = models.IntegerField(choices=permission_types, default=1)
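# Illustrative sketch only (not part of the original models): querying modules
# by the integer status choices declared above. The helper name is an
# assumption; get_status_display() maps the stored integer back to its label.
def _example_open_modules(project):
    open_statuses = [1, 3]  # "Created" and "In Progress"
    modules = Module.objects.filter(project=project, status__in=open_statuses, deleted=False)
    return [(m.name, m.get_status_display(), m.repetition) for m in modules]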
class ModuleCategory(models.Model):
module = models.ForeignKey(Module)
category = models.ForeignKey(Category)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('category', 'module')
class ProjectCategory(models.Model):
project = models.ForeignKey(Project)
category = models.ForeignKey(Category)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('project', 'category')
class TemplateItem(models.Model):
name = models.CharField(max_length=128, error_messages={'required': "Please enter the name of the template item!"})
template = models.ForeignKey(Template, related_name='template_items')
id_string = models.CharField(max_length=128)
role = models.CharField(max_length=16)
icon = models.CharField(max_length=256, null=True)
data_source = models.CharField(max_length=256, null=True)
layout = models.CharField(max_length=16, default='column')
type = models.CharField(max_length=16)
sub_type = models.CharField(max_length=16)
values = models.TextField(null=True)
position = models.IntegerField()
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
ordering = ['position']
class ModuleTemplate(models.Model):
module = models.ForeignKey(Module)
template = models.ForeignKey(Template)
class TemplateItemProperties(models.Model):
template_item = models.ForeignKey(TemplateItem)
attribute = models.CharField(max_length=128)
operator = models.CharField(max_length=128)
value1 = models.CharField(max_length=128)
value2 = models.CharField(max_length=128)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Task(models.Model):
module = models.ForeignKey(Module, related_name='module_tasks')
# TODO: To be refined
statuses = ((1, "Created"),
(2, 'Accepted'),
(3, 'Assigned'),
(4, 'Finished')
)
status = models.IntegerField(choices=statuses, default=1)
data = models.TextField(null=True)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
price = models.FloatField(default=0)
class TaskWorker(models.Model):
task = models.ForeignKey(Task, related_name='task_workers')
worker = models.ForeignKey(Worker)
statuses = ((1, 'In Progress'),
(2, 'Submitted'),
(3, 'Accepted'),
(4, 'Rejected'),
(5, 'Returned'),
(6, 'Skipped')
)
task_status = models.IntegerField(choices=statuses, default=1)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
is_paid = models.BooleanField(default=False)
class TaskWorkerResult(models.Model):
task_worker = models.ForeignKey(TaskWorker, related_name='task_worker_results')
result = models.TextField(null=True)
template_item = models.ForeignKey(TemplateItem)
# TODO: To be refined
statuses = ((1, 'Created'),
(2, 'Accepted'),
(3, 'Rejected')
)
status = models.IntegerField(choices=statuses, default=1)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class WorkerModuleApplication(models.Model):
worker = models.ForeignKey(Worker)
module = models.ForeignKey(Module)
# TODO: To be refined
statuses = ((1, "Created"),
(2, 'Accepted'),
(3, 'Rejected')
)
status = models.IntegerField(choices=statuses, default=1)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class ActivityLog(models.Model):
"""
    Tracks all user activities: Create, Update and Delete
"""
activity = models.CharField(max_length=512)
author = models.ForeignKey(User)
created_timestamp = models.DateTimeField(auto_now_add=False, auto_now=True)
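# Illustrative sketch only (not part of the original models): writing one row
# to the activity log described above. The helper name and the message format
# are assumptions for the example.
def _example_log_activity(user, action, subject):
    return ActivityLog.objects.create(author=user, activity='%s %s' % (action, subject))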
class Qualification(models.Model):
module = models.ForeignKey(Module)
# TODO: To be refined
types = ((1, "Strict"),
(2, 'Flexible'))
type = models.IntegerField(choices=types, default=1)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class QualificationItem(models.Model):
qualification = models.ForeignKey(Qualification)
attribute = models.CharField(max_length=128)
operator = models.CharField(max_length=128)
value1 = models.CharField(max_length=128)
value2 = models.CharField(max_length=128)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class UserLanguage(models.Model):
language = models.ForeignKey(Language)
user = models.ForeignKey(UserProfile)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Currency(models.Model):
name = models.CharField(max_length=32)
iso_code = models.CharField(max_length=8)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class UserPreferences(models.Model):
user = models.OneToOneField(User)
language = models.ForeignKey(Language)
currency = models.ForeignKey(Currency)
login_alerts = models.SmallIntegerField(default=0)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class RequesterRanking(models.Model):
requester_name = models.CharField(max_length=128)
requester_payRank = models.FloatField()
requester_fairRank = models.FloatField()
requester_speedRank = models.FloatField()
requester_communicationRank = models.FloatField()
requester_numberofReviews = models.IntegerField(default=0)
class ModuleRating(models.Model):
worker = models.ForeignKey(Worker)
module = models.ForeignKey(Module)
value = models.IntegerField()
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('worker', 'module')
class ModuleReview(models.Model):
worker = models.ForeignKey(Worker)
anonymous = models.BooleanField(default=False)
module = models.ForeignKey(Module)
comments = models.TextField()
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
unique_together = ('worker', 'module')
class FlowModel(models.Model):
id = models.OneToOneField(User, primary_key=True)
flow = FlowField()
class AccountModel(models.Model):
name = models.CharField(max_length=128)
type = models.CharField(max_length=16)
email = models.EmailField()
access_token = models.TextField(max_length=2048)
root = models.CharField(max_length=256)
is_active = models.IntegerField()
quota = models.BigIntegerField()
used_space = models.BigIntegerField()
assigned_space = models.BigIntegerField()
status = models.IntegerField(default=quota)
owner = models.ForeignKey(User)
class CredentialsModel(models.Model):
account = models.ForeignKey(AccountModel)
credential = CredentialsField()
class TemporaryFlowModel(models.Model):
user = models.ForeignKey(User)
type = models.CharField(max_length=16)
email = models.EmailField()
class BookmarkedProjects(models.Model):
profile = models.ForeignKey(UserProfile)
project = models.ForeignKey(Project)
class Conversation(models.Model):
subject = models.CharField(max_length=64)
sender = models.ForeignKey(User, related_name='sender')
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
deleted = models.BooleanField(default=False)
recipients = models.ManyToManyField(User, through='ConversationRecipient')
class Message(models.Model):
conversation = models.ForeignKey(Conversation, related_name='messages')
sender = models.ForeignKey(User)
body = models.TextField(max_length=8192)
deleted = models.BooleanField(default=False)
status = models.IntegerField(default=1) # 1:Sent 2:Delivered 3:Read
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
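# Illustrative sketch only (not part of the original models): advancing the
# inline status flag on Message (1:Sent 2:Delivered 3:Read). The helper name
# is an assumption for the example.
def _example_mark_message_read(message):
    message.status = 3  # Read
    message.save(update_fields=['status', 'last_updated'])
    return message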
class ConversationRecipient(models.Model):
recipient = models.ForeignKey(User, related_name='recipients')
conversation = models.ForeignKey(Conversation, related_name='conversation_recipient')
date_added = models.DateTimeField(auto_now_add=True, auto_now=False)
class UserMessage(models.Model):
message = models.ForeignKey(Message)
user = models.ForeignKey(User)
deleted = models.BooleanField(default=False)
class RequesterInputFile(models.Model):
    # TODO: files will need to be saved on a server rather than in a temporary folder
file = models.FileField(upload_to='tmp/')
deleted = models.BooleanField(default=False)
def parse_csv(self):
delimiter = get_delimiter(self.file.name)
df = pd.DataFrame(pd.read_csv(self.file, sep=delimiter))
return df.to_dict(orient='records')
def delete(self, *args, **kwargs):
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
path = os.path.join(root, self.file.url[1:])
os.remove(path)
super(RequesterInputFile, self).delete(*args, **kwargs)
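# Illustrative sketch only (not part of the original models): turning the
# records returned by parse_csv() above into Task rows for a module. The
# helper name and the str() serialisation of each record are assumptions.
def _example_tasks_from_csv(input_file, module):
    tasks = [Task(module=module, data=str(record)) for record in input_file.parse_csv()]
    return Task.objects.bulk_create(tasks)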
class WorkerRequesterRating(models.Model):
origin = models.ForeignKey(UserProfile, related_name='rating_origin')
target = models.ForeignKey(UserProfile, related_name='rating_target')
module = models.ForeignKey(Module, related_name='rating_module')
weight = models.FloatField(default=2)
type = models.CharField(max_length=16)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Comment(models.Model):
sender = models.ForeignKey(UserProfile, related_name='comment_sender')
body = models.TextField(max_length=8192)
parent = models.ForeignKey('self', related_name='reply_to', null=True)
deleted = models.BooleanField(default=False)
created_timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
last_updated = models.DateTimeField(auto_now_add=False, auto_now=True)
class Meta:
ordering = ['created_timestamp']
class ModuleComment(models.Model):
module = models.ForeignKey(Module, related_name='modulecomment_module')
comment = models.ForeignKey(Comment, related_name='modulecomment_comment')
deleted = models.BooleanField(default=False)
class TaskComment(models.Model):
task = models.ForeignKey(Task, related_name='taskcomment_task')
comment = models.ForeignKey(Comment, related_name='taskcomment_comment')
deleted = models.BooleanField(default=False) | mit |
chengsoonong/digbeta | dchen/music/src/PLGEN1_clf.py | 2 | 2669 | import os
import sys
import gzip
import time
import numpy as np
import pickle as pkl
from sklearn.metrics import roc_auc_score
from MTC import MTC
if len(sys.argv) != 7:
print('Usage: python', sys.argv[0],
'WORK_DIR DATASET C1 C2 P TRAIN_DEV(Y/N)')
sys.exit(0)
else:
work_dir = sys.argv[1]
dataset = sys.argv[2]
C1 = float(sys.argv[3])
C2 = float(sys.argv[4])
p = float(sys.argv[5])
trndev = sys.argv[6]
# assert trndev in ['Y', 'N']
# assert trndev == 'Y'
if trndev != 'Y':
raise ValueError('trndev should be "Y"')
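# Illustrative example of the expected command line (values are hypothetical):
#   python PLGEN1_clf.py /path/to/work aotm2011 1 1 3 Y
# i.e. WORK_DIR DATASET C1 C2 P TRAIN_DEV, matching the six arguments parsed above.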
data_dir = os.path.join(work_dir, 'data/%s/setting3' % dataset)
fx = os.path.join(data_dir, 'X.pkl.gz')
fytrain = os.path.join(data_dir, 'Y_train.pkl.gz')
fytest = os.path.join(data_dir, 'Y_test.pkl.gz')
fcliques_train = os.path.join(data_dir, 'cliques_train.pkl.gz')
fcliques_all = os.path.join(data_dir, 'cliques_all.pkl.gz')
fprefix = 'trndev-plgen1-clf-%g-%g-%g' % (C1, C2, p)
fmodel = os.path.join(data_dir, '%s.pkl.gz' % fprefix)
fnpy = os.path.join(data_dir, '%s.npy' % fprefix)
X = pkl.load(gzip.open(fx, 'rb'))
X = np.hstack([np.ones((X.shape[0], 1)), X])
Y_train = pkl.load(gzip.open(fytrain, 'rb'))
Y_test = pkl.load(gzip.open(fytest, 'rb'))
cliques_train = pkl.load(gzip.open(fcliques_train, 'rb'))
cliques_all = pkl.load(gzip.open(fcliques_all, 'rb'))
print('C: %g, %g, p: %g' % (C1, C2, p))
print(X.shape, Y_train.shape)
print(time.strftime('%Y-%m-%d %H:%M:%S'))
if os.path.exists(fmodel):
print('evaluating ...')
clf = pkl.load(gzip.open(fmodel, 'rb')) # for evaluation
else:
print('training ...')
clf = MTC(X, Y_train, C1=C1, C2=C2, p=p, user_playlist_indices=cliques_train)
clf.fit(verbose=2, fnpy=fnpy)
if clf.trained is True:
pkl.dump(clf, gzip.open(fmodel, 'wb'))
pl2u = np.zeros(Y_train.shape[1] + Y_test.shape[1], dtype=np.int)
U = len(cliques_train)
assert len(cliques_all) == U
for u in range(U):
clq = cliques_all[u]
pl2u[clq] = u
assert np.all(clf.pl2u == pl2u[:Y_train.shape[1]])
rps = []
aucs = []
offset = Y_train.shape[1]
for j in range(Y_test.shape[1]):
y_true = Y_test[:, j].A.reshape(-1)
npos = y_true.sum()
assert npos > 0
u = pl2u[j + offset]
wj = clf.V[u, :] + clf.mu
y_pred = np.dot(X, wj).reshape(-1)
    sortix = np.argsort(-y_pred)            # rank songs by decreasing predicted score
    y_ = y_true[sortix]
    rps.append(np.mean(y_[:npos]))          # R-precision: fraction of hits in the top-npos
aucs.append(roc_auc_score(y_true, y_pred))
clf.metric_score = (np.mean(rps), np.mean(aucs), len(rps), Y_test.shape[1])
pkl.dump(clf, gzip.open(fmodel, 'wb'))
print('\n%g, %g, %d / %d' % clf.metric_score)
| gpl-3.0 |
spallavolu/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 138 | 14048 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
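# Illustrative sketch (not one of the original tests): a valid make_scorer
# construction, for contrast with the invalid-argument case checked above.
# greater_is_better=False flips the sign so that a loss can be maximised
# during model selection.
def _example_valid_make_scorer():
    mean_abs_error = lambda y_true, y_pred: np.abs(np.asarray(y_true) - np.asarray(y_pred)).mean()
    return make_scorer(mean_abs_error, greater_is_better=False)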
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, np.vstack([p[:, -1] for p in y_proba]).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, np.vstack([p for p in y_proba]).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
estimator = dict([(name, sensible_regr)
for name in REGRESSION_SCORERS] +
[(name, sensible_clf)
for name in CLF_SCORERS] +
[(name, sensible_ml_clf)
for name in MULTILABEL_ONLY_SCORERS])
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
| bsd-3-clause |
xclxxl414/rqalpha | tests/api/test_api_base.py | 1 | 20648 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rqalpha.api import *
from ..utils import make_test_strategy_decorator
test_strategies = []
as_test_strategy = make_test_strategy_decorator({
"base": {
"start_date": "2016-12-01",
"end_date": "2016-12-31",
"frequency": "1d",
"accounts": {
"stock": 1000000,
"future": 1000000,
}
},
"extra": {
"log_level": "error",
},
"mod": {
"sys_progress": {
"enabled": True,
"show": True,
},
},
}, test_strategies)
@as_test_strategy()
def test_get_order():
def init(context):
context.s1 = '000001.XSHE'
context.amount = 100
def handle_bar(context, _):
order_id = order_shares(context.s1, context.amount, style=LimitOrder(9.5))
order = get_order(order_id)
assert order.order_book_id == context.s1
assert order.quantity == context.amount
assert order.unfilled_quantity + order.filled_quantity == order.quantity
return init, handle_bar
@as_test_strategy()
def test_get_open_order():
def init(context):
context.s1 = '000001.XSHE'
context.limitprice = 8.9
context.amount = 100
context.counter = 0
context.order_id = None
def handle_bar(context, _):
context.counter += 1
order = order_shares(context.s1, context.amount, style=LimitOrder(context.limitprice))
context.order_id = order.order_id
if context.counter == 2:
assert order.order_id in get_open_orders()
context.counter = 0
return init, handle_bar
@as_test_strategy()
def test_submit_order():
def init(context):
context.s1 = '000001.XSHE'
context.amount = 100
context.fired = False
def handle_bar(context, bar_dict):
if not context.fired:
submit_order(context.s1, context.amount, SIDE.BUY, bar_dict[context.s1].limit_up * 0.99)
context.fired = True
if context.fired:
assert context.portfolio.positions[context.s1].quantity == context.amount
return init, handle_bar
@as_test_strategy()
def test_cancel_order():
def init(context):
context.s1 = '000001.XSHE'
context.amount = 100
def handle_bar(context, bar_dict):
order = order_shares(context.s1, context.amount, style=LimitOrder(bar_dict[context.s1].limit_down))
cancel_order(order)
assert order.order_book_id == context.s1
assert order.filled_quantity == 0
assert order.price == bar_dict[context.s1].limit_down
assert order.status == ORDER_STATUS.CANCELLED
return init, handle_bar
@as_test_strategy()
def test_update_universe():
def init(context):
context.s1 = '000001.XSHE'
context.s2 = '600340.XSHG'
context.order_count = 0
context.amount = 100
def handle_bar(context, _):
context.order_count += 1
if context.order_count == 1:
update_universe(context.s2)
his = history_bars(context.s2, 5, '1d', 'close')
assert sorted(his.tolist()) == sorted([26.06, 26.13, 26.54, 26.6, 26.86])
return init, handle_bar
@as_test_strategy()
def test_subscribe():
def init(context):
context.f1 = 'AU88'
context.amount = 1
subscribe(context.f1)
def handle_bar(context, _):
assert context.f1 in context.universe
return init, handle_bar
@as_test_strategy()
def test_unsubscribe():
def init(context):
context.f1 = 'AU88'
context.amount = 1
subscribe(context.f1)
unsubscribe(context.f1)
def handle_bar(context, _):
assert context.f1 not in context.universe
return init, handle_bar
@as_test_strategy
def test_get_yield_curve():
def handle_bar(_, __):
df = get_yield_curve('20161101')
assert df.iloc[0, 0] == 0.019923
assert df.iloc[0, 6] == 0.021741
return handle_bar
@as_test_strategy()
def test_history_bars():
    def init(context):
        context.s1 = '000001.XSHE'
    def handle_bar(context, _):
        return_list = history_bars(context.s1, 5, '1d', 'close')
        if str(context.now.date()) == '2016-12-29':
            assert return_list.tolist() == [9.08, 9.12, 9.08, 9.06, 9.08]
    return init, handle_bar
@as_test_strategy()
def test_all_instruments():
def handle_bar(context, _):
date = context.now.replace(hour=0, minute=0, second=0)
df = all_instruments('CS')
assert (df['listed_date'] <= date).all()
assert (df['de_listed_date'] >= date).all()
# assert all(not is_suspended(o) for o in df['order_book_id'])
assert (df['type'] == 'CS').all()
df1 = all_instruments('Stock')
assert sorted(df['order_book_id']) == sorted(df1['order_book_id'])
df2 = all_instruments('Future')
assert (df2['type'] == 'Future').all()
assert (df2['listed_date'] <= date).all()
assert (df2['de_listed_date'] >= date).all()
df3 = all_instruments(['Future', 'Stock'])
assert sorted(list(df['order_book_id']) + list(df2['order_book_id'])) == sorted(df3['order_book_id'])
return handle_bar
@as_test_strategy()
def test_instruments_code():
def init(context):
context.s1 = '000001.XSHE'
def handle_bar(context, _):
ins = instruments(context.s1)
assert ins.sector_code_name == '金融'
assert ins.symbol == '平安银行'
assert ins.order_book_id == context.s1
assert ins.type == 'CS'
return init, handle_bar
@as_test_strategy()
def test_sector():
def handle_bar(_, __):
        assert len(sector('金融')) >= 80, "sector('金融') returned fewer than 80 results"
return handle_bar
@as_test_strategy()
def test_industry():
def init(context):
context.s1 = '000001.XSHE'
context.s2 = '600340.XSHG'
def handle_bar(context, _):
ins_1 = instruments(context.s1)
ins_2 = instruments(context.s2)
industry_list_1 = industry(ins_1.industry_name)
industry_list_2 = industry(ins_2.industry_name)
assert context.s1 in industry_list_1
assert context.s2 in industry_list_2
return init, handle_bar
@as_test_strategy()
def test_get_trading_dates():
import datetime
def init(_):
trading_dates_list = get_trading_dates('2016-12-15', '2017-01-03')
correct_dates_list = [datetime.date(2016, 12, 15), datetime.date(2016, 12, 16), datetime.date(2016, 12, 19),
datetime.date(2016, 12, 20), datetime.date(2016, 12, 21), datetime.date(2016, 12, 22),
datetime.date(2016, 12, 23), datetime.date(2016, 12, 26), datetime.date(2016, 12, 27),
datetime.date(2016, 12, 28), datetime.date(2016, 12, 29), datetime.date(2016, 12, 30),
datetime.date(2017, 1, 3)]
assert sorted([item.strftime("%Y%m%d") for item in correct_dates_list]) == sorted(
[item.strftime("%Y%m%d") for item
in trading_dates_list])
return init
@as_test_strategy()
def test_get_previous_trading_date():
def init(_):
assert str(get_previous_trading_date('2017-01-03').date()) == '2016-12-30'
assert str(get_previous_trading_date('2016-01-03').date()) == '2015-12-31'
assert str(get_previous_trading_date('2015-01-03').date()) == '2014-12-31'
assert str(get_previous_trading_date('2014-01-03').date()) == '2014-01-02'
assert str(get_previous_trading_date('2010-01-03').date()) == '2009-12-31'
assert str(get_previous_trading_date('2009-01-03').date()) == '2008-12-31'
assert str(get_previous_trading_date('2005-01-05').date()) == '2005-01-04'
return init
@as_test_strategy()
def test_get_next_trading_date():
def init(_):
assert str(get_next_trading_date('2017-01-03').date()) == '2017-01-04'
assert str(get_next_trading_date('2007-01-03').date()) == '2007-01-04'
return init
@as_test_strategy()
def test_get_dividend():
def handle_bar(_, __):
df = get_dividend('000001.XSHE', start_date='20130104')
df_to_assert = df[df['book_closure_date'] == 20130619]
assert len(df) >= 4
assert df_to_assert[0]['dividend_cash_before_tax'] == 1.7
assert df_to_assert[0]['payable_date'] == 20130620
    return handle_bar
test_get_order_code = '''
from rqalpha.api import order_shares, get_order
def init(context):
context.s1 = '000001.XSHE'
context.amount = 100
def handle_bar(context, bar_dict):
assert 1 == 2
order_id = order_shares(context.s1, context.amount, style=LimitOrder(9.5))
order = get_order(order_id)
assert order.order_book_id == context.s1
assert order.quantity == context.amount
assert order.unfilled_quantity + order.filled_quantity == order.quantity
'''
test_get_open_order_code = '''
from rqalpha.api import order_shares, get_open_orders
def init(context):
context.s1 = '000001.XSHE'
context.limitprice = 8.9
context.amount = 100
context.counter = 0
context.order_id = None
def handle_bar(context, bar_dict):
context.counter += 1
order = order_shares(context.s1, context.amount, style=LimitOrder(context.limitprice))
context.order_id = order.order_id
print('cash: ', context.portfolio.cash)
print('check_get_open_orders done')
print(order.order_id)
# print(get_open_orders())
print(get_open_orders())
print(get_order(order.order_id))
if context.counter == 2:
assert order.order_id in get_open_orders()
context.counter = 0
'''
test_cancel_order_code = '''
from rqalpha.api import order_shares, cancel_order, get_order
def init(context):
context.s1 = '000001.XSHE'
context.limitprice = 8.59
context.amount = 100
def handle_bar(context, bar_dict):
order_id = order_shares(context.s1, context.amount, style=LimitOrder(context.limitprice))
cancel_order(order_id)
order = get_order(order_id)
assert order.order_book_id == context.s1
assert order.filled_quantity == 0
return order_id
assert order.price == context.limitprice
'''
test_update_universe_code = '''
from rqalpha.api import update_universe, history_bars
def init(context):
context.s1 = '000001.XSHE'
context.s2 = '600340.XSHG'
context.order_count = 0
context.amount = 100
def handle_bar(context, bar_dict):
context.order_count += 1
if context.order_count == 1:
update_universe(context.s2)
his = history_bars(context.s2, 5, '1d', 'close')
print(sorted(his.tolist()))
print(sorted([24.1, 23.71, 23.82, 23.93, 23.66]))
assert sorted(his.tolist()) == sorted([26.06, 26.13, 26.54, 26.6, 26.86])
'''
test_subscribe_code = '''
from rqalpha.api import subscribe
def init(context):
context.f1 = 'AU88'
context.amount = 1
subscribe(context.f1)
def handle_bar(context, bar_dict):
assert context.f1 in context.universe
'''
test_unsubscribe_code = '''
from rqalpha.api import subscribe, unsubscribe
def init(context):
context.f1 = 'AU88'
context.amount = 1
subscribe(context.f1)
unsubscribe(context.f1)
def handle_bar(context, bar_dict):
assert context.f1 not in context.universe
'''
test_get_yield_curve_code = '''
from rqalpha.api import get_yield_curve
def init(context):
pass
def handle_bar(context, bar_dict):
df = get_yield_curve('20161101')
assert df.iloc[0, 0] == 0.019923
assert df.iloc[0, 6] == 0.021741
'''
test_history_bars_code = '''
from rqalpha.api import history_bars
def init(context):
context.s1 = '000001.XSHE'
pass
def handle_bar(context, bar_dict):
return_list = history_bars(context.s1, 5, '1d', 'close')
if str(context.now.date()) == '2016-12-29':
assert return_list.tolist() == [9.08, 9.1199, 9.08, 9.06, 9.08]
'''
test_all_instruments_code = '''
from rqalpha.api import all_instruments
def init(context):
pass
def handle_bar(context, bar_dict):
df = all_instruments('FenjiA')
df_to_assert = df.loc[df['order_book_id'] == '150247.XSHE']
assert df_to_assert.iloc[0, 0] == 'CMAJ'
assert df_to_assert.iloc[0, 7] == '工银中证传媒A'
assert all_instruments().shape >= (8000, 4)
assert all_instruments('CS').shape >= (3000, 16)
assert all_instruments('ETF').shape >= (120, 9)
assert all_instruments('LOF').shape >= (130, 9)
assert all_instruments('FenjiMu').shape >= (10, 9)
assert all_instruments('FenjiA').shape >= (120, 9)
assert all_instruments('FenjiB').shape >= (140, 9)
assert all_instruments('INDX').shape >= (500, 8)
assert all_instruments('Future').shape >= (3500, 16)
'''
test_instruments_code = '''
from rqalpha.api import instruments
def init(context):
context.s1 = '000001.XSHE'
pass
def handle_bar(context, bar_dict):
print('hello')
ins = instruments(context.s1)
assert ins.sector_code_name == '金融'
assert ins.symbol == '平安银行'
assert ins.order_book_id == context.s1
assert ins.type == 'CS'
print('world')
'''
test_sector_code = '''
from rqalpha.api import sector
def init(context):
pass
def handle_bar(context, bar_dict):
assert len(sector('金融')) >= 180
'''
test_industry_code = '''
from rqalpha.api import industry, instruments
def init(context):
context.s1 = '000001.XSHE'
context.s2 = '600340.XSHG'
def handle_bar(context, bar_dict):
ins_1 = instruments(context.s1)
ins_2 = instruments(context.s2)
industry_list_1 = industry(ins_1.industry_name)
industry_list_2 = industry(ins_2.industry_name)
assert context.s1 in industry_list_1
assert context.s2 in industry_list_2
'''
test_get_trading_dates_code = '''
from rqalpha.api import get_trading_dates
import datetime
def init(context):
pass
def handle_bar(context, bar_dict):
trading_dates_list = get_trading_dates('2016-12-15', '2017-01-03')
correct_dates_list = [datetime.date(2016, 12, 15), datetime.date(2016, 12, 16), datetime.date(2016, 12, 19),
datetime.date(2016, 12, 20), datetime.date(2016, 12, 21), datetime.date(2016, 12, 22),
datetime.date(2016, 12, 23), datetime.date(2016, 12, 26), datetime.date(2016, 12, 27),
datetime.date(2016, 12, 28), datetime.date(2016, 12, 29), datetime.date(2016, 12, 30),
datetime.date(2017, 1, 3)]
assert sorted([item.strftime("%Y%m%d") for item in correct_dates_list]) == sorted([item.strftime("%Y%m%d") for item
in trading_dates_list])
'''
test_get_previous_trading_date_code = '''
from rqalpha.api import get_previous_trading_date
def init(context):
pass
def handle_bar(context, bar_dict):
assert str(get_previous_trading_date('2017-01-03').date()) == '2016-12-30'
assert str(get_previous_trading_date('2016-01-03').date()) == '2015-12-31'
assert str(get_previous_trading_date('2015-01-03').date()) == '2014-12-31'
assert str(get_previous_trading_date('2014-01-03').date()) == '2014-01-02'
assert str(get_previous_trading_date('2010-01-03').date()) == '2009-12-31'
assert str(get_previous_trading_date('2009-01-03').date()) == '2008-12-31'
assert str(get_previous_trading_date('2005-01-05').date()) == '2005-01-04'
'''
test_get_next_trading_date_code = '''
from rqalpha.api import get_next_trading_date
def init(context):
pass
def handle_bar(context, bar_dict):
assert str(get_next_trading_date('2017-01-03').date()) == '2017-01-04'
assert str(get_next_trading_date('2007-01-03').date()) == '2007-01-04'
'''
test_get_dividend_code = '''
from rqalpha.api import get_dividend
import pandas
def init(context):
pass
def handle_bar(context, bar_dict):
df = get_dividend('000001.XSHE', start_date='20130104')
df_to_assert = df.loc[df['book_closure_date'] == ' 2013-06-19']
assert df.shape >= (4, 5)
assert df_to_assert.iloc[0, 1] == 0.9838
assert df_to_assert.iloc[0, 3] == pandas.tslib.Timestamp('2013-06-20 00:00:00')
'''
@as_test_strategy()
def test_current_snapshot():
def handle_bar(_, bar_dict):
snapshot = current_snapshot('000001.XSHE')
bar = bar_dict['000001.XSHE']
assert snapshot.last == bar.close
for field in (
"open", "high", "low", "prev_close", "volume", "total_turnover", "order_book_id", "datetime",
"limit_up", "limit_down"
):
assert getattr(bar, field) == getattr(snapshot, field), "snapshot.{} = {}, bar.{} = {}".format(
field, getattr(snapshot, field), field, getattr(bar, field)
)
return handle_bar
@as_test_strategy()
def test_get_position():
def assert_position(pos, obid, dir, today_quantity, old_quantity, avg_price):
assert pos.order_book_id == obid
assert pos.direction == dir, "Direction of {} is expected to be {} instead of {}".format(
pos.order_book_id, dir, pos.direction
)
assert pos.today_quantity == today_quantity
assert pos.old_quantity == old_quantity
assert pos.quantity == (today_quantity + old_quantity)
assert pos.avg_price == avg_price
def init(context):
context.counter = 0
context.expected_avg_price = None
def handle_bar(context, bar_dict):
context.counter += 1
if context.counter == 1:
order_shares("000001.XSHE", 300)
context.expected_avg_price = bar_dict["000001.XSHE"].close
elif context.counter == 5:
order_shares("000001.XSHE", -100)
elif context.counter == 10:
sell_open("RB1701", 5)
context.expected_avg_price = bar_dict["RB1701"].close
elif context.counter == 15:
buy_close("RB1701", 2)
if context.counter == 1:
pos = get_positions()[0]
assert_position(pos, "000001.XSHE", POSITION_DIRECTION.LONG, 300, 0, context.expected_avg_price)
elif 1 < context.counter < 5:
pos = get_positions()[0]
assert_position(pos, "000001.XSHE", POSITION_DIRECTION.LONG, 0, 300, context.expected_avg_price)
elif 5 <= context.counter < 10:
pos = get_position("000001.XSHE", POSITION_DIRECTION.LONG)
assert_position(pos, "000001.XSHE", POSITION_DIRECTION.LONG, 0, 200, context.expected_avg_price)
elif context.counter == 10:
pos = get_position("RB1701", POSITION_DIRECTION.SHORT)
assert_position(pos, "RB1701", POSITION_DIRECTION.SHORT, 5, 0, context.expected_avg_price)
elif 10 < context.counter < 15:
pos = get_position("RB1701", POSITION_DIRECTION.SHORT)
assert_position(pos, "RB1701", POSITION_DIRECTION.SHORT, 0, 5, context.expected_avg_price)
elif context.counter >= 15:
pos = get_position("RB1701", POSITION_DIRECTION.SHORT)
assert_position(pos, "RB1701", POSITION_DIRECTION.SHORT, 0, 3, context.expected_avg_price)
return init, handle_bar
| apache-2.0 |
ilyes14/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 71 | 18815 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features).reshape(1, -1)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether Returned distances are less than `radius`
# At least one point should be returned when the `radius` is set
# to mean distance from the considering point to other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)].reshape(1, -1)
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)].reshape(1, -1)
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [[1., 0.]]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point form an angle of 45 degrees to the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
    # The third point is orthogonal to the query vector, hence at a distance
    # of exactly one:
assert_almost_equal(dists[2], 1)
    # The last point is almost collinear with the query but points in the
    # opposite direction, therefore its cosine 'distance' is very close to
    # the maximum possible value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
    # If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
lshf.fit(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
    # Checks whether inserting an array is consistent with the fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
lshf.partial_fit(X)
assert_array_equal(X, lshf._fit_X)
lshf.fit(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
lshf.partial_fit(X_partial_fit)
    # size of _fit_X = n_samples + n_samples_partial_fit after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
    # size of original_indices_[0] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
    # size of trees_[1] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
    # Checks randomness of hash functions.
    # Variance and mean of each hash function (projection vector)
    # should be different from those of the flattened array of all hash
    # functions. If the hash functions were not randomly built (i.e. all
    # seeded with the same value), the variances and means of all
    # functions would be equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
lshf.fit(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
lshf.fit(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
sserrot/champion_relationships | venv/share/doc/networkx-2.4/examples/graph/plot_roget.py | 1 | 2636 | #!/usr/bin/env python
"""
=====
Roget
=====
Build a directed graph of 1022 categories and
5075 cross-references as defined in the 1879 version of Roget's Thesaurus
contained in the datafile roget_dat.txt. This example is described in
Section 1.2 in Knuth's book (see [1]_ and [2]_).
Note that one of the 5075 cross references is a self loop yet
it is included in the graph built here because
the standard networkx `DiGraph` class allows self loops.
(cf. 400pungency:400 401 403 405).
References
----------
.. [1] Donald E. Knuth,
"The Stanford GraphBase: A Platform for Combinatorial Computing",
ACM Press, New York, 1993.
.. [2] http://www-cs-faculty.stanford.edu/~knuth/sgb.html
"""
# Authors: Brendt Wohlberg, Aric Hagberg (hagberg@lanl.gov)
# Date: 2005-04-01 07:56:22 -0700 (Fri, 01 Apr 2005)
# Copyright (C) 2004-2019 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import gzip
import re
import sys
import matplotlib.pyplot as plt
import networkx as nx
def roget_graph():
""" Return the thesaurus graph from the roget.dat example in
the Stanford Graph Base.
"""
# open file roget_dat.txt.gz (or roget_dat.txt)
fh = gzip.open('roget_dat.txt.gz', 'r')
G = nx.DiGraph()
for line in fh.readlines():
line = line.decode()
if line.startswith("*"): # skip comments
continue
if line.startswith(" "): # this is a continuation line, append
line = oldline + line
if line.endswith("\\\n"): # continuation line, buffer, goto next
oldline = line.strip("\\\n")
continue
(headname, tails) = line.split(":")
# head
        numfind = re.compile(r"^\d+") # re to find the number of this word
head = numfind.findall(headname)[0] # get the number
G.add_node(head)
for tail in tails.split():
if head == tail:
print("skipping self loop", head, tail, file=sys.stderr)
G.add_edge(head, tail)
return G
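# Hedged aside (added for illustration, not part of the original example): a
# minimal check of the behaviour noted in the module docstring -- the standard
# networkx DiGraph keeps self loops, which is why the single 400:400 cross
# reference survives in the graph built by roget_graph().
_selfloop_demo = nx.DiGraph()
_selfloop_demo.add_edge("400", "400")
assert _selfloop_demo.has_edge("400", "400")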
if __name__ == '__main__':
G = roget_graph()
print("Loaded roget_dat.txt containing 1022 categories.")
print("digraph has %d nodes with %d edges"
% (nx.number_of_nodes(G), nx.number_of_edges(G)))
UG = G.to_undirected()
print(nx.number_connected_components(UG), "connected components")
options = {
'node_color': 'black',
'node_size': 1,
'line_color': 'grey',
'linewidths': 0,
'width': 0.1,
}
nx.draw_circular(UG, **options)
plt.show()
| mit |
Hiyorimi/scikit-image | doc/examples/transform/plot_radon_transform.py | 4 | 8773 | """
===============
Radon transform
===============
In computed tomography, the tomography reconstruction problem is to obtain
a tomographic slice image from a set of projections [1]_. A projection is
formed by drawing a set of parallel rays through the 2D object of interest,
assigning the integral of the object's contrast along each ray to a single
pixel in the projection. A single projection of a 2D object is one dimensional.
To enable computed tomography reconstruction of the object, several projections
must be acquired, each of them corresponding to a different angle between the
rays with respect to the object. A collection of projections at several angles
is called a sinogram, which is a linear transform of the original image.
The inverse Radon transform is used in computed tomography to reconstruct
a 2D image from the measured projections (the sinogram). A practical, exact
implementation of the inverse Radon transform does not exist, but there are
several good approximate algorithms available.
As the inverse Radon transform reconstructs the object from a set of
projections, the (forward) Radon transform can be used to simulate a
tomography experiment.
This script performs the Radon transform to simulate a tomography experiment
and reconstructs the input image based on the resulting sinogram formed by
the simulation. Two methods for performing the inverse Radon transform
and reconstructing the original image are compared: The Filtered Back
Projection (FBP) and the Simultaneous Algebraic Reconstruction
Technique (SART).
For further information on tomographic reconstruction, see
- AC Kak, M Slaney, "Principles of Computerized Tomographic Imaging",
http://www.slaney.org/pct/pct-toc.html
- http://en.wikipedia.org/wiki/Radon_transform
The forward transform
=====================
As our original image, we will use the Shepp-Logan phantom. When calculating
the Radon transform, we need to decide how many projection angles we wish
to use. As a rule of thumb, the number of projections should be about the
same as the number of pixels there are across the object (to see why this
is so, consider how many unknown pixel values must be determined in the
reconstruction process and compare this to the number of measurements
provided by the projections), and we follow that rule here. Below is the
original image and its Radon transform, often known as its _sinogram_:
"""
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
from skimage.io import imread
from skimage import data_dir
from skimage.transform import radon, rescale
image = imread(data_dir + "/phantom.png", as_grey=True)
image = rescale(image, scale=0.4)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4.5))
ax1.set_title("Original")
ax1.imshow(image, cmap=plt.cm.Greys_r)
theta = np.linspace(0., 180., max(image.shape), endpoint=False)
sinogram = radon(image, theta=theta, circle=True)
ax2.set_title("Radon transform\n(Sinogram)")
ax2.set_xlabel("Projection angle (deg)")
ax2.set_ylabel("Projection position (pixels)")
ax2.imshow(sinogram, cmap=plt.cm.Greys_r,
extent=(0, 180, 0, sinogram.shape[0]), aspect='auto')
fig.tight_layout()
plt.show()
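# Hedged sanity check (added for illustration, not part of the original
# example): the rule of thumb quoted above -- roughly as many projection
# angles as pixels across the object -- is exactly what the ``theta`` array
# defined above encodes.
assert len(theta) == max(image.shape)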
######################################################################
#
# Reconstruction with the Filtered Back Projection (FBP)
# ======================================================
#
# The mathematical foundation of the filtered back projection is the Fourier
# slice theorem [2]_. It uses Fourier transform of the projection and
# interpolation in Fourier space to obtain the 2D Fourier transform of the
# image, which is then inverted to form the reconstructed image. The filtered
# back projection is among the fastest methods of performing the inverse
# Radon transform. The only tunable parameter for the FBP is the filter,
# which is applied to the Fourier transformed projections. It may be used to
# suppress high frequency noise in the reconstruction. ``skimage`` provides a
# few different options for the filter.
from skimage.transform import iradon
reconstruction_fbp = iradon(sinogram, theta=theta, circle=True)
error = reconstruction_fbp - image
print('FBP rms reconstruction error: %.3g' % np.sqrt(np.mean(error**2)))
imkwargs = dict(vmin=-0.2, vmax=0.2)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4.5),
sharex=True, sharey=True,
subplot_kw={'adjustable': 'box-forced'})
ax1.set_title("Reconstruction\nFiltered back projection")
ax1.imshow(reconstruction_fbp, cmap=plt.cm.Greys_r)
ax2.set_title("Reconstruction error\nFiltered back projection")
ax2.imshow(reconstruction_fbp - image, cmap=plt.cm.Greys_r, **imkwargs)
plt.show()
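# Hedged aside (added for illustration, not part of the original example):
# the different FBP filter windows mentioned above can be compared directly.
# The keyword name (``filter``) and the window names below are assumptions
# about this version of ``skimage`` and may differ in other releases.
for filter_name in ('ramp', 'shepp-logan', 'cosine', 'hamming', 'hann'):
    rec = iradon(sinogram, theta=theta, circle=True, filter=filter_name)
    err = rec - image
    print('FBP (%s filter) rms reconstruction error: %.3g'
          % (filter_name, np.sqrt(np.mean(err ** 2))))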
######################################################################
#
# Reconstruction with the Simultaneous Algebraic Reconstruction Technique
# =======================================================================
#
# Algebraic reconstruction techniques for tomography are based on a
# straightforward idea: for a pixelated image the value of a single ray in a
# particular projection is simply a sum of all the pixels the ray passes
# through on its way through the object. This is a way of expressing the
# forward Radon transform. The inverse Radon transform can then be formulated
# as a (large) set of linear equations. As each ray passes through a small
# fraction of the pixels in the image, this set of equations is sparse,
# allowing iterative solvers for sparse linear systems to tackle the system
# of equations. One iterative method has been particularly popular, namely
# Kaczmarz' method [3]_, which has the property that the solution will
# approach a least-squares solution of the equation set.
#
# The combination of the formulation of the reconstruction problem as a set
# of linear equations and an iterative solver makes algebraic techniques
# relatively flexible, hence some forms of prior knowledge can be
# incorporated with relative ease.
#
# ``skimage`` provides one of the more popular variations of the algebraic
# reconstruction techniques: the Simultaneous Algebraic Reconstruction
# Technique (SART) [1]_ [4]_. It uses Kaczmarz' method [3]_ as the iterative
# solver. A good reconstruction is normally obtained in a single iteration,
# making the method computationally effective. Running one or more extra
# iterations will normally improve the reconstruction of sharp, high
# frequency features and reduce the mean squared error at the expense of
# increased high frequency noise (the user will need to decide on what number
# of iterations is best suited to the problem at hand). The implementation in
# ``skimage`` allows prior information in the form of a lower and upper
# threshold on the reconstructed values to be supplied to the reconstruction.
from skimage.transform import iradon_sart
reconstruction_sart = iradon_sart(sinogram, theta=theta)
error = reconstruction_sart - image
print('SART (1 iteration) rms reconstruction error: %.3g'
% np.sqrt(np.mean(error**2)))
fig, axes = plt.subplots(2, 2, figsize=(8, 8.5), sharex=True, sharey=True,
subplot_kw={'adjustable': 'box-forced'})
ax = axes.ravel()
ax[0].set_title("Reconstruction\nSART")
ax[0].imshow(reconstruction_sart, cmap=plt.cm.Greys_r)
ax[1].set_title("Reconstruction error\nSART")
ax[1].imshow(reconstruction_sart - image, cmap=plt.cm.Greys_r, **imkwargs)
# Run a second iteration of SART by supplying the reconstruction
# from the first iteration as an initial estimate
reconstruction_sart2 = iradon_sart(sinogram, theta=theta,
image=reconstruction_sart)
error = reconstruction_sart2 - image
print('SART (2 iterations) rms reconstruction error: %.3g'
% np.sqrt(np.mean(error**2)))
ax[2].set_title("Reconstruction\nSART, 2 iterations")
ax[2].imshow(reconstruction_sart2, cmap=plt.cm.Greys_r)
ax[3].set_title("Reconstruction error\nSART, 2 iterations")
ax[3].imshow(reconstruction_sart2 - image, cmap=plt.cm.Greys_r, **imkwargs)
plt.show()
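# Hedged aside (added for illustration, not part of the original example):
# the lower/upper threshold prior mentioned above can be supplied to SART.
# The ``clip`` keyword name is an assumption about this version of
# ``skimage``; the phantom values are known to lie in [0, 1].
reconstruction_sart_clipped = iradon_sart(sinogram, theta=theta, clip=(0, 1))
error_clipped = reconstruction_sart_clipped - image
print('SART (clipped to [0, 1]) rms reconstruction error: %.3g'
      % np.sqrt(np.mean(error_clipped ** 2)))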
######################################################################
# References
#
# .. [1] AC Kak, M Slaney, "Principles of Computerized Tomographic Imaging", IEEE Press 1988. http://www.slaney.org/pct/pct-toc.html
#
# .. [2] Wikipedia, Radon transform, http://en.wikipedia.org/wiki/Radon_transform#Relationship_with_the_Fourier_transform
#
# .. [3] S Kaczmarz, "Angenaeherte Aufloesung von Systemen linearer Gleichungen", Bulletin International de l'Academie Polonaise des Sciences et des Lettres, 35 pp 355--357 (1937)
#
# .. [4] AH Andersen, AC Kak, "Simultaneous algebraic reconstruction
# technique (SART): a superior implementation of the ART algorithm", Ultrasonic Imaging 6 pp 81--94 (1984)
| bsd-3-clause |
amueller/pystruct | examples/plot_ssvm_objective_curves.py | 5 | 2606 | """
==================================
SSVM Convergence Curves
==================================
Showing the relation between cutting plane and primal objectives,
as well as the different algorithms.
We use exact inference here, so the plots are easier to interpret.
As this is a small toy example, it is hard to generalize
the results indicated in the plot to more realistic settings.
"""
import numpy as np
import matplotlib.pyplot as plt
from pystruct.models import GridCRF
from pystruct.learners import (NSlackSSVM, OneSlackSSVM, SubgradientSSVM,
FrankWolfeSSVM)
from pystruct.datasets import generate_crosses_explicit
X, Y = generate_crosses_explicit(n_samples=50, noise=10, size=6, n_crosses=1)
n_labels = len(np.unique(Y))
crf = GridCRF(n_states=n_labels, inference_method=("ad3", {'branch_and_bound': True}))
n_slack_svm = NSlackSSVM(crf, check_constraints=False,
max_iter=50, batch_size=1, tol=0.001)
one_slack_svm = OneSlackSSVM(crf, check_constraints=False,
max_iter=100, tol=0.001, inference_cache=50)
subgradient_svm = SubgradientSSVM(crf, learning_rate=0.001, max_iter=20,
decay_exponent=0, momentum=0)
bcfw_svm = FrankWolfeSSVM(crf, max_iter=50, check_dual_every=4)
#n-slack cutting plane ssvm
n_slack_svm.fit(X, Y)
# 1-slack cutting plane ssvm
one_slack_svm.fit(X, Y)
# online subgradient ssvm
subgradient_svm.fit(X, Y)
# Block coordinate Frank-Wolfe
bcfw_svm.fit(X, Y)
# don't plot objective from cached inference for 1-slack
inference_run = ~np.array(one_slack_svm.cached_constraint_)
time_one = np.array(one_slack_svm.timestamps_[1:])[inference_run]
# plot stuff
plt.plot(n_slack_svm.timestamps_[1:], n_slack_svm.objective_curve_,
label="n-slack cutting plane")
plt.plot(n_slack_svm.timestamps_[1:], n_slack_svm.primal_objective_curve_,
label="n-slack primal")
plt.plot(time_one,
np.array(one_slack_svm.objective_curve_)[inference_run],
label="one-slack cutting_plane")
plt.plot(time_one,
np.array(one_slack_svm.primal_objective_curve_)[inference_run],
label="one-slack primal")
plt.plot(subgradient_svm.timestamps_[1:], subgradient_svm.objective_curve_,
label="subgradient")
plt.plot(bcfw_svm.timestamps_[1:], bcfw_svm.objective_curve_,
label="Block-Coordinate Frank-Wolfe Dual")
plt.plot(bcfw_svm.timestamps_[1:], bcfw_svm.primal_objective_curve_,
label="Block-Coordinate Frank-Wolfe Primal")
plt.legend(loc="best")
plt.yscale('log')
plt.xlabel("training time")
plt.show()
| bsd-2-clause |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/pandas/io/tests/sas/test_xport.py | 7 | 4841 | import pandas as pd
import pandas.util.testing as tm
from pandas.io.sas.sasreader import read_sas
import numpy as np
import os
# CSV versions of test xpt files were obtained using the R foreign library
# Numbers in a SAS xport file are always float64, so need to convert
# before making comparisons.
def numeric_as_float(data):
for v in data.columns:
if data[v].dtype is np.dtype('int64'):
data[v] = data[v].astype(np.float64)
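# Hedged illustration (added for clarity, not part of the original tests):
# numeric_as_float mutates the frame in place, turning int64 columns into
# float64 so they compare cleanly against the float64 values read from the
# xport files.
_demo = pd.DataFrame({"a": np.array([1, 2, 3], dtype=np.int64)})
numeric_as_float(_demo)
assert _demo["a"].dtype == np.float64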
class TestXport(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.file01 = os.path.join(self.dirpath, "DEMO_G.xpt")
self.file02 = os.path.join(self.dirpath, "SSHSV1_A.xpt")
self.file03 = os.path.join(self.dirpath, "DRXFCD_G.xpt")
self.file04 = os.path.join(self.dirpath, "paxraw_d_short.xpt")
def test1_basic(self):
# Tests with DEMO_G.xpt (all numeric file)
# Compare to this
data_csv = pd.read_csv(self.file01.replace(".xpt", ".csv"))
numeric_as_float(data_csv)
# Read full file
data = read_sas(self.file01, format="xport")
tm.assert_frame_equal(data, data_csv)
num_rows = data.shape[0]
# Test reading beyond end of file
reader = read_sas(self.file01, format="xport", iterator=True)
data = reader.read(num_rows + 100)
self.assertTrue(data.shape[0] == num_rows)
reader.close()
# Test incremental read with `read` method.
reader = read_sas(self.file01, format="xport", iterator=True)
data = reader.read(10)
reader.close()
tm.assert_frame_equal(data, data_csv.iloc[0:10, :])
# Test incremental read with `get_chunk` method.
reader = read_sas(self.file01, format="xport", chunksize=10)
data = reader.get_chunk()
reader.close()
tm.assert_frame_equal(data, data_csv.iloc[0:10, :])
# Test read in loop
m = 0
reader = read_sas(self.file01, format="xport", chunksize=100)
for x in reader:
m += x.shape[0]
reader.close()
self.assertTrue(m == num_rows)
# Read full file with `read_sas` method
data = read_sas(self.file01)
tm.assert_frame_equal(data, data_csv)
def test1_index(self):
# Tests with DEMO_G.xpt using index (all numeric file)
# Compare to this
data_csv = pd.read_csv(self.file01.replace(".xpt", ".csv"))
data_csv = data_csv.set_index("SEQN")
numeric_as_float(data_csv)
# Read full file
data = read_sas(self.file01, index="SEQN", format="xport")
tm.assert_frame_equal(data, data_csv, check_index_type=False)
# Test incremental read with `read` method.
reader = read_sas(self.file01, index="SEQN", format="xport",
iterator=True)
data = reader.read(10)
reader.close()
tm.assert_frame_equal(data, data_csv.iloc[0:10, :],
check_index_type=False)
# Test incremental read with `get_chunk` method.
reader = read_sas(self.file01, index="SEQN", format="xport",
chunksize=10)
data = reader.get_chunk()
reader.close()
tm.assert_frame_equal(data, data_csv.iloc[0:10, :],
check_index_type=False)
def test1_incremental(self):
# Test with DEMO_G.xpt, reading full file incrementally
data_csv = pd.read_csv(self.file01.replace(".xpt", ".csv"))
data_csv = data_csv.set_index("SEQN")
numeric_as_float(data_csv)
reader = read_sas(self.file01, index="SEQN", chunksize=1000)
all_data = [x for x in reader]
data = pd.concat(all_data, axis=0)
tm.assert_frame_equal(data, data_csv, check_index_type=False)
def test2(self):
# Test with SSHSV1_A.xpt
# Compare to this
data_csv = pd.read_csv(self.file02.replace(".xpt", ".csv"))
numeric_as_float(data_csv)
data = read_sas(self.file02)
tm.assert_frame_equal(data, data_csv)
def test_multiple_types(self):
# Test with DRXFCD_G.xpt (contains text and numeric variables)
# Compare to this
data_csv = pd.read_csv(self.file03.replace(".xpt", ".csv"))
data = read_sas(self.file03, encoding="utf-8")
tm.assert_frame_equal(data, data_csv)
def test_truncated_float_support(self):
# Test with paxraw_d_short.xpt, a shortened version of:
# http://wwwn.cdc.gov/Nchs/Nhanes/2005-2006/PAXRAW_D.ZIP
# This file has truncated floats (5 bytes in this case).
# GH 11713
data_csv = pd.read_csv(self.file04.replace(".xpt", ".csv"))
data = read_sas(self.file04, format="xport")
tm.assert_frame_equal(data.astype('int64'), data_csv)
| apache-2.0 |
JingJunYin/tensorflow | tensorflow/examples/learn/multiple_gpu.py | 39 | 3957 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using Estimator with multiple GPUs to distribute one model.
This example only runs if you have multiple GPUs to assign to.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
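# Hedged aside (added for illustration, not part of the original example):
# a quick way to see which GPUs TensorFlow can use before `my_model` pins
# layers to '/device:GPU:1' and '/device:GPU:2' below. `device_lib` is a
# TF 1.x utility; note that listing devices also initialises them.
from tensorflow.python.client import device_lib
print('Visible GPU devices: %s'
      % [d.name for d in device_lib.list_local_devices()
         if d.device_type == 'GPU'])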
def my_model(features, labels, mode):
"""DNN with three hidden layers, and dropout of 0.1 probability.
Note: If you want to run this example with multiple GPUs, Cuda Toolkit 7.0 and
CUDNN 6.5 V2 from NVIDIA need to be installed beforehand.
Args:
features: Dict of input `Tensor`.
labels: Label `Tensor`.
mode: One of `ModeKeys`.
Returns:
`EstimatorSpec`.
"""
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
net = features[X_FEATURE]
with tf.device('/device:GPU:1'):
for units in [10, 20, 10]:
net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
net = tf.layers.dropout(net, rate=0.1)
with tf.device('/device:GPU:2'):
# Compute logits (1 per class).
logits = tf.layers.dense(net, 3, activation=None)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Compute loss.
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Create training op.
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(
loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Compute evaluation metrics.
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = tf.estimator.Estimator(model_fn=my_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
hugobowne/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 27 | 5247 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
# Test the error raised when parameter passed to lle is invalid
def test_lle_init_parameters():
X = np.random.rand(5, 3)
clf = manifold.LocallyLinearEmbedding(eigen_solver="error")
msg = "unrecognized eigen_solver 'error'"
assert_raise_message(ValueError, msg, clf.fit, X)
clf = manifold.LocallyLinearEmbedding(method="error")
msg = "unrecognized method 'error'"
assert_raise_message(ValueError, msg, clf.fit, X)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
| bsd-3-clause |
rsignell-usgs/notebook | NCSS/netcdf_subset_service-COAWST_hwave.py | 1 | 2741 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# #Access Model and Observations using RESTful web services
# Using the NOAA ERDDAP web service, extract the latest data from a specified NDBC Station.
# Then using the Unidata "grid as point" NetCDF Subset Service, extract a time series closest to a specified lon,lat location.
#
# At first we use the NetCDF Subset Web Form below to construct the query, but then reuse the URL generated by the form directly in the Python code below. In this way different time periods, depths or variables may be extracted without returning to the form, and analyzed and visualized in Python without saving and loading CSV files.
# <codecell>
from IPython.display import HTML
# <codecell>
import pandas as pd
import time
import datetime as dt
import matplotlib.pyplot as plt
# <headingcell level=2>
# Specify NDBC Buoy Station
# <codecell>
station = '44098' # Block Island
# <codecell>
# Specify desired time range
# ....relative to right now ....
tstart = dt.datetime.utcnow() - dt.timedelta(days=4)
tstop = dt.datetime.utcnow() + dt.timedelta(days=1)
start=tstart.strftime('%Y-%m-%dT%H:%M:%SZ')
stop=tstop.strftime('%Y-%m-%dT%H:%M:%SZ')
# ... or specific times (UTC)
#start = '2014-08-24T00:00:00Z'
#stop = '2014-08-28T00:00:00Z'
print start,'\n', stop
# <headingcell level=2>
# Read NDBC Wave Buoy Data using ERDDAP
# <codecell>
#HTML('http://coastwatch.pfeg.noaa.gov/erddap/tabledap/cwwcNDBCMet.html')
# <codecell>
url='http://coastwatch.pfeg.noaa.gov/erddap/tabledap/cwwcNDBCMet.csv?\
station,time,longitude,latitude,wvht\
&station="%s"&time>=%s&time<=%s' % (station,start,stop)
print url
# <codecell>
# read CSV observations into Pandas DataFrame
df_obs = pd.read_csv(url,index_col='time',parse_dates=True,skiprows=[1])
# <codecell>
lon=df_obs['longitude'][0]
lat=df_obs['latitude'][0]
print lon,lat
# <headingcell level=2>
# Read COAWST Wave Data at the NDBC Buoy Location using the Unidata NetCDF Subset Service
# <codecell>
#HTML("http://geoport.whoi.edu/thredds/ncss/grid/bbleh/spring2012/pointDataset.html")
# <codecell>
var = 'Hwave'
url='http://geoport.whoi.edu/thredds/ncss/grid/coawst_4/use/fmrc/coawst_4_use_best.ncd?\
var=%s&latitude=%f&longitude=%f&time_start=%s&time_end=%s&\
vertCoord=&accept=csv' % (var,lat,lon,start,stop)
print(url)
# <codecell>
#load model data CSV into Pandas DataFrame
df_mod = pd.read_csv(url,index_col='date',parse_dates=True)
# <headingcell level=2>
# Plot the time series
# <codecell>
fig, ax = plt.subplots(figsize=(12, 4))
ax = df_mod['Hwave[unit="meter"]'].plot(ax=ax, legend=True)
df_obs['wvht'].plot(ax=ax, legend=True)
ax.set_title('Wave Height at Station %s' % station);
| mit |
kyleabeauchamp/HMCNotes | code/misc/test_mjhmc.py | 1 | 1360 | import lb_loader
import pandas as pd
import simtk.openmm.app as app
import numpy as np
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems, integrators
precision = "mixed"
sysname = "switchedljbox"
system, positions, groups, temperature, timestep, langevin_timestep, testsystem, equil_steps, steps_per_hmc = lb_loader.load(sysname)
positions, boxes = lb_loader.equilibrate(testsystem, temperature, timestep, steps=equil_steps, minimize=True, use_hmc=False)
timestep = 1.0 * u.femtoseconds
steps_per_hmc = 25
integrator = hmc_integrators.MJHMCIntegrator(temperature=temperature, steps_per_hmc=steps_per_hmc, timestep=timestep)
f = lambda x: integrator.getGlobalVariableByName(x)
simulation = lb_loader.build(testsystem, integrator, temperature, precision=precision)
print("Before")
print(simulation.context.getState(getEnergy=True).getPotentialEnergy(), simulation.context.getState(getEnergy=True).getKineticEnergy())
integrator.step(1)
print("After")
print(simulation.context.getState(getEnergy=True).getPotentialEnergy(), simulation.context.getState(getEnergy=True).getKineticEnergy())
print(f("last_move"))
print(f("ELm") + f("KLm"), f("E0") + f("K0"), f("E") + f("K"))
print(f("gammaL"), f("gammaF"), f("gammaR"))
print(f("wL"), f("wF"), f("wR"))
print(f("calculated_xLm"))
print(f("holding"))
| gpl-2.0 |
tedunderwood/biographies | topicmodel/interpret/autocondense_rolethemes.py | 1 | 2601 | # autocondense_rolethemes.py
# This script plays the same role for my custom topic model
# that condense_doctopics plays for MALLET:
# we're creating a portable subset of the doct-topic table
# that answers preregistered hypotheses.
# There are differences here because my doctopic file has a
# slightly different format, and especially because I haven't
# normalized the vectors yet. Also, I add authors to the output.
import sys, csv, os
import numpy as np
import pandas as pd
def getdoc(anid):
'''
Gets the docid part of a character id
'''
if '|' in anid:
thedoc = anid.split('|')[0]
else:
print('error', anid)
thedoc = anid
return thedoc
# MAIN starts here
def condense_a_file(inpath, outpath, themect):
# Read metadata in order to create lists of documents linked
# by an author or by a year.
meta = pd.read_csv('../../metadata/filtered_fiction_plus_18c.tsv', sep = '\t', index_col = 'docid')
meta = meta[~meta.index.duplicated(keep = 'first')]
docsbyauthor = dict()
groupedbyauthor = meta.groupby('author')
for auth, group in groupedbyauthor:
docsbyauthor[auth] = group.index.tolist()
docsbyyear = dict()
groupedbyyear = meta.groupby('inferreddate')
for yr, group in groupedbyyear:
docsbyyear[yr] = group.index.tolist()
significant_vols = set()
with open('../../evaluation/hypotheses.tsv', encoding = 'utf-8') as f:
reader = csv.DictReader(f, delimiter = '\t')
for row in reader:
ids = [row['firstsim'], row['secondsim'], row['distractor']]
for anid in ids:
docid = getdoc(anid)
significant_vols.add(docid)
outlines = []
vectorsbydoc = dict()
significant_authors = set()
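    # Each retained character vector is L1-normalised below so that theme
    # proportions are comparable across characters; characters whose theme
    # weights sum to less than 1 are skipped as too sparse to keep.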
with open(inpath, encoding = 'utf-8') as f:
for line in f:
fields = line.strip().split('\t')
if fields[0] != 'char':
continue
charid = fields[1]
docid = getdoc(charid)
if docid in significant_vols:
vector = np.array(fields[3 : ], dtype = 'float32')
total = np.sum(vector)
if total < 1:
continue
vector = vector / total
author = meta.loc[docid, 'author']
line = [author, charid]
line.extend([str(x) for x in vector])
outlines.append(line)
with open(outpath, mode = 'w', encoding = 'utf-8') as f:
for line in outlines:
f.write('\t'.join(line) + '\n')
| mit |
Vimos/scikit-learn | examples/feature_selection/plot_feature_selection.py | 95 | 2847 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first 4 are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='darkorange')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight',
color='navy')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='c')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
| bsd-3-clause |
awemulya/fieldsight-kobocat | onadata/apps/fieldsight/management/commands/transfer_submission.py | 1 | 3365 | from django.conf import settings
from django.core.management.base import BaseCommand
import pandas as pd
from onadata.apps.fieldsight.models import Project
from onadata.apps.fsforms.models import FInstance
from onadata.apps.logger.models import Instance
from onadata.apps.viewer.models.parsed_instance import update_mongo_instance
def validate_column_sequence(columns):
return True
def move_submission(sheet_columns, project_id):
submission_id, from_site_identifier, to_site_identifier, form_name = tuple(sheet_columns)
project = Project.objects.get(pk=project_id)
to_site = project.sites.get(identifier=to_site_identifier)
print(to_site.identifier)
if FInstance.objects.filter(instance=submission_id).exists():
instance = FInstance.objects.get(instance=submission_id)
instance.site = to_site
instance.save()
d = instance.instance.parsed_instance.to_dict_for_mongo()
d.update(
{'fs_project_uuid': str(instance.project_fxf_id), 'fs_project': instance.project_id, 'fs_status': 0,
'fs_site': instance.site_id,
'fs_uuid': instance.site_fxf_id})
try:
synced = update_mongo_instance(d, instance.id)
print(synced, "updated in mongo success")
except Exception as e:
print(str(e))
# else:
# print("submision ", submission_id, "doesnot exists")
# print("creating Finstance for ", submission_id, ".......")
# query = {"_id": {"$in": submission_id}}
# xform_instances = settings.MONGO_DB.instances
# cursor = xform_instances.find(query, { "_id": 1, "fs_project_uuid":1, "fs_project":1 , "fs_site":1,'fs_uuid':1 })
# records = list(record for record in cursor)
# for record in records:
# instance = Instance.objects.get(pk=submission_id)
# fi = FInstance(instance=instance, site=to_site, project=to_site.project, project_fxf=record["fs_project_uuid"], form_status=0, submitted_by=instance.user)
# fi.set_version()
# fi.save()
def process_transfer_submissions(xl, to_transfer_sheet, project_id):
df = xl.parse(to_transfer_sheet)
columns = df.columns
if validate_column_sequence(columns):
for i in range(len(df.values)):
move_submission(df.values[i], project_id)
def process_delete_submission(xl, to_delete_sheet):
df = xl.parse(to_delete_sheet)
submission_ids = []
for i in range(len(df.values)):
submission_ids.append(df.values[i][0])
result = FInstance.objects.filter(instance__id__in=submission_ids).update(is_deleted=True)
print(result)
class Command(BaseCommand):
    help = 'Transfer and delete submissions listed in an Excel file'
def add_arguments(self, parser):
parser.add_argument('file_path', type=str)
parser.add_argument('project_id', type=int)
def handle(self, *args, **options):
file_path = options['file_path']
project_id = options['project_id']
self.stdout.write('Reading file "%s"' % file_path)
xl = pd.ExcelFile(file_path)
to_transfer_sheet = xl.sheet_names[0]
to_delete_sheet = xl.sheet_names[1]
process_transfer_submissions(xl, to_transfer_sheet, project_id)
process_delete_submission(xl, to_delete_sheet)
self.stdout.write('Reading file "%s"' % file_path) | bsd-2-clause |
cosurgi/trunk | py/plot.py | 2 | 35291 | # encoding: utf-8
# 2008 © Václav Šmilauer <eudoxos@arcig.cz>
"""
Module containing utility functions for plotting inside yade. See :ysrc:`examples/simple-scene/simple-scene-plot.py` or :ysrc:`examples/concrete/uniax.py` for example of usage.
"""
from __future__ import print_function
## all exported names
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
__all__=['data','plots','labels','live','liveInterval','autozoom','plot','reset','resetData','splitData','reverseData','addData','addAutoData','saveGnuplot','saveDataTxt','savePlotSequence']
# multi-threaded support for Tk
# safe to import even if Tk will not be used
import mtTkinter as Tkinter
try:
import Image
except:
try:
import PIL.Image
except:
import warnings
warnings.warn("PIL (python-imaging package) must be installed to use yade.plot")
import matplotlib,os,time,math,itertools
# running in batch
#
# If GtkAgg is the default, X must be working, which is not the case
# with batches (DISPLAY is unset in such case) and importing pylab fails then.
#
# Agg does not require the GUI part and works without any DISPLAY active
# just fine.
#
# see http://www.mail-archive.com/yade-dev@lists.launchpad.net/msg04320.html
# and https://lists.launchpad.net/yade-users/msg03289.html
#
import yade.runtime
if not yade.runtime.hasDisplay: matplotlib.use('Agg')
from minieigen import *
#matplotlib.use('TkAgg')
#matplotlib.use('GTKAgg')
##matplotlib.use('QtAgg')
matplotlib.rc('axes',grid=True) # put grid in all figures
import pylab
data={}
"Global dictionary containing all data values, common for all plots, in the form {'name':[value,...],...}. Data should be added using plot.addData function. All [value,...] columns have the same length, they are padded with NaN if unspecified."
imgData={}
"Dictionary containing lists of strings, which have the meaning of images corresponding to respective :yref:`yade.plot.data` rows. See :yref:`yade.plot.plots` on how to plot images."
plots={} # dictionary x-name -> (yspec,...), where yspec is either y-name or (y-name,'line-specification')
"dictionary x-name -> (yspec,...), where yspec is either y-name or (y-name,'line-specification'). If ``(yspec,...)`` is ``None``, then the plot has meaning of image, which will be taken from respective field of :yref:`yade.plot.imgData`."
labels={}
"Dictionary converting names in data to human-readable names (TeX names, for instance); if a variable is not specified, it is left untranslated."
xylabels={}
"Dictionary of 2-tuples specifying (xlabel,ylabel) for respective plots; if either of them is None, the default auto-generated title is used."
legendLoc=('upper left','upper right')
"Location of the y1 and y2 legends on the plot, if y2 is active."
live=True if yade.runtime.hasDisplay else False
"Enable/disable live plot updating. Disabled by default for now, since it has a few rough edges."
liveInterval=1
"Interval for the live plot updating, in seconds."
autozoom=True
"Enable/disable automatic plot rezooming after data update."
scientific=True if hasattr(pylab,'ticklabel_format') else False ## safe default for older matplotlib versions
"Use scientific notation for axes ticks."
axesWd=0
"Linewidth (in points) to make *x* and *y* axes better visible; not activated if non-positive."
current=-1
"Point that is being tracked with a scatter point. -1 is for the last point, set to *nan* to disable."
afterCurrentAlpha=.2
"Color alpha value for part of lines after :yref:`yade.plot.current`, between 0 (invisible) to 1 (full color)"
scatterMarkerKw=dict(verts=[(0.,0.),(-30.,10.),(-25,0),(-30.,-10.)],marker=None)
"Parameters for the current position marker"
componentSeparator='_'
componentSuffixes={Vector2:{0:'x',1:'y'},Vector3:{0:'x',1:'y',2:'z'},Matrix3:{(0,0):'xx',(1,1):'yy',(2,2):'zz',(0,1):'xy',(0,2):'xz',(1,2):'yz',(1,0):'yx',(2,0):'zx',(2,1):'zy'}}
# if a type with entry in componentSuffixes is given in addData, columns for individual components are synthesized using indices and suffixes given for each type, e.g. foo=Vector3r(1,2,3) will result in columns foo_x=1, foo_y=2, foo_z=3
def reset():
"Reset all plot-related variables (data, plots, labels)"
global data, plots, labels # plotLines
data={}; plots={}; imgData={} # plotLines={};
pylab.close('all')
def resetData():
"Reset all plot data; keep plots and labels intact."
global data
data={}
from yade.wrapper import *
def splitData():
"Make all plots discontinuous at this point (adds nan's to all data fields)"
addData({})
def reverseData():
"""Reverse yade.plot.data order.
Useful for tension-compression test, where the initial (zero) state is loaded and, to make data continuous, last part must *end* in the zero state.
"""
for k in data: data[k].reverse()
def addDataColumns(dd):
'''Add new columns with NaN data, without adding anything to other columns. Does nothing for columns that already exist'''
numSamples=len(data[list(data.keys())[0]]) if len(data)>0 else 0
for d in dd:
if d in list(data.keys()): continue
data[d]=[nan for i in range(numSamples)]
def addAutoData():
"""Add data by evaluating contents of :yref:`yade.plot.plots`. Expressions rasing exceptions will be handled gracefully, but warning is printed for each.
>>> from yade import plot
>>> from pprint import pprint
>>> O.reset()
>>> plot.resetData()
>>> plot.plots={'O.iter':('O.time',None,'numParticles=len(O.bodies)')}
>>> plot.addAutoData()
>>> pprint(plot.data)
{'O.iter': [0], 'O.time': [0.0], 'numParticles': [0]}
Note that each item in :yref:`yade.plot.plots` can be
* an expression to be evaluated (using the ``eval`` builtin);
* ``name=expression`` string, where ``name`` will appear as label in plots, and expression will be evaluated each time;
* a dictionary-like object -- current keys are labels of plots and current values are added to :yref:`yade.plot.data`. The contents of the dictionary can change over time, in which case new lines will be created as necessary.
A simple simulation with plot can be written in the following way; note how the energy plot is specified.
>>> from yade import plot, utils
>>> plot.plots={'i=O.iter':(O.energy,None,'total energy=O.energy.total()')}
>>> # we create a simple simulation with one ball falling down
>>> plot.resetData()
>>> O.bodies.append(utils.sphere((0,0,0),1))
0
>>> O.dt=utils.PWaveTimeStep()
>>> O.engines=[
... ForceResetter(),
... GravityEngine(gravity=(0,0,-10),warnOnce=False),
... NewtonIntegrator(damping=.4,kinSplit=True),
... # get data required by plots at every step
... PyRunner(command='yade.plot.addAutoData()',iterPeriod=1,initRun=True)
... ]
>>> O.trackEnergy=True
>>> O.run(2,True)
>>> pprint(plot.data) #doctest: +ELLIPSIS
{'gravWork': [0.0, -25.13274...],
'i': [0, 1],
'kinRot': [0.0, 0.0],
'kinTrans': [0.0, 7.5398...],
'nonviscDamp': [0.0, 10.0530...],
'total energy': [0.0, -7.5398...]}
"""
# this part of docstring does not work with Sphinx
"""
.. plot::
from yade import *
from yade import plot,utils
O.reset()
O.engines=[ForceResetter(),GravityEngine(gravity=(0,0,-10),warnOnce=False),NewtonIntegrator(damping=.4,kinSplit=True),PyRunner(command='yade.plot.addAutoData()',iterPeriod=1,initRun=True)]
O.bodies.append(utils.sphere((0,0,0),1)); O.dt=utils.PWaveTimeStep()
plot.resetData()
plot.plots={'i=O.iter':(O.energy,None,'total energy=O.energy.total()')}
O.trackEnergy=True
O.run(50,True)
import pylab; pylab.grid(True)
plot.legendLoc=('lower left','upper right')
plot.plot(noShow=True)
"""
def colDictUpdate(col,dic):
'update *dic* with the value from col, which is a "expr" or "name=expr" string; all exceptions from ``eval`` are caught and warning is printed without adding any data.'
name,expr=col.split('=',1) if '=' in col else (col,col)
try:
val=eval(expr)
dic.update({name:val})
except:
print('WARN: ignoring exception raised while evaluating auto-column `'+expr+"'%s."%('' if name==expr else ' ('+name+')'))
cols={}
for p in plots:
pp=plots[p]
colDictUpdate(p.strip(),cols)
for y in tuplifyYAxis(plots[p]):
# imgplot specifier
if y==None: continue
yy=addPointTypeSpecifier(y,noSplit=True)[0]
# dict-like object
if hasattr(yy,'keys'): cols.update(dict(yy))
# callable returning list sequence of expressions to evaluate
#elif callable(yy):
# for yyy in yy(): colDictUpdate(yyy,cols)
# plain value
else: colDictUpdate(yy,cols)
addData(cols)
def addData(*d_in,**kw):
"""Add data from arguments name1=value1,name2=value2 to yade.plot.data.
(the old {'name1':value1,'name2':value2} is deprecated, but still supported)
New data will be padded with nan's, unspecified data will be nan (nan's don't appear in graphs).
This way, equal length of all data is assured so that they can be plotted one against any other.
>>> from yade import plot
>>> from pprint import pprint
>>> plot.resetData()
>>> plot.addData(a=1)
>>> plot.addData(b=2)
>>> plot.addData(a=3,b=4)
>>> pprint(plot.data)
{'a': [1, nan, 3], 'b': [nan, 2, 4]}
Some sequence types can be given to addData; they will be saved in synthesized columns for individual components.
>>> plot.resetData()
>>> plot.addData(c=Vector3(5,6,7),d=Matrix3(8,9,10, 11,12,13, 14,15,16))
>>> pprint(plot.data)
{'c_x': [5.0],
'c_y': [6.0],
'c_z': [7.0],
'd_xx': [8.0],
'd_xy': [9.0],
'd_xz': [10.0],
'd_yx': [11.0],
'd_yy': [12.0],
'd_yz': [13.0],
'd_zx': [14.0],
'd_zy': [15.0],
'd_zz': [16.0]}
"""
import numpy
if len(data)>0: numSamples=len(data[list(data.keys())[0]])
else: numSamples=0
# align with imgData, if there is more of them than data
if len(imgData)>0 and numSamples==0: numSamples=max(numSamples,len(imgData[list(imgData.keys())[0]]))
d=(d_in[0] if len(d_in)>0 else {})
d.update(**kw)
# handle types composed of multiple values (vectors, matrices)
dNames=list(d.keys())[:] # make copy, since dict cannot change size if iterated over directly
for name in dNames:
if type(d[name]) in componentSuffixes:
val=d[name]
suffixes=componentSuffixes[type(d[name])]
for ix in suffixes: d[name+componentSeparator+suffixes[ix]]=d[name][ix]
del d[name]
elif hasattr(d[name],'__len__'):
raise ValueError('plot.addData given unhandled sequence type (is a '+type(d[name]).__name__+', must be number or '+'/'.join([k.__name__ for k in componentSuffixes])+')')
for name in d:
if not name in list(data.keys()): data[name]=[]
for name in data:
data[name]+=(numSamples-len(data[name]))*[nan]
data[name].append(d[name] if name in d else nan)
#print [(k,len(data[k])) for k in data.keys()]
#numpy.array([nan for i in range(numSamples)])
#numpy.append(data[name],[d[name]],1)
def addImgData(**kw):
for k in kw:
if k not in imgData: imgData[k]=[]
# align imgData with data
if len(list(data.keys()))>0 and len(list(imgData.keys()))>0:
nData,nImgData=len(data[list(data.keys())[0]]),len(imgData[list(imgData.keys())[0]])
#if nImgData>nData-1: raise RuntimeError("imgData is already the same length as data?")
if nImgData<nData-1: # repeat last value
for k in list(imgData.keys()):
lastValue=imgData[k][-1] if len(imgData[k])>0 else None
imgData[k]+=(nData-len(imgData[k])-1)*[lastValue]
elif nData<nImgData:
for k in list(data.keys()):
lastValue=data[k][-1] if len(data[k])>0 else nan
data[k]+=(nImgData-nData)*[lastValue] # add one more, because we will append to imgData below
# add values from kw
newLen=(len(imgData[list(imgData.keys())[0]]) if imgData else 0)+1 # current length plus 1
for k in kw:
if k in imgData and len(imgData[k])>0: imgData[k]+=(newLen-len(imgData[k])-1)*[imgData[k][-1]]+[kw[k]] # repeat last element as necessary
else: imgData[k]=(newLen-1)*[None]+[kw[k]] # repeat None if no previous value
# align values which were not in kw by repeating the last value
for k in imgData:
if len(imgData[k])<newLen: imgData[k]+=(newLen-len(imgData[k]))*[imgData[k][-1]]
assert(len(set([len(i) for i in list(imgData.values())]))<=1) # no data or all having the same value
# not public functions
def addPointTypeSpecifier(o,noSplit=False):
"""Add point type specifier to simple variable name; optionally take only the part before '=' from the first item."""
if type(o) in [tuple,list]:
if noSplit or not type(o[0])==str: return o
else: return (o[0].split('=',1)[0],)+tuple(o[1:])
else: return (o if (noSplit or not type(o)==str) else (o.split('=',1)[0]),'')
def tuplifyYAxis(pp):
"""convert one variable to a 1-tuple"""
if type(pp) in [tuple,list]: return pp
else: return (pp,)
def xlateLabel(l):
"Return translated label; return l itself if not in the labels dict."
global labels
if l in list(labels.keys()): return labels[l]
else: return l
class LineRef(object):
"""Holds reference to plot line and to original data arrays (which change during the simulation),
and updates the actual line using those data upon request."""
def __init__(self,line,scatter,line2,xdata,ydata,dataName=None):
self.line,self.scatter,self.line2,self.xdata,self.ydata,self.dataName=line,scatter,line2,xdata,ydata,dataName
def update(self):
if isinstance(self.line,matplotlib.image.AxesImage):
# image name
try:
if len(self.xdata)==0 and self.dataName: self.xdata=imgData[self.dataName] # empty list reference an empty singleton, not the list we want; adjust here
if self.xdata[current]==None: img=Image.new('RGBA',(1,1),(0,0,0,0))
else: img=Image.open(self.xdata[current])
self.line.set_data(img)
except IndexError: pass
else:
# regular data
import numpy
			# current==-1 avoids copying/slicing the data in the else branch below
if current==None or current==-1 or afterCurrentAlpha==1:
self.line.set_xdata(self.xdata); self.line.set_ydata(self.ydata)
self.line2.set_xdata([]); self.line2.set_ydata([])
else:
try: # try if we can extend the first part by one so that lines are connected
self.xdata[:current+1]; preCurrEnd=current+1
except IndexError: preCurrEnd=current
preCurrEnd=current+(1 if len(self.xdata)>current else 0)
self.line.set_xdata(self.xdata[:preCurrEnd]); self.line.set_ydata(self.ydata[:preCurrEnd])
self.line2.set_xdata(self.xdata[current:]); self.line2.set_ydata(self.ydata[current:])
try:
x,y=self.xdata[current],self.ydata[current]
except IndexError: x,y=0,0
# this could be written in a nicer way, very likely
try:
pt=numpy.ndarray((2,),buffer=numpy.array([float(x),float(y)]))
if self.scatter:
self.scatter.set_offsets(pt)
# change rotation of the marker (possibly incorrect)
try:
dx,dy=self.xdata[current]-self.xdata[current-1],self.ydata[current]-self.ydata[current-1]
# smoothing from last n values, if possible
# FIXME: does not show arrow at all if less than window values
#try:
# window=10
# dx,dy=[numpy.average(numpy.diff(dta[current-window:current])) for dta in self.xdata,self.ydata]
#except IndexError: pass
# there must be an easier way to find on-screen derivative angle, ask on the matplotlib mailing list
axes=self.line.axes()
p=axes.patch; xx,yy=p.get_verts()[:,0],p.get_verts()[:,1]; size=max(xx)-min(xx),max(yy)-min(yy)
aspect=(size[1]/size[0])*(1./axes.get_data_ratio())
angle=math.atan(aspect*dy/dx)
if dx<0: angle-=math.pi
self.scatter.set_transform(matplotlib.transforms.Affine2D().rotate(angle))
except IndexError: pass
except TypeError: pass # this happens at i386 with empty data, saying TypeError: buffer is too small for requested array
currLineRefs=[]
liveTimeStamp=0 # timestamp when live update was started, so that the old thread knows to stop if that changes
nan=float('nan')
def createPlots(subPlots=True,scatterSize=60,wider=False):
global currLineRefs
figs=set([l.line.axes.get_figure() for l in currLineRefs]) # get all current figures
for f in figs: pylab.close(f) # close those
currLineRefs=[] # remove older plots (breaks live updates of windows that are still open)
if len(plots)==0: return # nothing to plot
if subPlots:
# compute number of rows and colums for plots we have
subCols=int(round(math.sqrt(len(plots)))); subRows=int(math.ceil(len(plots)*1./subCols))
if wider: subRows,subCols=subCols,subRows
for nPlot,p in enumerate(plots.keys()):
pStrip=p.strip().split('=',1)[0]
if not subPlots: pylab.figure()
else: pylab.subplot(subRows,subCols,nPlot+1)
if plots[p]==None: # image plot
if not pStrip in list(imgData.keys()): imgData[pStrip]=[]
# fake (empty) image if no data yet
if len(imgData[pStrip])==0 or imgData[pStrip][-1]==None: img=Image.new('RGBA',(1,1),(0,0,0,0))
else: img=Image.open(imgData[pStrip][-1])
img=pylab.imshow(img,origin='lower')
currLineRefs.append(LineRef(img,None,None,imgData[pStrip],None,pStrip))
pylab.gca().set_axis_off()
continue
plots_p=[addPointTypeSpecifier(o) for o in tuplifyYAxis(plots[p])]
plots_p_y1,plots_p_y2=[],[]; y1=True
missing=set() # missing data columns
if pStrip not in list(data.keys()): missing.add(pStrip)
for d in plots_p:
if d[0]==None:
y1=False; continue
if y1: plots_p_y1.append(d)
else: plots_p_y2.append(d)
if d[0] not in list(data.keys()) and not callable(d[0]) and not hasattr(d[0],'keys'): missing.add(d[0])
if missing:
if len(list(data.keys()))==0 or len(data[list(data.keys())[0]])==0: # no data at all yet, do not add garbage NaNs
for m in missing: data[m]=[]
else:
print('Missing columns in plot.data, adding NaN: ',','.join(list(missing)))
addDataColumns(missing)
def createLines(pStrip,ySpecs,isY1=True,y2Exists=False):
'''Create data lines from specifications; this code is common for y1 and y2 axes;
it handles y-data specified as callables, which might create additional lines when updated with liveUpdate.
'''
# save the original specifications; they will be smuggled into the axes object
# the live updated will run yNameFuncs to see if there are new lines to be added
# and will add them if necessary
yNameFuncs=set([d[0] for d in ySpecs if callable(d[0])]) | set([d[0].keys for d in ySpecs if hasattr(d[0],'keys')])
yNames=set()
ySpecs2=[]
for ys in ySpecs:
# ys[0]() must return list of strings, which are added to ySpecs2; line specifier is synthesized by tuplifyYAxis and cannot be specified by the user
if callable(ys[0]): ySpecs2+=[(ret,ys[1]) for ret in ys[0]()]
elif hasattr(ys[0],'keys'): ySpecs2+=[(yy,'') for yy in list(ys[0].keys())]
else: ySpecs2.append(ys)
if len(ySpecs2)==0:
print('yade.plot: creating fake plot, since there are no y-data yet')
line,=pylab.plot([nan],[nan])
line2,=pylab.plot([nan],[nan])
currLineRefs.append(LineRef(line,None,line2,[nan],[nan]))
# set different color series for y1 and y2 so that they are recognizable
if 'axes.color_cycle' in pylab.rcParams: pylab.rcParams['axes.color_cycle']='b,g,r,c,m,y,k' if not isY1 else 'm,y,k,b,g,r,c'
for d in ySpecs2:
yNames.add(d)
line,=pylab.plot(data[pStrip],data[d[0]],d[1],label=xlateLabel(d[0]))
line2,=pylab.plot([],[],d[1],color=line.get_color(),alpha=afterCurrentAlpha)
# use (0,0) if there are no data yet
scatterPt=[0,0] if len(data[pStrip])==0 else (data[pStrip][current],data[d[0]][current])
# if current value is NaN, use zero instead
scatter=pylab.scatter(scatterPt[0] if not math.isnan(scatterPt[0]) else 0,scatterPt[1] if not math.isnan(scatterPt[1]) else 0,s=scatterSize,color=line.get_color(),**scatterMarkerKw)
currLineRefs.append(LineRef(line,scatter,line2,data[pStrip],data[d[0]]))
axes=line.axes
labelLoc=(legendLoc[0 if isY1 else 1] if y2Exists>0 else 'best')
l=pylab.legend(loc=labelLoc)
if hasattr(l,'draggable'): l.draggable(True)
if scientific:
pylab.ticklabel_format(style='sci',scilimits=(0,0),axis='both')
# fixes scientific exponent placement for y2: https://sourceforge.net/mailarchive/forum.php?thread_name=20101223174750.GD28779%40ykcyc&forum_name=matplotlib-users
if not isY1: axes.yaxis.set_offset_position('right')
if isY1:
pylab.ylabel((', '.join([xlateLabel(_p[0]) for _p in ySpecs2])) if p not in xylabels or not xylabels[p][1] else xylabels[p][1])
pylab.xlabel(xlateLabel(pStrip) if (p not in xylabels or not xylabels[p][0]) else xylabels[p][0])
else:
pylab.ylabel((', '.join([xlateLabel(_p[0]) for _p in ySpecs2])) if (p not in xylabels or len(xylabels[p])<3 or not xylabels[p][2]) else xylabels[p][2])
# if there are callable/dict ySpecs, save them inside the axes object, so that the live updater can use those
if yNameFuncs:
axes.yadeYNames,axes.yadeYFuncs,axes.yadeXName,axes.yadeLabelLoc=yNames,yNameFuncs,pStrip,labelLoc # prepend yade to avoid clashes
createLines(pStrip,plots_p_y1,isY1=True,y2Exists=len(plots_p_y2)>0)
if axesWd>0:
pylab.axhline(linewidth=axesWd,color='k')
pylab.axvline(linewidth=axesWd,color='k')
# create y2 lines, if any
if len(plots_p_y2)>0:
pylab.twinx() # create the y2 axis
createLines(pStrip,plots_p_y2,isY1=False,y2Exists=True)
if 'title' in list(O.tags.keys()): pylab.title(O.tags['title'])
def liveUpdate(timestamp):
global liveTimeStamp
liveTimeStamp=timestamp
while True:
if not live or liveTimeStamp!=timestamp: return
figs,axes,linesData=set(),set(),set()
for l in currLineRefs:
l.update()
figs.add(l.line.get_figure())
axes.add(l.line.axes)
linesData.add(id(l.ydata))
# find callables in y specifiers, create new lines if necessary
for ax in axes:
			if not hasattr(ax,'yadeYFuncs') or not ax.yadeYFuncs: continue # not defined or empty
yy=set();
for f in ax.yadeYFuncs:
if callable(f): yy.update(f())
elif hasattr(f,'keys'): yy.update(list(f.keys()))
else: raise ValueError("Internal error: ax.yadeYFuncs items must be callables or dictionary-like objects and nothing else.")
#print 'callables y names:',yy
news=yy-ax.yadeYNames
if not news: continue
for new in news:
ax.yadeYNames.add(new)
if new in list(data.keys()) and id(data[new]) in linesData: continue # do not add when reloaded and the old lines are already there
print('yade.plot: creating new line for',new)
if not new in list(data.keys()): data[new]=len(data[ax.yadeXName])*[nan] # create data entry if necessary
#print 'data',len(data[ax.yadeXName]),len(data[new]),data[ax.yadeXName],data[new]
line,=ax.plot(data[ax.yadeXName],data[new],label=xlateLabel(new)) # no line specifier
line2,=ax.plot([],[],color=line.get_color(),alpha=afterCurrentAlpha)
scatterPt=(0 if len(data[ax.yadeXName])==0 or math.isnan(data[ax.yadeXName][current]) else data[ax.yadeXName][current]),(0 if len(data[new])==0 or math.isnan(data[new][current]) else data[new][current])
scatter=ax.scatter(scatterPt[0],scatterPt[1],s=60,color=line.get_color(),**scatterMarkerKw)
currLineRefs.append(LineRef(line,scatter,line2,data[ax.yadeXName],data[new]))
ax.set_ylabel(ax.get_ylabel()+(', ' if ax.get_ylabel() else '')+xlateLabel(new))
# it is possible that the legend has not yet been created
l=ax.legend(loc=ax.yadeLabelLoc)
if hasattr(l,'draggable'): l.draggable(True)
if autozoom:
for ax in axes:
try:
ax.relim() # recompute axes limits
ax.autoscale_view()
except RuntimeError: pass # happens if data are being updated and have not the same dimension at the very moment
for fig in figs:
try:
fig.canvas.draw()
except RuntimeError: pass # happens here too
time.sleep(liveInterval)
def savePlotSequence(fileBase,stride=1,imgRatio=(5,7),title=None,titleFrames=20,lastFrames=30):
'''Save sequence of plots, each plot corresponding to one line in history. It is especially meant to be used for :yref:`yade.utils.makeVideo`.
	:param stride: only consider every stride-th line of history (default creates one frame per line)
:param title: Create title frame, where lines of title are separated with newlines (``\\n``) and optional subtitle is separated from title by double newline.
:param int titleFrames: Create this number of frames with title (by repeating its filename), determines how long the title will stand in the movie.
:param int lastFrames: Repeat the last frame this number of times, so that the movie does not end abruptly.
:return: List of filenames with consecutive frames.
'''
createPlots(subPlots=True,scatterSize=60,wider=True)
sqrtFigs=math.sqrt(len(plots))
pylab.gcf().set_size_inches(8*sqrtFigs,5*sqrtFigs) # better readable
pylab.subplots_adjust(left=.05,right=.95,bottom=.05,top=.95) # make it more compact
if len(plots)==1 and plots[list(plots.keys())[0]]==None: # only pure snapshot is there
pylab.gcf().set_size_inches(5,5)
pylab.subplots_adjust(left=0,right=1,bottom=0,top=1)
#if not data.keys(): raise ValueError("plot.data is empty.")
pltLen=max(len(data[list(data.keys())[0]]) if data else 0,len(imgData[list(imgData.keys())[0]]) if imgData else 0)
if pltLen==0: raise ValueError("Both plot.data and plot.imgData are empty.")
global current, currLineRefs
ret=[]
print('Saving %d plot frames, it can take a while...'%(pltLen))
for i,n in enumerate(range(0,pltLen,stride)):
current=n
for l in currLineRefs: l.update()
out=fileBase+'-%03d.png'%i
pylab.gcf().savefig(out)
ret.append(out)
if len(ret)==0: raise RuntimeError("No images created?!")
if title:
titleImgName=fileBase+'-title.png'
createTitleFrame(titleImgName,Image.open(ret[-1]).size,title)
ret=titleFrames*[titleImgName]+ret
if lastFrames>1: ret+=(lastFrames-1)*[ret[-1]]
return ret
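# Editor's sketch (not part of the original module): turn the saved frame
# sequence into a video. The exact signature of yade.utils.makeVideo may differ
# between Yade versions; treat the call below as an illustration only.
def _savePlotSequenceUsageSketch():
	from yade import utils
	frames=savePlotSequence('/tmp/plotFrames',stride=5,title='My simulation\n\nrun parameters')
	utils.makeVideo(frames,'/tmp/plots.avi')  # assumed call form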
def createTitleFrame(out,size,title):
'create figure with title and save to file; a figure object must be opened to get the right size'
pylab.clf(); fig=pylab.gcf()
#insize=fig.get_size_inches(); size=insize[1]*fig.get_dpi(),insize[0]*fig.get_dpi() # this gives wrong dimensions...
#fig.set_facecolor('blue'); fig.patch.set_color('blue'); fig.patch.set_facecolor('blue'); fig.patch.set_alpha(None)
title,subtitle=title.split('\n\n')
lines=[(t,True) for t in title.split('\n')]+([(t,False) for t in subtitle.split('\n')] if subtitle else [])
nLines=len(lines); fontSizes=size[1]/10.,size[1]/16.
import matplotlib.mathtext
def writeLine(text,vertPos,fontsize):
rgba,depth=matplotlib.mathtext.MathTextParser('Bitmap').to_rgba(text,fontsize=fontsize,dpi=fig.get_dpi(),color='blue')
textsize=rgba.shape[1],rgba.shape[0]
if textsize[0]>size[0]:
rgba,depth=matplotlib.mathtext.MathTextParser('Bitmap').to_rgba(text,fontsize=fontsize*size[0]/textsize[0],dpi=fig.get_dpi(),color='blue')
textsize=rgba.shape[1],rgba.shape[0]
fig.figimage(rgba.astype(float)/255.,xo=(size[0]-textsize[0])/2.,yo=vertPos-depth)
ht=size[1]; y0=ht-2*fontSizes[0]; yStep=(ht-2.5*fontSizes[0])/len(lines)
for i,(l,isTitle) in enumerate(lines):
writeLine(l,y0-i*yStep,fontSizes[0 if isTitle else 1])
fig.savefig(out)
def plot(noShow=False,subPlots=True):
"""Do the actual plot, which is either shown on screen (and nothing is returned: if *noShow* is ``False`` - note that your yade compilation should present qt4 feature so that figures can be displayed) or, if *noShow* is ``True``, returned as matplotlib's Figure object or list of them.
You can use
>>> from yade import plot
>>> plot.resetData()
>>> plot.plots={'foo':('bar',)}
>>> plot.plot(noShow=True).savefig('someFile.pdf')
>>> import os
>>> os.path.exists('someFile.pdf')
True
>>> os.remove('someFile.pdf')
to save the figure to file automatically.
	.. note:: For backwards compatibility reasons, the *noShow* option returns a list of figures when there are multiple figures, but a single figure (rather than a one-element list) when there is only one figure.
"""
createPlots(subPlots=subPlots)
global currLineRefs
figs=set([l.line.axes.get_figure() for l in currLineRefs])
if not hasattr(list(figs)[0],'show') and not noShow:
import warnings
warnings.warn('plot.plot not showing figure (matplotlib using headless backend?)')
noShow=True
if not noShow:
if not yade.runtime.hasDisplay: return # would error out with some backends, such as Agg used in batches
if live:
import _thread
_thread.start_new_thread(liveUpdate,(time.time(),))
# pylab.show() # this blocks for some reason; call show on figures directly
for f in figs:
f.show()
else:
figs=list(set([l.line.get_figure() for l in currLineRefs]))
if len(figs)==1: return figs[0]
else: return figs
def saveDataTxt(fileName,vars=None, headers=None):
"""Save plot data into a (optionally compressed) text file. The first line contains a comment (starting with ``#``) giving variable name for each of the columns. This format is suitable for being loaded for further processing (outside yade) with ``numpy.genfromtxt`` function, which recognizes those variable names (creating numpy array with named entries) and handles decompression transparently.
>>> from yade import plot
>>> from pprint import pprint
>>> plot.reset()
>>> plot.addData(a=1,b=11,c=21,d=31) # add some data here
>>> plot.addData(a=2,b=12,c=22,d=32)
>>> pprint(plot.data)
{'a': [1, 2], 'b': [11, 12], 'c': [21, 22], 'd': [31, 32]}
>>> plot.saveDataTxt('/tmp/dataFile.txt.tar.gz',vars=('a','b','c'))
>>> import numpy
>>> d=numpy.genfromtxt('/tmp/dataFile.txt.tar.gz',dtype=None,names=True)
>>> d['a']
array([1, 2])
>>> d['b']
array([11, 12])
:param fileName: file to save data to; if it ends with ``.bz2`` / ``.gz``, the file will be compressed using bzip2 / gzip.
:param vars: Sequence (tuple/list/set) of variable names to be saved. If ``None`` (default), all variables in :yref:`yade.plot.plot` are saved.
	:param headers: Dictionary of parameters to be written as comment lines in the file header
"""
import bz2,gzip
if not vars:
vars=list(data.keys()); vars.sort()
write_bytemode=False
if fileName.endswith('.bz2'): f=bz2.BZ2File(fileName,'w') ; write_bytemode=True
elif fileName.endswith('.gz'): f=gzip.GzipFile(fileName,'w') ; write_bytemode=True
else: f=open(fileName,'w')
if headers:
k = list(headers.keys());
for i in range(len(k)):
out=("# "+k[i]+"=\t"+str(headers[k[i]])+"\n")
if(write_bytemode): out=out.encode("utf-8")
f.write(out)
out=str("# "+"\t\t".join(vars)+"\n")
if(write_bytemode): out=out.encode("utf-8")
f.write(out)
for i in range(len(data[vars[0]])):
out="\t".join([str(data[var][i]) for var in vars])+"\n"
if(write_bytemode): out=out.encode("utf-8")
f.write(out)
f.close()
def savePylab(baseName,timestamp=False,title=None):
'''This function is not finished, do not use it.'''
import time
if len(list(data.keys()))==0: raise RuntimeError("No data for plotting were saved.")
if timestamp: baseName+=_mkTimestamp()
baseNameNoPath=baseName.split('/')[-1]
saveDataTxt(fileName=baseName+'.data.bz2')
if len(plots)==0: raise RuntimeError("No plots to save, only data saved.")
py=open(baseName+'.py','w')
py.write('#!/usr/bin/env python\n# encoding: utf-8\n# created '+time.asctime()+' ('+time.strftime('%Y%m%d_%H:%M')+')\n#\nimport pylab, numpy\n')
py.write("data=numpy.genfromtxt('%s.data.bz2',dtype=None,names=True)\n"%baseName)
subCols=int(round(math.sqrt(len(plots)))); subRows=int(math.ceil(len(plots)*1./subCols))
for nPlot,p in enumerate(plots.keys()):
pStrip=p.strip().split('=',1)[0]
		if plots[p]==None: continue # image plot, which is not exported
		if len(plots)==1: py.write('pylab.figure()\n')
		else: py.write('pylab.subplot(%d,%d,%d)\n'%(subRows,subCols,nPlot+1))
def _mkTimestamp():
import time
return time.strftime('_%Y%m%d_%H:%M')
def saveGnuplot(baseName,term='wxt',extension=None,timestamp=False,comment=None,title=None,varData=False):
"""Save data added with :yref:`yade.plot.addData` into (compressed) file and create .gnuplot file that attempts to mimick plots specified with :yref:`yade.plot.plots`.
:param baseName: used for creating baseName.gnuplot (command file for gnuplot), associated ``baseName.data.bz2`` (data) and output files (if applicable) in the form ``baseName.[plot number].extension``
	:param term: specify the gnuplot terminal; defaults to ``wxt``, in which case gnuplot will draw persistent windows to screen and terminate; other useful terminals are ``png``, ``cairopdf`` and so on
	:param extension: extension for output files derived from ``baseName``; defaults to the terminal name (fine for e.g. ``png``), but if you use ``cairopdf`` you should also pass ``extension='pdf'``
	:param bool timestamp: append numeric time to the basename
	:param bool varData: whether the data file is referred to through a gnuplot variable rather than written in-place into each plot expression
:param comment: a user comment (may be multiline) that will be embedded in the control file
:return: name of the gnuplot file created.
"""
if len(list(data.keys()))==0: raise RuntimeError("No data for plotting were saved.")
if timestamp: baseName+=_mkTimestamp()
baseNameNoPath=baseName.split('/')[-1]
vars=list(data.keys()); vars.sort()
saveDataTxt(fileName=baseName+'.data.bz2',vars=vars)
fPlot=open(baseName+".gnuplot",'w')
fPlot.write('#!/usr/bin/env gnuplot\n#\n# created '+time.asctime()+' ('+time.strftime('%Y%m%d_%H:%M')+')\n#\n')
if comment: fPlot.write('# '+comment.replace('\n','\n# ')+'#\n')
dataFile='"< bzcat %s.data.bz2"'%(baseNameNoPath)
if varData:
fPlot.write('dataFile=%s'%dataFile); dataFile='dataFile'
if not extension: extension=term
i=0
for p in plots:
pStrip=p.strip().split('=',1)[0]
if plots[p]==None: continue ## this plot is image plot, which is not applicable to gnuplot
plots_p=[addPointTypeSpecifier(o) for o in tuplifyYAxis(plots[p])]
if term in ['wxt','x11']: fPlot.write("set term %s %d persist\n"%(term,i))
else: fPlot.write("set term %s; set output '%s.%d.%s'\n"%(term,baseNameNoPath,i,extension))
fPlot.write("set xlabel '%s'\n"%xlateLabel(p))
fPlot.write("set grid\n")
fPlot.write("set datafile missing 'nan'\n")
if title: fPlot.write("set title '%s'\n"%title)
y1=True; plots_y1,plots_y2=[],[]
		# replace callable/dict-like data specifiers by the results, if that particular data exists
plots_p2=[]
for pp in plots_p:
if callable(pp[0]): plots_p2+=[(ppp,'') for ppp in pp[0]() if ppp in list(data.keys())]
elif hasattr(pp[0],'keys'): plots_p2+=[(name,val) for name,val in list(pp[0].items()) if name in list(data.keys())]
else: plots_p2.append((pp[0],pp[1]))
plots_p=plots_p2
#plots_p=sum([([(pp,'') for pp in p[0]() if pp in data.keys()] if callable(p[0]) else [(p[0],p[1])] ) for p in plots_p],[])
for d in plots_p:
if d[0]==None:
y1=False; continue
if y1: plots_y1.append(d)
else: plots_y2.append(d)
fPlot.write("set ylabel '%s'\n"%(','.join([xlateLabel(_p[0]) for _p in plots_y1])))
if len(plots_y2)>0:
fPlot.write("set y2label '%s'\n"%(','.join([xlateLabel(_p[0]) for _p in plots_y2])))
fPlot.write("set y2tics\n")
ppp=[]
for pp in plots_y1: ppp.append(" %s using %d:%d title '← %s(%s)' with lines"%(dataFile,vars.index(pStrip)+1,vars.index(pp[0])+1,xlateLabel(pp[0]),xlateLabel(pStrip),))
for pp in plots_y2: ppp.append(" %s using %d:%d title '%s(%s) →' with lines axes x1y2"%(dataFile,vars.index(pStrip)+1,vars.index(pp[0])+1,xlateLabel(pp[0]),xlateLabel(pStrip),))
fPlot.write("plot "+",".join(ppp)+"\n")
i+=1
fPlot.close()
return baseName+'.gnuplot'
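# Editor's sketch (not part of the original module): export the collected data
# and run gnuplot on the generated command file. The command file refers to the
# data file by its bare name, so gnuplot is run from the same directory. This
# assumes ``plots`` has been set and ``addData`` has been called at least once.
def _saveGnuplotUsageSketch():
	import subprocess,os
	gp=saveGnuplot('/tmp/sim',term='png',extension='png')
	subprocess.call(['gnuplot',os.path.basename(gp)],cwd=os.path.dirname(gp))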
| gpl-2.0 |
uvemas/ViTables | examples/scripts/scikits_timeseries2.py | 1 | 2711 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2019 Vicent Mas. All rights reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Vicent Mas - vmas@vitables.org
"""Storing time series created with scikits.timeseries module in PyTables.
Example 2.
Notes:
-The dates from the yahoo quotes module get returned as integers, which happen
to correspond to the integer representation of 'DAILY' frequency dates in the
scikits.timeseries module.
-`fill_missing_dates` will insert masked values for any missing data points.
Note that you could plot the series without doing this, but it would cause
missing values to be linearly interpolated rather than left empty in the plot
"""
import os
import datetime
from matplotlib.finance import quotes_historical_yahoo
import scikits.timeseries as ts
import scikits.timeseries.lib.tstables as tstab
startdate = datetime.date(2002, 1, 5)
enddate = datetime.date(2003, 12, 1)
# retrieve data from yahoo.
# Data format is [(d, open, close, high, low, volume), ...] where d is
# a floating point representation of the number of days since 01-01-01 UTC
quotes = quotes_historical_yahoo('INTC', startdate, enddate)
# Create a DateArray of daily dates and convert it to business day frequency
dates = ts.date_array([q[0] for q in quotes], freq='DAILY').asfreq('BUSINESS')
opens = [q[1] for q in quotes]
# opens: the data portion of the time series
# dates: the date portion of the time series
raw_series = ts.time_series(opens, dates)
test_series = raw_series
#test_series = ts.fill_missing_dates(raw_series, fill_value=-1)
# Write to a PyTables file
output_dir = '../timeseries'
try:
os.mkdir(output_dir)
except OSError:
pass
hdf5_name = 'scikits_test2.hdf5'
filepath_hdf5 = os.path.join(output_dir, hdf5_name)
h5file = tstab.open_file(filepath_hdf5, mode="w",
                         title='Example table with scikits time series')
group_doc = h5file.create_group("/", 'examples', 'Test Data')
table = h5file.createTimeSeriesTable(group_doc, 'Example_2', test_series)
h5file.close()
| gpl-3.0 |
etkirsch/scikit-learn | sklearn/manifold/t_sne.py | 48 | 20644 | # Author: Alexander Fabisch -- <afabisch@informatik.uni-bremen.de>
# License: BSD 3 clause (C) 2014
# This is the standard t-SNE implementation. There are faster modifications of
# the algorithm:
# * Barnes-Hut-SNE: reduces the complexity of the gradient computation from
# N^2 to N log N (http://arxiv.org/abs/1301.3342)
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
conditional_P = _utils._binary_search_perplexity(
distances, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
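def _joint_probabilities_sketch():
    # Editor's sketch (not part of the upstream file): the returned vector is a
    # condensed matrix covering each unordered pair once, so the full symmetric
    # P sums to (approximately) one after accounting for the factor of two.
    # The sizes and perplexity below are arbitrary illustrations.
    rng = np.random.RandomState(0)
    distances = pairwise_distances(rng.rand(10, 3), squared=True)
    P = _joint_probabilities(distances, desired_perplexity=5.0, verbose=0)
    return 2.0 * P.sum(), squareform(P).shape  # roughly (1.0, (10, 10))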
def _kl_divergence(params, P, alpha, n_samples, n_components):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
alpha : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= alpha
n **= (alpha + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
# Gradient: dC/dY
grad = np.ndarray((n_samples, n_components))
PQd = squareform((P - Q) * n)
for i in range(n_samples):
np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
grad = grad.ravel()
c = 2.0 * (alpha + 1.0) / alpha
grad *= c
return kl_divergence, grad
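def _kl_divergence_gradient_sketch():
    # Editor's sketch (not part of the upstream file): a finite-difference probe
    # of the (cost, gradient) pair returned above, on a tiny random problem.
    # This is exactly the contract that _gradient_descent below relies on; the
    # sizes and step size are arbitrary illustrations.
    rng = np.random.RandomState(0)
    n_samples, n_components, alpha = 6, 2, 1.0
    distances = pairwise_distances(rng.rand(n_samples, 3), squared=True)
    P = _joint_probabilities(distances, desired_perplexity=3.0, verbose=0)
    params = 1e-2 * rng.randn(n_samples * n_components)
    kl, grad = _kl_divergence(params, P, alpha, n_samples, n_components)
    eps = 1e-6
    num_grad = np.empty_like(params)
    for i in range(params.size):
        step = np.zeros_like(params)
        step[i] = eps
        kl_plus = _kl_divergence(params + step, P, alpha, n_samples, n_components)[0]
        kl_minus = _kl_divergence(params - step, P, alpha, n_samples, n_components)[0]
        num_grad[i] = (kl_plus - kl_minus) / (2.0 * eps)
    # the analytic and numerical gradients should agree to round-off level
    return kl, np.max(np.abs(grad - num_grad))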
def _gradient_descent(objective, p0, it, n_iter, n_iter_without_progress=30,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
grad_norm = linalg.norm(grad)
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if min_grad_norm >= grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if min_error_diff >= error_diff:
if verbose >= 2:
print("[t-SNE] Iteration %d: error difference %f. Finished."
% (i + 1, error_diff))
break
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
        np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if verbose >= 2 and (i + 1) % 10 == 0:
print("[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
% (i + 1, error, grad_norm))
return p, error, i
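def _gradient_descent_demo_sketch():
    # Editor's sketch (not part of the upstream file): the helper above only
    # needs an objective returning (cost, gradient); here it minimises the
    # convex quadratic 0.5*||p||**2. The learning rate is scaled down because
    # the t-SNE default of 1000 is far too large for this toy problem.
    def objective(p):
        return 0.5 * np.dot(p, p), p.copy()
    p, error, it = _gradient_descent(objective, np.array([1.0, -2.0, 3.0]),
                                     it=0, n_iter=100, learning_rate=0.5)
    return p, error, it  # p ends up close to the origin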
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
            \sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
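def _trustworthiness_usage_sketch():
    # Editor's sketch (not part of the upstream file): score how well a trivial
    # 2-D "embedding" (the first two input coordinates) preserves the local
    # neighbourhood structure of random data; 1.0 would be a perfect score.
    rng = np.random.RandomState(0)
    X = rng.rand(40, 5)
    return trustworthiness(X, X[:, :2], n_neighbors=5)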
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
        usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
min_grad_norm : float, optional (default: 1E-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string, optional (default: "random")
Initialization of embedding. Possible options are 'random' and 'pca'.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Note that different initializations
might result in different local minima of the cost function.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 887.28..., 238.61...],
[ -714.79..., 3243.34...],
[ 957.30..., -2505.78...],
           [-1130.28..., -974.78...]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
n_iter_without_progress=30, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
random_state=None):
if init not in ["pca", "random"]:
raise ValueError("'init' must be either 'pca' or 'random'")
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model using X as training data.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric, squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
# Degrees of freedom of the Student's t-distribution. The suggestion
# alpha = n_components - 1 comes from "Learning a Parametric Embedding
# by Preserving Local Structure" Laurens van der Maaten, 2009.
alpha = max(self.n_components - 1.0, 1)
n_samples = X.shape[0]
self.training_data_ = X
P = _joint_probabilities(distances, self.perplexity, self.verbose)
if self.init == 'pca':
pca = RandomizedPCA(n_components=self.n_components,
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
self.embedding_ = self._tsne(P, alpha, n_samples, random_state,
X_embedded=X_embedded)
return self
def _tsne(self, P, alpha, n_samples, random_state, X_embedded=None):
"""Runs t-SNE."""
# t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
# Early exaggeration
P *= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=0, n_iter=50, momentum=0.5,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=100, momentum=0.8,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations with early "
"exaggeration: %f" % (it + 1, error))
# Final optimization
P /= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=self.n_iter,
min_grad_norm=self.min_grad_norm,
n_iter_without_progress=self.n_iter_without_progress,
momentum=0.8, learning_rate=self.learning_rate,
verbose=self.verbose, args=[P, alpha, n_samples,
self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations: %f" % (it + 1, error))
X_embedded = params.reshape(n_samples, self.n_components)
return X_embedded
def fit_transform(self, X, y=None):
"""Transform X to the embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
self.fit(X)
return self.embedding_
| bsd-3-clause |
dilawar/moose-full | moose-examples/paper-2015/Fig5_CellMultiscale/Fig5A.py | 2 | 6176 | ########################################################################
# This program is copyright (c) Upinder S. Bhalla, NCBS, 2015.
# It is licenced under the GPL 2.1 or higher.
# There is no warranty of any kind. You are welcome to make copies under
# the provisions of the GPL.
# This programme illustrates building a panel of multiscale models to
# test neuronal plasticity in different contexts. The simulation is set
# to settle for 5 seconds, then a 2 second tetanus is delivered, then
# the simulation continues for another 50 seconds.
# By default we set it to run the smallest model, that takes about 4 minutes
# to run 57 seconds of simulation time, on an Intel core I7 at
# 2.2 GHz. The big model, VHC-neuron, takes almost 90 minutes.
# This program dumps data to text files for further analysis.
########################################################################
import moogli
import numpy
import time
import pylab
import moose
from moose import neuroml
from PyQt4 import Qt, QtCore, QtGui
import matplotlib.pyplot as plt
import sys
import os
from moose.neuroml.ChannelML import ChannelML
sys.path.append('/home/bhalla/moose/trunk/Demos/util')
import rdesigneur as rd
PI = 3.14159265359
useGssa = True
combineSegments = False
#### Choose your favourite models here. #################
#elecFileNames = ( "ca1_minimal.p", )
#elecFileNames = ( "ca1_minimal.p", "h10.CNG.swc" )
elecFileNames = ( "CA1.morph.xml", "ca1_minimal.p", "VHC-neuron.CNG.swc", "h10.CNG.swc" )
synSpineList = []
synDendList = []
probeInterval = 0.1
probeAmplitude = 1.0
tetanusFrequency = 100.0
tetanusAmplitude = 1000
tetanusAmplitudeForSpines = 1000
baselineTime = 5
tetTime = 2
postTetTime = 50
def buildRdesigneur():
##################################################################
# Here we define which prototypes are to be loaded in to the system.
# Each specification has the format
# source [localName]
# source can be any of
# filename.extension, # Identify type of file by extension, load it.
# function(), # func( name ) builds object of specified name
# file.py:function() , # load Python file, run function(name) in it.
# moose.Classname # Make obj moose.Classname, assign to name.
# path # Already loaded into library or on path.
# After loading the prototypes, there should be an object called 'name'
# in the library.
##################################################################
spineProto = [ \
['makeSpineProto()', 'spine' ]
]
##################################################################
# Here we define what goes where, and any parameters. Each distribution
# has the format
# protoName, path, field, expr, [field, expr]...
# where
# protoName identifies the prototype to be placed on the cell
# path is a MOOSE wildcard path specifying where to put things
# field is the field to assign.
# expr is a math expression to define field value. This uses the
# muParser. Built-in variables are p, g, L, len, dia.
# The muParser provides most math functions, and the Heaviside
# function H(x) = 1 for x > 0 is also provided.
##################################################################
passiveDistrib = [
[ ".", "#", "RM", "2.8", "CM", "0.01", "RA", "1.5", \
"Em", "-58e-3", "initVm", "-65e-3" ], \
[ ".", "#axon#", "RA", "0.5" ] \
]
spineDistrib = [ \
["spine", '#apical#', "spineSpacing", "20e-6", \
"spineSpacingDistrib", "2e-6", \
"angle", "0", \
"angleDistrib", str( 2*PI ), \
"size", "1", \
"sizeDistrib", "0.5" ] \
]
######################################################################
# Having defined everything, now to create the rdesigneur and proceed
# with creating the model.
######################################################################
rdes = rd.rdesigneur(
combineSegments = combineSegments, \
stealCellFromLibrary = True, \
passiveDistrib = passiveDistrib, \
spineDistrib = spineDistrib, \
spineProto = spineProto \
)
return rdes
def interlude( view ):
view.yaw( 0.01 )
def create_viewer(rdes):
print ' doing viewer for ', rdes.soma.path
network = moogli.extensions.moose.read(rdes.elecid.path)
normalizer = moogli.utilities.normalizer(-0.08,
0.02,
clipleft=True,
clipright=True)
colormap = moogli.colors.UniformColorMap([moogli.colors.Color(0.0,
0.5,
1.0,
1.0),
moogli.colors.Color(1.0,
0.0,
0.0,
0.9)])
mapper = moogli.utilities.mapper(colormap, normalizer)
vms = [moose.element(x).Vm for x in network.shapes.keys()]
network.set("color", vms, mapper)
viewer = moogli.Viewer("vm-viewer")
viewer.attach_shapes(network.shapes.values())
view = moogli.View("vm-view", interlude=interlude )
viewer.attach_view(view)
viewer.show()
viewer.start()
view.pitch( PI/2.0 )
return viewer
def main():
app = QtGui.QApplication(sys.argv)
numpy.random.seed(1234)
rdes = buildRdesigneur()
viewers = []
j = 0
for i in elecFileNames:
print i
ename = '/model' + str(j)
rdes.cellProtoList = [ ['./cells/' + i, 'elec' ] ]
rdes.buildModel( ename )
assert( moose.exists( ename ) )
moose.reinit()
viewers.append(create_viewer(rdes))
j = j + 1
app.exec_()
if __name__ == '__main__':
main()
| gpl-2.0 |
e-q/scipy | scipy/optimize/minpack.py | 4 | 34808 | import warnings
from . import _minpack
import numpy as np
from numpy import (atleast_1d, dot, take, triu, shape, eye,
transpose, zeros, prod, greater,
asarray, inf,
finfo, inexact, issubdtype, dtype)
from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError, inv
from scipy._lib._util import _asarray_validated, _lazywhere
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
from .optimize import OptimizeResult, _check_unknown_options, OptimizeWarning
from ._lsq import least_squares
# from ._lsq.common import make_strictly_feasible
from ._lsq.least_squares import prepare_bounds
error = _minpack.error
__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']
def _check_func(checker, argname, thefunc, x0, args, numinputs,
output_shape=None):
res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
if (output_shape is not None) and (shape(res) != output_shape):
if (output_shape[0] != 1):
if len(output_shape) > 1:
if output_shape[1] == 1:
return shape(res)
msg = "%s: there is a mismatch between the input and output " \
"shape of the '%s' argument" % (checker, argname)
func_name = getattr(thefunc, '__name__', None)
if func_name:
msg += " '%s'." % func_name
else:
msg += "."
    msg += ' Shape should be %s but it is %s.' % (output_shape, shape(res))
raise TypeError(msg)
if issubdtype(res.dtype, inexact):
dt = res.dtype
else:
dt = dtype(float)
return shape(res), dt
def fsolve(func, x0, args=(), fprime=None, full_output=0,
col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
epsfcn=None, factor=100, diag=None):
"""
Find the roots of a function.
Return the roots of the (non-linear) equations defined by
``func(x) = 0`` given a starting estimate.
Parameters
----------
func : callable ``f(x, *args)``
A function that takes at least one (possibly vector) argument,
and returns a value of the same length.
x0 : ndarray
The starting estimate for the roots of ``func(x) = 0``.
args : tuple, optional
Any extra arguments to `func`.
fprime : callable ``f(x, *args)``, optional
A function to compute the Jacobian of `func` with derivatives
across the rows. By default, the Jacobian will be estimated.
full_output : bool, optional
If True, return optional outputs.
col_deriv : bool, optional
Specify whether the Jacobian function computes derivatives down
the columns (faster, because there is no transpose operation).
xtol : float, optional
The calculation will terminate if the relative error between two
consecutive iterates is at most `xtol`.
maxfev : int, optional
The maximum number of calls to the function. If zero, then
``100*(N+1)`` is the maximum where N is the number of elements
in `x0`.
band : tuple, optional
If set to a two-sequence containing the number of sub- and
super-diagonals within the band of the Jacobi matrix, the
Jacobi matrix is considered banded (only for ``fprime=None``).
epsfcn : float, optional
A suitable step length for the forward-difference
approximation of the Jacobian (for ``fprime=None``). If
`epsfcn` is less than the machine precision, it is assumed
that the relative errors in the functions are of the order of
the machine precision.
factor : float, optional
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in the interval
``(0.1, 100)``.
diag : sequence, optional
N positive entries that serve as a scale factors for the
variables.
Returns
-------
x : ndarray
The solution (or the result of the last iteration for
an unsuccessful call).
infodict : dict
A dictionary of optional outputs with the keys:
``nfev``
number of function calls
``njev``
number of Jacobian calls
``fvec``
function evaluated at the output
``fjac``
the orthogonal matrix, q, produced by the QR
factorization of the final approximate Jacobian
matrix, stored column wise
``r``
upper triangular matrix produced by QR factorization
of the same matrix
``qtf``
the vector ``(transpose(q) * fvec)``
ier : int
An integer flag. Set to 1 if a solution was found, otherwise refer
to `mesg` for more information.
mesg : str
If no solution is found, `mesg` details the cause of failure.
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See the ``method=='hybr'`` in particular.
Notes
-----
``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.
Examples
--------
Find a solution to the system of equations:
``x0*cos(x1) = 4, x1*x0 - x1 = 5``.
>>> from scipy.optimize import fsolve
>>> def func(x):
... return [x[0] * np.cos(x[1]) - 4,
... x[1] * x[0] - x[1] - 5]
>>> root = fsolve(func, [1, 1])
>>> root
array([6.50409711, 0.90841421])
>>> np.isclose(func(root), [0.0, 0.0]) # func(root) should be almost 0.0.
array([ True, True])
"""
options = {'col_deriv': col_deriv,
'xtol': xtol,
'maxfev': maxfev,
'band': band,
'eps': epsfcn,
'factor': factor,
'diag': diag}
res = _root_hybr(func, x0, args, jac=fprime, **options)
if full_output:
x = res['x']
info = dict((k, res.get(k))
for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res)
info['fvec'] = res['fun']
return x, info, res['status'], res['message']
else:
status = res['status']
msg = res['message']
if status == 0:
raise TypeError(msg)
elif status == 1:
pass
elif status in [2, 3, 4, 5]:
warnings.warn(msg, RuntimeWarning)
else:
raise TypeError(msg)
return res['x']
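def _fsolve_full_output_sketch():
    # Editor's sketch (not part of the public API): inspect the optional outputs
    # of fsolve on a one-dimensional problem (the positive root of x**2 - 2).
    x, infodict, ier, mesg = fsolve(lambda x: x**2 - 2.0, x0=1.0,
                                    full_output=True)
    # ier == 1 signals convergence; infodict['nfev'] counts function evaluations
    return x, infodict['nfev'], ier, mesg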
def _root_hybr(func, x0, args=(), jac=None,
col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
factor=100, diag=None, **unknown_options):
"""
Find the roots of a multivariate function using MINPACK's hybrd and
hybrj routines (modified Powell method).
Options
-------
col_deriv : bool
Specify whether the Jacobian function computes derivatives down
the columns (faster, because there is no transpose operation).
xtol : float
The calculation will terminate if the relative error between two
consecutive iterates is at most `xtol`.
maxfev : int
The maximum number of calls to the function. If zero, then
``100*(N+1)`` is the maximum where N is the number of elements
in `x0`.
band : tuple
If set to a two-sequence containing the number of sub- and
super-diagonals within the band of the Jacobi matrix, the
Jacobi matrix is considered banded (only for ``fprime=None``).
eps : float
A suitable step length for the forward-difference
approximation of the Jacobian (for ``fprime=None``). If
`eps` is less than the machine precision, it is assumed
that the relative errors in the functions are of the order of
the machine precision.
factor : float
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in the interval
``(0.1, 100)``.
diag : sequence
N positive entries that serve as a scale factors for the
variables.
"""
_check_unknown_options(unknown_options)
epsfcn = eps
x0 = asarray(x0).flatten()
n = len(x0)
if not isinstance(args, tuple):
args = (args,)
shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
if epsfcn is None:
epsfcn = finfo(dtype).eps
Dfun = jac
if Dfun is None:
if band is None:
ml, mu = -10, -10
else:
ml, mu = band[:2]
if maxfev == 0:
maxfev = 200 * (n + 1)
retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,
ml, mu, epsfcn, factor, diag)
else:
_check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
if (maxfev == 0):
maxfev = 100 * (n + 1)
retval = _minpack._hybrj(func, Dfun, x0, args, 1,
col_deriv, xtol, maxfev, factor, diag)
x, status = retval[0], retval[-1]
errors = {0: "Improper input parameters were entered.",
1: "The solution converged.",
2: "The number of calls to function has "
"reached maxfev = %d." % maxfev,
3: "xtol=%f is too small, no further improvement "
"in the approximate\n solution "
"is possible." % xtol,
4: "The iteration is not making good progress, as measured "
"by the \n improvement from the last five "
"Jacobian evaluations.",
5: "The iteration is not making good progress, "
"as measured by the \n improvement from the last "
"ten iterations.",
'unknown': "An error occurred."}
info = retval[1]
info['fun'] = info.pop('fvec')
sol = OptimizeResult(x=x, success=(status == 1), status=status)
sol.update(info)
try:
sol['message'] = errors[status]
except KeyError:
sol['message'] = errors['unknown']
return sol
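def _root_hybr_usage_sketch():
    # Editor's sketch (not part of the public API): _root_hybr is normally
    # reached through scipy.optimize.root with method='hybr' (the default).
    from scipy.optimize import root

    def fun(x):
        return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
                0.5 * (x[1] - x[0])**3 + x[1]]

    sol = root(fun, [0.0, 0.0], method='hybr')
    return sol.x, sol.success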
LEASTSQ_SUCCESS = [1, 2, 3, 4]
LEASTSQ_FAILURE = [5, 6, 7, 8]
def leastsq(func, x0, args=(), Dfun=None, full_output=0,
col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
"""
Minimize the sum of squares of a set of equations.
::
x = arg min(sum(func(y)**2,axis=0))
y
Parameters
----------
func : callable
Should take at least one (possibly length N vector) argument and
returns M floating point numbers. It must not return NaNs or
fitting might fail.
x0 : ndarray
The starting estimate for the minimization.
args : tuple, optional
Any extra arguments to func are placed in this tuple.
Dfun : callable, optional
A function or method to compute the Jacobian of func with derivatives
across the rows. If this is None, the Jacobian will be estimated.
full_output : bool, optional
non-zero to return all optional outputs.
col_deriv : bool, optional
non-zero to specify that the Jacobian function computes derivatives
down the columns (faster, because there is no transpose operation).
ftol : float, optional
Relative error desired in the sum of squares.
xtol : float, optional
Relative error desired in the approximate solution.
gtol : float, optional
Orthogonality desired between the function vector and the columns of
the Jacobian.
maxfev : int, optional
The maximum number of calls to the function. If `Dfun` is provided,
then the default `maxfev` is 100*(N+1) where N is the number of elements
in x0, otherwise the default `maxfev` is 200*(N+1).
epsfcn : float, optional
A variable used in determining a suitable step length for the forward-
difference approximation of the Jacobian (for Dfun=None).
        Normally the actual step length will be sqrt(epsfcn)*x.
If epsfcn is less than the machine precision, it is assumed that the
relative errors are of the order of the machine precision.
factor : float, optional
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
diag : sequence, optional
        N positive entries that serve as scale factors for the variables.
Returns
-------
x : ndarray
The solution (or the result of the last iteration for an unsuccessful
call).
cov_x : ndarray
The inverse of the Hessian. `fjac` and `ipvt` are used to construct an
estimate of the Hessian. A value of None indicates a singular matrix,
which means the curvature in parameters `x` is numerically flat. To
obtain the covariance matrix of the parameters `x`, `cov_x` must be
multiplied by the variance of the residuals -- see curve_fit.
infodict : dict
a dictionary of optional outputs with the keys:
``nfev``
The number of function calls
``fvec``
The function evaluated at the output
``fjac``
A permutation of the R matrix of a QR
factorization of the final approximate
Jacobian matrix, stored column wise.
Together with ipvt, the covariance of the
estimate can be approximated.
``ipvt``
An integer array of length N which defines
a permutation matrix, p, such that
fjac*p = q*r, where r is upper triangular
with diagonal elements of nonincreasing
magnitude. Column j of p is column ipvt(j)
of the identity matrix.
``qtf``
The vector (transpose(q) * fvec).
mesg : str
A string message giving information about the cause of failure.
ier : int
An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
found. Otherwise, the solution was not found. In either case, the
optional output variable 'mesg' gives more information.
See Also
--------
least_squares : Newer interface to solve nonlinear least-squares problems
with bounds on the variables. See ``method=='lm'`` in particular.
Notes
-----
"leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.
cov_x is a Jacobian approximation to the Hessian of the least squares
objective function.
This approximation assumes that the objective function is based on the
difference between some observed target data (ydata) and a (non-linear)
function of the parameters `f(xdata, params)` ::
func(params) = ydata - f(xdata, params)
so that the objective function is ::
min sum((ydata - f(xdata, params))**2, axis=0)
params
The solution, `x`, is always a 1-D array, regardless of the shape of `x0`,
or whether `x0` is a scalar.
Examples
--------
>>> from scipy.optimize import leastsq
>>> def func(x):
... return 2*(x-3)**2+1
>>> leastsq(func, 0)
(array([2.99999999]), 1)
"""
x0 = asarray(x0).flatten()
n = len(x0)
if not isinstance(args, tuple):
args = (args,)
shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
m = shape[0]
if n > m:
raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
if epsfcn is None:
epsfcn = finfo(dtype).eps
if Dfun is None:
if maxfev == 0:
maxfev = 200*(n + 1)
retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
gtol, maxfev, epsfcn, factor, diag)
else:
if col_deriv:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
else:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
if maxfev == 0:
maxfev = 100 * (n + 1)
retval = _minpack._lmder(func, Dfun, x0, args, full_output,
col_deriv, ftol, xtol, gtol, maxfev,
factor, diag)
errors = {0: ["Improper input parameters.", TypeError],
1: ["Both actual and predicted relative reductions "
"in the sum of squares\n are at most %f" % ftol, None],
2: ["The relative error between two consecutive "
"iterates is at most %f" % xtol, None],
3: ["Both actual and predicted relative reductions in "
"the sum of squares\n are at most %f and the "
"relative error between two consecutive "
"iterates is at \n most %f" % (ftol, xtol), None],
4: ["The cosine of the angle between func(x) and any "
"column of the\n Jacobian is at most %f in "
"absolute value" % gtol, None],
5: ["Number of calls to function has reached "
"maxfev = %d." % maxfev, ValueError],
6: ["ftol=%f is too small, no further reduction "
"in the sum of squares\n is possible." % ftol,
ValueError],
7: ["xtol=%f is too small, no further improvement in "
"the approximate\n solution is possible." % xtol,
ValueError],
8: ["gtol=%f is too small, func(x) is orthogonal to the "
"columns of\n the Jacobian to machine "
"precision." % gtol, ValueError]}
# The FORTRAN return value (possible return values are >= 0 and <= 8)
info = retval[-1]
if full_output:
cov_x = None
if info in LEASTSQ_SUCCESS:
perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
r = triu(transpose(retval[1]['fjac'])[:n, :])
R = dot(r, perm)
try:
cov_x = inv(dot(transpose(R), R))
except (LinAlgError, ValueError):
pass
return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info)
else:
if info in LEASTSQ_FAILURE:
warnings.warn(errors[info][0], RuntimeWarning)
elif info == 0:
raise errors[info][1](errors[info][0])
return retval[0], info
def _wrap_func(func, xdata, ydata, transform):
if transform is None:
def func_wrapped(params):
return func(xdata, *params) - ydata
elif transform.ndim == 1:
def func_wrapped(params):
return transform * (func(xdata, *params) - ydata)
else:
# Chisq = (y - yd)^T C^{-1} (y-yd)
# transform = L such that C = L L^T
# C^{-1} = L^{-T} L^{-1}
# Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd)
# Define (y-yd)' = L^{-1} (y-yd)
# by solving
# L (y-yd)' = (y-yd)
# and minimize (y-yd)'^T (y-yd)'
def func_wrapped(params):
return solve_triangular(transform, func(xdata, *params) - ydata, lower=True)
return func_wrapped
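# Illustrative check (not part of the original module): for the 2-D `sigma`
# branch above, the wrapped residuals are whitened by L^{-1}, so their squared
# norm equals r^T C^{-1} r. Assuming the module-level imports of `cholesky`
# and `solve_triangular` are available:
#
#   C = np.array([[2.0, 0.3], [0.3, 1.0]])
#   r = np.array([0.5, -1.2])
#   L = cholesky(C, lower=True)
#   w = solve_triangular(L, r, lower=True)
#   np.allclose(w @ w, r @ np.linalg.solve(C, r))   # -> True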
def _wrap_jac(jac, xdata, transform):
if transform is None:
def jac_wrapped(params):
return jac(xdata, *params)
elif transform.ndim == 1:
def jac_wrapped(params):
return transform[:, np.newaxis] * np.asarray(jac(xdata, *params))
else:
def jac_wrapped(params):
return solve_triangular(transform, np.asarray(jac(xdata, *params)), lower=True)
return jac_wrapped
def _initialize_feasible(lb, ub):
p0 = np.ones_like(lb)
lb_finite = np.isfinite(lb)
ub_finite = np.isfinite(ub)
mask = lb_finite & ub_finite
p0[mask] = 0.5 * (lb[mask] + ub[mask])
mask = lb_finite & ~ub_finite
p0[mask] = lb[mask] + 1
mask = ~lb_finite & ub_finite
p0[mask] = ub[mask] - 1
return p0
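# Worked example of the feasibility rules above (illustrative values only):
#   lb = np.array([0.0, -np.inf, -np.inf, 2.0])
#   ub = np.array([1.0,  np.inf,  3.0,    np.inf])
#   _initialize_feasible(lb, ub)   # -> array([0.5, 1.0, 2.0, 3.0])
# i.e. the midpoint when both bounds are finite, lb + 1 or ub - 1 when only
# one bound is finite, and the default 1.0 when the parameter is unbounded.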
def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
check_finite=True, bounds=(-np.inf, np.inf), method=None,
jac=None, **kwargs):
"""
Use non-linear least squares to fit a function, f, to data.
Assumes ``ydata = f(xdata, *params) + eps``.
Parameters
----------
f : callable
The model function, f(x, ...). It must take the independent
variable as the first argument and the parameters to fit as
separate remaining arguments.
xdata : array_like or object
The independent variable where the data is measured.
        Should usually be an M-length sequence or a (k,M)-shaped array for
functions with k predictors, but can actually be any object.
ydata : array_like
The dependent data, a length M array - nominally ``f(xdata, ...)``.
p0 : array_like, optional
Initial guess for the parameters (length N). If None, then the
initial values will all be 1 (if the number of parameters for the
function can be determined using introspection, otherwise a
ValueError is raised).
sigma : None or M-length sequence or MxM array, optional
Determines the uncertainty in `ydata`. If we define residuals as
``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma`
depends on its number of dimensions:
- A 1-D `sigma` should contain values of standard deviations of
errors in `ydata`. In this case, the optimized function is
``chisq = sum((r / sigma) ** 2)``.
- A 2-D `sigma` should contain the covariance matrix of
errors in `ydata`. In this case, the optimized function is
``chisq = r.T @ inv(sigma) @ r``.
.. versionadded:: 0.19
        None (default) is equivalent to a 1-D `sigma` filled with ones.
absolute_sigma : bool, optional
If True, `sigma` is used in an absolute sense and the estimated parameter
covariance `pcov` reflects these absolute values.
If False (default), only the relative magnitudes of the `sigma` values matter.
The returned parameter covariance matrix `pcov` is based on scaling
`sigma` by a constant factor. This constant is set by demanding that the
reduced `chisq` for the optimal parameters `popt` when using the
*scaled* `sigma` equals unity. In other words, `sigma` is scaled to
match the sample variance of the residuals after the fit. Default is False.
Mathematically,
``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)``
check_finite : bool, optional
        If True, check that the input arrays do not contain nans or infs,
and raise a ValueError if they do. Setting this parameter to
False may silently produce nonsensical results if the input arrays
do contain nans. Default is True.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on parameters. Defaults to no bounds.
Each element of the tuple must be either an array with the length equal
to the number of parameters, or a scalar (in which case the bound is
taken to be the same for all parameters). Use ``np.inf`` with an
appropriate sign to disable bounds on all or some parameters.
.. versionadded:: 0.17
method : {'lm', 'trf', 'dogbox'}, optional
Method to use for optimization. See `least_squares` for more details.
Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
provided. The method 'lm' won't work when the number of observations
        is less than the number of variables; use 'trf' or 'dogbox' in this
case.
.. versionadded:: 0.17
jac : callable, string or None, optional
Function with signature ``jac(x, ...)`` which computes the Jacobian
matrix of the model function with respect to parameters as a dense
array_like structure. It will be scaled according to provided `sigma`.
If None (default), the Jacobian will be estimated numerically.
String keywords for 'trf' and 'dogbox' methods can be used to select
a finite difference scheme, see `least_squares`.
.. versionadded:: 0.18
kwargs
Keyword arguments passed to `leastsq` for ``method='lm'`` or
`least_squares` otherwise.
Returns
-------
popt : array
Optimal values for the parameters so that the sum of the squared
residuals of ``f(xdata, *popt) - ydata`` is minimized.
pcov : 2-D array
The estimated covariance of popt. The diagonals provide the variance
of the parameter estimate. To compute one standard deviation errors
on the parameters use ``perr = np.sqrt(np.diag(pcov))``.
How the `sigma` parameter affects the estimated covariance
depends on `absolute_sigma` argument, as described above.
If the Jacobian matrix at the solution doesn't have a full rank, then
        the 'lm' method returns a matrix filled with ``np.inf``, whereas the
        'trf' and 'dogbox' methods use the Moore-Penrose pseudoinverse to compute
the covariance matrix.
Raises
------
ValueError
if either `ydata` or `xdata` contain NaNs, or if incompatible options
are used.
RuntimeError
if the least-squares minimization fails.
OptimizeWarning
if covariance of the parameters can not be estimated.
See Also
--------
least_squares : Minimize the sum of squares of nonlinear functions.
scipy.stats.linregress : Calculate a linear least squares regression for
two sets of measurements.
Notes
-----
With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm
through `leastsq`. Note that this algorithm can only deal with
unconstrained problems.
Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to
the docstring of `least_squares` for more information.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.optimize import curve_fit
>>> def func(x, a, b, c):
... return a * np.exp(-b * x) + c
Define the data to be fit with some noise:
>>> xdata = np.linspace(0, 4, 50)
>>> y = func(xdata, 2.5, 1.3, 0.5)
>>> np.random.seed(1729)
>>> y_noise = 0.2 * np.random.normal(size=xdata.size)
>>> ydata = y + y_noise
>>> plt.plot(xdata, ydata, 'b-', label='data')
Fit for the parameters a, b, c of the function `func`:
>>> popt, pcov = curve_fit(func, xdata, ydata)
>>> popt
array([ 2.55423706, 1.35190947, 0.47450618])
>>> plt.plot(xdata, func(xdata, *popt), 'r-',
... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
Constrain the optimization to the region of ``0 <= a <= 3``,
``0 <= b <= 1`` and ``0 <= c <= 0.5``:
>>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5]))
>>> popt
array([ 2.43708906, 1. , 0.35015434])
>>> plt.plot(xdata, func(xdata, *popt), 'g--',
... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
>>> plt.xlabel('x')
>>> plt.ylabel('y')
>>> plt.legend()
>>> plt.show()
"""
if p0 is None:
# determine number of parameters by inspecting the function
sig = _getfullargspec(f)
args = sig.args
if len(args) < 2:
raise ValueError("Unable to determine number of fit parameters.")
n = len(args) - 1
else:
p0 = np.atleast_1d(p0)
n = p0.size
lb, ub = prepare_bounds(bounds, n)
if p0 is None:
p0 = _initialize_feasible(lb, ub)
bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
if method is None:
if bounded_problem:
method = 'trf'
else:
method = 'lm'
if method == 'lm' and bounded_problem:
raise ValueError("Method 'lm' only works for unconstrained problems. "
"Use 'trf' or 'dogbox' instead.")
# optimization may produce garbage for float32 inputs, cast them to float64
# NaNs cannot be handled
if check_finite:
ydata = np.asarray_chkfinite(ydata, float)
else:
ydata = np.asarray(ydata, float)
if isinstance(xdata, (list, tuple, np.ndarray)):
# `xdata` is passed straight to the user-defined `f`, so allow
# non-array_like `xdata`.
if check_finite:
xdata = np.asarray_chkfinite(xdata, float)
else:
xdata = np.asarray(xdata, float)
if ydata.size == 0:
raise ValueError("`ydata` must not be empty!")
# Determine type of sigma
if sigma is not None:
sigma = np.asarray(sigma)
# if 1-D, sigma are errors, define transform = 1/sigma
if sigma.shape == (ydata.size, ):
transform = 1.0 / sigma
# if 2-D, sigma is the covariance matrix,
# define transform = L such that L L^T = C
elif sigma.shape == (ydata.size, ydata.size):
try:
# scipy.linalg.cholesky requires lower=True to return L L^T = A
transform = cholesky(sigma, lower=True)
except LinAlgError as e:
raise ValueError("`sigma` must be positive definite.") from e
else:
raise ValueError("`sigma` has incorrect shape.")
else:
transform = None
func = _wrap_func(f, xdata, ydata, transform)
if callable(jac):
jac = _wrap_jac(jac, xdata, transform)
elif jac is None and method != 'lm':
jac = '2-point'
if 'args' in kwargs:
# The specification for the model function `f` does not support
# additional arguments. Refer to the `curve_fit` docstring for
# acceptable call signatures of `f`.
raise ValueError("'args' is not a supported keyword argument.")
if method == 'lm':
# Remove full_output from kwargs, otherwise we're passing it in twice.
return_full = kwargs.pop('full_output', False)
res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
popt, pcov, infodict, errmsg, ier = res
ysize = len(infodict['fvec'])
cost = np.sum(infodict['fvec'] ** 2)
if ier not in [1, 2, 3, 4]:
raise RuntimeError("Optimal parameters not found: " + errmsg)
else:
# Rename maxfev (leastsq) to max_nfev (least_squares), if specified.
if 'max_nfev' not in kwargs:
kwargs['max_nfev'] = kwargs.pop('maxfev', None)
res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
**kwargs)
if not res.success:
raise RuntimeError("Optimal parameters not found: " + res.message)
ysize = len(res.fun)
cost = 2 * res.cost # res.cost is half sum of squares!
popt = res.x
# Do Moore-Penrose inverse discarding zero singular values.
_, s, VT = svd(res.jac, full_matrices=False)
threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
s = s[s > threshold]
VT = VT[:s.size]
pcov = np.dot(VT.T / s**2, VT)
return_full = False
warn_cov = False
if pcov is None:
# indeterminate covariance
pcov = zeros((len(popt), len(popt)), dtype=float)
pcov.fill(inf)
warn_cov = True
elif not absolute_sigma:
if ysize > p0.size:
s_sq = cost / (ysize - p0.size)
pcov = pcov * s_sq
else:
pcov.fill(inf)
warn_cov = True
if warn_cov:
warnings.warn('Covariance of the parameters could not be estimated',
category=OptimizeWarning)
if return_full:
return popt, pcov, infodict, errmsg, ier
else:
return popt, pcov
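# Illustrative follow-up (not part of the original source): converting the
# returned covariance into one-sigma parameter errors, as the docstring above
# suggests:
#
#   popt, pcov = curve_fit(func, xdata, ydata)
#   perr = np.sqrt(np.diag(pcov))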
def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):
"""Perform a simple check on the gradient for correctness.
"""
x = atleast_1d(x0)
n = len(x)
x = x.reshape((n,))
fvec = atleast_1d(fcn(x, *args))
m = len(fvec)
fvec = fvec.reshape((m,))
ldfjac = m
fjac = atleast_1d(Dfcn(x, *args))
fjac = fjac.reshape((m, n))
if col_deriv == 0:
fjac = transpose(fjac)
xp = zeros((n,), float)
err = zeros((m,), float)
fvecp = None
_minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)
fvecp = atleast_1d(fcn(xp, *args))
fvecp = fvecp.reshape((m,))
_minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)
good = (prod(greater(err, 0.5), axis=0))
return (good, err)
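# Note on the helpers below: `_del2` is the Aitken Delta^2 / Steffensen
# acceleration step used by `fixed_point`. Given successive iterates
# p0, p1 = func(p0), p2 = func(p1) and d = p2 - 2*p1 + p0, the accelerated
# estimate is p0 - (p1 - p0)**2 / d, which typically converges faster than
# plain iteration when the underlying map is a contraction.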
def _del2(p0, p1, d):
return p0 - np.square(p1 - p0) / d
def _relerr(actual, desired):
return (actual - desired) / desired
def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel):
p0 = x0
for i in range(maxiter):
p1 = func(p0, *args)
if use_accel:
p2 = func(p1, *args)
d = p2 - 2.0 * p1 + p0
p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2)
else:
p = p1
relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p)
if np.all(np.abs(relerr) < xtol):
return p
p0 = p
msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
raise RuntimeError(msg)
def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'):
"""
Find a fixed point of the function.
Given a function of one or more variables and a starting point, find a
fixed point of the function: i.e., where ``func(x0) == x0``.
Parameters
----------
func : function
Function to evaluate.
x0 : array_like
        Initial guess for the fixed point of the function.
args : tuple, optional
Extra arguments to `func`.
xtol : float, optional
Convergence tolerance, defaults to 1e-08.
maxiter : int, optional
Maximum number of iterations, defaults to 500.
method : {"del2", "iteration"}, optional
Method of finding the fixed-point, defaults to "del2",
which uses Steffensen's Method with Aitken's ``Del^2``
convergence acceleration [1]_. The "iteration" method simply iterates
the function until convergence is detected, without attempting to
accelerate the convergence.
References
----------
.. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80
Examples
--------
>>> from scipy import optimize
>>> def func(x, c1, c2):
... return np.sqrt(c1/(x+c2))
>>> c1 = np.array([10,12.])
>>> c2 = np.array([3, 5.])
>>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2))
array([ 1.4920333 , 1.37228132])
"""
use_accel = {'del2': True, 'iteration': False}[method]
x0 = _asarray_validated(x0, as_inexact=True)
return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)
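# Illustrative usage (a quick sketch, not part of the original module): the
# fixed point of cos(x) is approximately 0.7390851.
#
#   from scipy import optimize
#   optimize.fixed_point(np.cos, 1.0)                       # Steffensen ('del2')
#   optimize.fixed_point(np.cos, 1.0, method='iteration')   # plain iteration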
| bsd-3-clause |
kkozarev/mwacme | casa_commands_instructions/find_local_maxima_multi_integrated_subset.py | 2 | 8760 | import numpy as np
import scipy
import scipy.optimize as opt
import scipy.ndimage as ndimage
import scipy.ndimage.filters as filters
import matplotlib.pyplot as plt
import glob, os, sys, fnmatch
from astropy.io import fits as pyfits
from astropy.io import ascii
#define model Gaussian function and pass independent variables x and y as a list
def twoD_Gaussian(xytuple, amplitude, xo, yo, sigma_x, sigma_y, theta, offset):
(x, y) = xytuple
xo = float(xo)
yo = float(yo)
a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2)
b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2)
g = offset + amplitude*np.exp( - (a*((x-xo)**2) + 2*b*(x-xo)*(y-yo) + c*((y-yo)**2)))
return g.ravel()
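#Minimal usage sketch for the model above (illustrative only; the grid size
#and parameter values are made up and are not taken from the MWA data below):
#   x, y = np.meshgrid(np.arange(100), np.arange(100))
#   g = twoD_Gaussian((x, y), amplitude=10., xo=50., yo=50.,
#                     sigma_x=5., sigma_y=3., theta=0.2, offset=1.)
#   g2d = g.reshape(100, 100)  # twoD_Gaussian returns a raveled array for curve_fit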
def check_xy_range (x,y,peak_box):
result = False
if (peak_box['x'][0] <= x <= peak_box['x'][1]):
if (peak_box['y'][0] <= y <= peak_box['y'][1]):
result = True
return result
#TEST check_xy_range
#print check_xy_range(10,10,{'x':[0,20],'y':[0,20]})
#The old data location
#if sys.platform == 'darwin': BASEDIR='/Users/kamen/ubuntu_share/MWA_DATA/'
#if sys.platform == 'linux2': BASEDIR='/mnt/ubuntu_share/MWA_DATA/'
#The new data location
if sys.platform == 'darwin': BASEDIR='/Volumes/Transcend/MWA_DATA/'
if sys.platform == 'linux2': BASEDIR='/mnt/MWA_DATA/'
#'139-140'#'103-104','125-126'#'069-070','113-114' #'093-094' #'084-085' # '076-077' # '062-063'
CHANNELS = ['062-063','069-070','076-077','084-085','093-094','113-114','139-140','153-154','169-170','187-188','125-126']
CHANNELS = ['062-063']
#CHANNELS = ['069-070']
#CHANNELS = ['103-104']
CHANNELS = ['062-063','069-070','076-077','084-085','093-094','103-104']
polarization='XX'
vv=0 #Whether to look for VV-corrected data or not
date='2015/11/04 '
maxindices=['1','2']
dpi=40
neighborhood_size = 10
threshold = 50
force=1
#
timesubset=['034000','034200','034400','034600','034800',
'035000','035200','035400','035600','035800','040000',
'040200','040400','040600','040800']
timesubset=['034100','034300','034500','034700','034900',
'035100','035300','035500','035700','035900','040100',
'040300','040500','040700']
#DEFINE THE PEAK BOX AROUND THE SUN LOCATION WITHIN WHICH TO SEARCH
all_peak_box={'062-063':{'x':[280,629],'y':[300,649]},
'069-070':{'x':[280,629],'y':[300,649]},
'076-077':{'x':[240,589],'y':[350,699]},
'084-085':{'x':[280,629],'y':[300,649]},
'093-094':{'x':[230,579],'y':[340,689]},
'103-104':{'x':[320,669],'y':[290,639]},
'113-114':{'x':[230,579],'y':[340,689]},
'125-126':{'x':[230,579],'y':[340,689]},
'139-140':{'x':[230,579],'y':[340,689]},
'153-154':{'x':[290,639],'y':[300,649]},
'169-170':{'x':[240,589],'y':[330,679]},
'187-188':{'x':[315,664],'y':[285,634]}}
for CHANNEL in CHANNELS:
print 'Working on channel '+CHANNEL
datadir=BASEDIR+'synchrotron/'
OUTDIR=datadir
peak_box=all_peak_box[CHANNEL]
#GET the Peak and RMS for the Dynamic Range of the image.
drfile='DR_'+CHANNEL+'_'+polarization+'_synchrotron.txt'
drdata=ascii.read(datadir+drfile)
tmp=drdata['col1']
drfnames=[]
fitsfiles=[]
drtimestrings=[]
for fname in tmp:
drtimestrings.append(fname.split('_t')[1].split('_XX')[0])
fitsfiles.append(BASEDIR+fname.split('/mnt/MWA_DATA/')[1].split('.image')[0]+'.fits')
drfnames.append(os.path.basename(fname.split('.image')[0]))
peak=drdata['col2']
rms=drdata['col3']
#CHECK if previous Maxima files exist, record the data
old_maxdata={}
maxindices=['1','2']
for maxindex in maxindices:
maxfile='Max'+maxindex+'_info_'+CHANNEL+'_'+polarization+'_synchrotron.txt'
if os.path.exists(datadir+maxfile):
maxdata=ascii.read(datadir+maxfile)
old_timestring=[]
for time in maxdata['times']:
old_timestring.append(''.join(time.split(' ')[1].split(':')))
maxdata['timestring']=old_timestring
old_maxdata[maxindex]=maxdata
#CHECK if previous Integrated Maxima file exists, get the last time saved
update=0
maxfile='Max1_info_'+CHANNEL+'_'+polarization+'_synchrotron_integrated.txt'
if os.path.exists(datadir+maxfile):
update=1
maxdata=ascii.read(datadir+maxfile)
inttimestring=[]
for time in maxdata['times']:
inttimestring.append(''.join(time.split(' ')[1].split(':')))
lasttime=inttimestring[-1]
lasttimeind=drtimestrings.index(lasttime)
fitsfiles=fitsfiles[lasttimeind+1:]
#Load the real data, and try to fit a gaussian.
xwidth=5
ywidth=5
maxindices=['1','2']
if update == 0:
for maxindex in maxindices:
maxfile='Max'+maxindex+'_info_'+CHANNEL+'_'+polarization+'_synchrotron_integrated.txt'
maxf = open(datadir+maxfile, 'w')
maxf.write("maxintens maxlocx_px maxlocy_px times integrated_flux\n")
maxf.close()
for ii,infile in enumerate(fitsfiles):
#First, find the right index for the Maxdata
reftimstr=drtimestrings[ii]
ii1=-1
ii2=-1
if reftimstr in old_maxdata['1']['timestring']:
ii1=old_maxdata['1']['timestring'].tolist().index(reftimstr)
if reftimstr in old_maxdata['2']['timestring']:
ii2=old_maxdata['2']['timestring'].tolist().index(reftimstr)
if not old_maxdata['1']['timestring'][ii1] in timesubset: continue
print "Working on file "+infile
#Load the data
hd=pyfits.open(infile)
indata=np.squeeze(hd[0].data)
dataflat=indata.ravel()
# Create x and y indices
xsize, ysize = indata.shape
x = np.linspace(0, xsize, ysize)
y = np.linspace(0, xsize, ysize)
x, y = np.meshgrid(x, y)
#Get the original RMS value
rmsval=rms[ii]*10.
#Loop over the maximum indices
maxindices=['1','2']
for maxindex in maxindices:
if maxindex == '1':
ind=ii1
if maxindex == '2':
ind=ii2
maxfile='Max'+maxindex+'_info_'+CHANNEL+'_'+polarization+'_synchrotron_integrated.txt'
#Get the original Maximum locations and intensities
maxintens=old_maxdata[maxindex]['maxintens'][ind]
maxlocx_px=old_maxdata[maxindex]['maxlocx_px'][ind]
maxlocy_px=old_maxdata[maxindex]['maxlocy_px'][ind]
cutoff = 0.1*maxintens
#MAXIMUM FITTING AND RECORDING THE INFORMATION
initial_guess=(maxintens-rmsval,maxlocx_px,maxlocy_px,xwidth,ywidth,0.,rmsval)
param_bounds=([-np.inf,maxlocx_px-1,maxlocy_px-1,xwidth-2,ywidth-2,-np.inf,-np.inf],
[np.inf,maxlocx_px+1,maxlocy_px+1,xwidth+5,ywidth+5,np.inf,np.inf])
#param_bounds=([-np.inf,maxlocx_px-3,maxlocy_px-3,-np.inf,-np.inf,-np.inf,-np.inf],
# [np.inf,maxlocx_px+3,maxlocy_px+3,np.inf,np.inf,np.inf,np.inf])
try:
#,bounds=param_bounds
popt, pcov = opt.curve_fit(twoD_Gaussian, (x,y), dataflat-cutoff, p0=initial_guess,
bounds=param_bounds)
except RuntimeError as e:
print "Runtime error! Continuing..."
continue
maxdata_fitted = twoD_Gaussian((x, y), *popt)
maxdata_fitted2d=maxdata_fitted.reshape(xsize, ysize)
if maxindex == '1': max1data_fitted2d=maxdata_fitted2d
if maxindex == '2': max2data_fitted2d=maxdata_fitted2d
maxindices=zip(np.where(maxdata_fitted.reshape(xsize, ysize) > rmsval))
maxf = open(datadir+maxfile, 'a')
maxf.write('{0:.3f} {1:d} {2:d} "{3}" {4:.3f}\n'.format(popt[0],int(popt[1]),int(popt[2]),old_maxdata[maxindex]['times'][ind],np.sum(indata[maxindices])))
maxf.close()
#Plot the fitting result
plotfile='gaussfit_'+CHANNEL+'_'+polarization+'_'+reftimstr+'_synchrotron.png'
contourvals=np.array([1,5,10,15,20,25])*rmsval
fig, ax = plt.subplots(1, 1)
ax.hold(True)
ax.imshow(indata, cmap=plt.cm.jet, origin='bottom',
extent=(x.min(), x.max(), y.min(), y.max()))
ax.contour(x, y, max1data_fitted2d, 8, colors='w')
ax.contour(x, y, max2data_fitted2d, 8, colors='y')
plt.title(CHANNEL)
plt.savefig(datadir+plotfile)
plt.close()
| gpl-2.0 |
bavardage/statsmodels | statsmodels/graphics/tests/test_regressionplots.py | 5 | 4406 | '''Tests for regressionplots; the entire module is skipped if matplotlib is not available.
'''
import numpy as np
import nose
import statsmodels.api as sm
from statsmodels.graphics.regressionplots import (plot_fit, plot_ccpr,
plot_partregress, plot_regress_exog, abline_plot,
plot_partregress_grid, plot_ccpr_grid, add_lowess)
from pandas import Series, DataFrame
try:
import matplotlib.pyplot as plt #makes plt available for test functions
have_matplotlib = True
except:
have_matplotlib = False
def setup():
if not have_matplotlib:
raise nose.SkipTest('No tests here')
def teardown_module():
plt.close('all')
class TestPlot(object):
def __init__(self):
self.setup() #temp: for testing without nose
def setup(self):
nsample = 100
sig = 0.5
x1 = np.linspace(0, 20, nsample)
x2 = 5 + 3* np.random.randn(nsample)
X = np.c_[x1, x2, np.sin(0.5*x1), (x2-5)**2, np.ones(nsample)]
beta = [0.5, 0.5, 1, -0.04, 5.]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
exog0 = sm.add_constant(np.c_[x1, x2], prepend=False)
res = sm.OLS(y, exog0).fit()
self.res = res
def test_plot_fit(self):
res = self.res
fig = plot_fit(res, 0, y_true=None)
x0 = res.model.exog[:, 0]
yf = res.fittedvalues
y = res.model.endog
px1, px2 = fig.axes[0].get_lines()[0].get_data()
np.testing.assert_equal(x0, px1)
np.testing.assert_equal(y, px2)
px1, px2 = fig.axes[0].get_lines()[1].get_data()
np.testing.assert_equal(x0, px1)
np.testing.assert_equal(yf, px2)
plt.close(fig)
def test_plot_oth(self):
#just test that they run
res = self.res
endog = res.model.endog
exog = res.model.exog
plot_fit(res, 0, y_true=None)
plot_partregress_grid(res, exog_idx=[0,1])
plot_regress_exog(res, exog_idx=0)
plot_ccpr(res, exog_idx=0)
plot_ccpr_grid(res, exog_idx=[0])
fig = plot_ccpr_grid(res, exog_idx=[0,1])
for ax in fig.axes:
add_lowess(ax)
plt.close('all')
class TestPlotPandas(TestPlot):
def setup(self):
nsample = 100
sig = 0.5
x1 = np.linspace(0, 20, nsample)
x2 = 5 + 3* np.random.randn(nsample)
X = np.c_[x1, x2, np.sin(0.5*x1), (x2-5)**2, np.ones(nsample)]
beta = [0.5, 0.5, 1, -0.04, 5.]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
exog0 = sm.add_constant(np.c_[x1, x2], prepend=False)
exog0 = DataFrame(exog0, columns=["const", "var1", "var2"])
y = Series(y, name="outcome")
res = sm.OLS(y, exog0).fit()
self.res = res
class TestABLine(object):
@classmethod
def setupClass(cls):
np.random.seed(12345)
X = sm.add_constant(np.random.normal(0, 20, size=30))
y = np.dot(X, [25, 3.5]) + np.random.normal(0, 30, size=30)
mod = sm.OLS(y,X).fit()
cls.X = X
cls.y = y
cls.mod = mod
def test_abline_model(self):
fig = abline_plot(model_results=self.mod)
ax = fig.axes[0]
ax.scatter(self.X[:,1], self.y)
plt.close(fig)
def test_abline_model_ax(self):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(self.X[:,1], self.y)
fig = abline_plot(model_results=self.mod, ax=ax)
plt.close(fig)
def test_abline_ab(self):
mod = self.mod
intercept, slope = mod.params
fig = abline_plot(intercept=intercept, slope=slope)
plt.close(fig)
def test_abline_ab_ax(self):
mod = self.mod
intercept, slope = mod.params
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(self.X[:,1], self.y)
fig = abline_plot(intercept=intercept, slope=slope, ax=ax)
plt.close(fig)
class TestABLinePandas(TestABLine):
@classmethod
def setupClass(cls):
np.random.seed(12345)
X = sm.add_constant(np.random.normal(0, 20, size=30))
y = np.dot(X, [25, 3.5]) + np.random.normal(0, 30, size=30)
cls.X = X
cls.y = y
X = DataFrame(X, columns=["const", "someX"])
y = Series(y, name="outcome")
mod = sm.OLS(y,X).fit()
cls.mod = mod
| bsd-3-clause |
jjo31/ATHAM-Fluidity | examples/hokkaido-nansei-oki_tsunami/raw_data/plotinputwave.py | 5 | 2520 | #!/usr/bin/env python
from fluidity_tools import stat_parser
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, show
import getopt
import sys
import csv
def usage():
print "plotinputwave.py -b starttime -e endtime --save=basename"
def get_inputelevation(t):
InputWaveReader = csv.reader(open('InputWave.csv', 'rb'), delimiter='\t')
data=[]
for (time, heigth) in InputWaveReader:
data.append((float(time), float(heigth)))
for i in range(1,len(data)):
if data[i][0]<t:
continue
t1=data[max(0,i-1)][0]
t2=data[i][0]
h1=data[max(0,i-1)][1]
h2=data[i][1]
return h1*(t-t2)/(t1-t2)+h2*(t-t1)/(t2-t1)
print "Warning: t is outside the available data. Using last available waterheigth..."
return data[-1][1]
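# Linear interpolation example for the function above (illustrative values
# only): if InputWave.csv contains the rows (1.0, 0.0) and (2.0, 4.0), then
# get_inputelevation(1.25) returns
#   0.0*(1.25-2.0)/(1.0-2.0) + 4.0*(1.25-1.0)/(2.0-1.0) = 1.0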
def main(argv=None):
    dt=0.05 # use same timestep as in csv file
    try:
        opts, args = getopt.getopt(sys.argv[1:], "ht:e:b:", ['help', 'save='])
except getopt.GetoptError:
print "Getopterror :("
usage()
sys.exit(2)
subtitle=''
subtitle_pure=''
endtime=22.5
starttime=0.0
save=False
for opt, arg in opts:
if opt == '--save':
save=True
savename=arg
elif opt=='-h' or opt=='--help':
usage()
sys.exit(2)
elif opt=='-t':
subtitle=', '+arg
subtitle_pure=arg
elif opt=='-b':
starttime=float(arg)
elif opt=='-e':
endtime=float(arg)
print "Generating plot"
print 'Using dt=', dt
starttimestep=int(max(0,starttime/dt))
endtimestep=int(endtime/dt)
print 'starttimestep=', starttimestep
print 'endtimestep=', endtimestep
# fill in measurement data
input_elevation=[]
time=[]
for i in range(starttimestep, endtimestep):
time.append(i*dt)
elev=get_inputelevation(time[-1])
input_elevation.append(elev*100.0) # in cm
plt.ion() # switch in interactive mode
fig1= figure()
subplt1 = fig1.add_subplot(111, xlabel='Time [s]', ylabel='Water level [cm]')
subplt1.plot(time, input_elevation) # plot gauge1 detector data
if not save:
plt.draw()
raw_input("Press Enter to exit")
else:
plt.savefig(savename+'.pdf', facecolor='white', edgecolor='black', dpi=100)
print 'Saved to '+savename+'.pdf'
# for i in range(timesteps):
# gauge1.append(s["water"]["FreeSurface"]["gauge1"])
if __name__ == "__main__":
main()
| lgpl-2.1 |
CFIS-Octarine/octarine | validate/fitsviewer/interaction.py | 1 | 5450 | __author__ = "David Rusk <drusk@uvic.ca>"
from exceptions import MPLViewerError
class InteractionContext(object):
"""
Very useful reference for matplotlib event handling:
http://matplotlib.org/users/event_handling.html
"""
MOUSE_BUTTON_LEFT = 1
MOUSE_BUTTON_RIGHT = 3
def __init__(self, displayable):
self.displayable = displayable
self._register_event_handlers()
self.state = CreateMarkerState(self)
def _register_event_handlers(self):
"""
Connect to start listening for the relevant events.
"""
#self.cidpress = self.displayable.register_mpl_event_handler(
# "button_press_event", self.on_press)
#self.cidrelease = self.displayable.register_mpl_event_handler(
# "button_release_event", self.on_release)
#self.cidmotion = self.displayable.register_mpl_event_handler(
# "motion_notify_event", self.on_motion)
return
def on_press(self, event):
if not self.displayable.is_event_in_axes(event):
return
if event.button == InteractionContext.MOUSE_BUTTON_LEFT:
self.state = self._choose_left_click_state(event)
elif event.button == InteractionContext.MOUSE_BUTTON_RIGHT:
self.state = AdjustColormapState(self)
else:
# Ignore any other button such as middle click.
return
self.state.on_press(event)
def _choose_left_click_state(self, event):
marker = self.get_marker()
if marker is None:
in_marker = False
else:
in_marker, _ = marker.contains(event)
if in_marker:
return MoveMarkerState(self)
else:
return CreateMarkerState(self)
def on_motion(self, event):
if not self.displayable.is_event_in_axes(event):
return
self.state.on_motion(event)
def on_release(self, event):
self.state.on_release(event)
self.displayable.release_focus()
def get_marker(self):
return self.displayable.marker
def update_marker(self, x, y, radius=None):
self.displayable.update_marker(x, y, radius)
def update_colormap(self, dx, dy):
self.displayable.update_colormap(dx, dy)
def disconnect(self):
"""Disconnects all the stored connection ids"""
return
#self.displayable.deregister_mpl_event_handler(self.cidpress)
#self.displayable.deregister_mpl_event_handler(self.cidrelease)
#self.displayable.deregister_mpl_event_handler(self.cidmotion)
class BaseInteractionState(object):
def __init__(self, context):
self.context = context
self._set_blank_state()
def _set_blank_state(self):
self.pressed = False
self.had_drag = False
self.start_x = None
self.start_y = None
self.last_x = None
self.last_y = None
def on_press(self, event):
self.pressed = True
self.start_x = event.xdata
self.start_y = event.ydata
self.last_x = self.start_x
self.last_y = self.start_y
def on_motion(self, event):
if not self.pressed:
return
self.had_drag = True
self.on_drag(event)
self.last_x = event.xdata
self.last_y = event.ydata
def on_drag(self, event):
"""
Implement to provide state-specific behaviour on motion.
"""
pass
def on_release(self, event):
self._set_blank_state()
class RecenteringState(BaseInteractionState):
def on_release(self, event):
if (self.pressed and
not self.had_drag and
self.context.get_marker() is not None):
self.context.update_marker(self.start_x, self.start_y)
super(RecenteringState, self).on_release(event)
class MoveMarkerState(RecenteringState):
def __init__(self, context):
super(MoveMarkerState, self).__init__(context)
if context.get_marker() is None:
raise MPLViewerError("Can not move a marker if it doesn't exist!")
def on_drag(self, event):
center_x, center_y = self.context.get_marker().center
dx = event.xdata - self.last_x
dy = event.ydata - self.last_y
self.context.update_marker(center_x + dx, center_y + dy)
class CreateMarkerState(RecenteringState):
def __init__(self, context):
super(CreateMarkerState, self).__init__(context)
def on_drag(self, event):
center_x = float(self.start_x + event.xdata) / 2
center_y = float(self.start_y + event.ydata) / 2
radius = max(abs(self.start_x - event.xdata) / 2,
abs(self.start_y - event.ydata) / 2)
self.context.update_marker(center_x, center_y, radius)
class AdjustColormapState(BaseInteractionState):
def __init__(self, context):
super(AdjustColormapState, self).__init__(context)
def on_drag(self, event):
self.context.update_colormap(event.xdata - self.last_x,
event.ydata - self.last_y)
class Signal(object):
def __init__(self):
self._handlers = []
def connect(self, handler):
self._handlers.append(handler)
def disconnect(self, handler):
self._handlers.remove(handler)
def fire(self, *args):
for handler in self._handlers:
handler(*args)
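# Illustrative usage of Signal (not part of the original module): any callable
# can be registered and is invoked with the arguments passed to fire().
#
#   marker_moved = Signal()
#   marker_moved.connect(on_move)   # `on_move` is a hypothetical handler(x, y)
#   marker_moved.fire(10, 20)       # calls every connected handler with (10, 20)
#   marker_moved.disconnect(on_move)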
| gpl-3.0 |
perryjohnson/biplaneblade | sandia_blade_lib/prep_stn33_mesh.py | 1 | 10865 | """Write initial TrueGrid files for one Sandia blade station.
Usage
-----
start an IPython (qt)console with the pylab flag:
$ ipython qtconsole --pylab
or
$ ipython --pylab
Then, from the prompt, run this script:
|> %run sandia_blade_lib/prep_stnXX_mesh.py
or
|> import sandia_blade_lib/prep_stnXX_mesh
Author: Perry Roth-Johnson
Last updated: April 10, 2014
"""
import matplotlib.pyplot as plt
import lib.blade as bl
import lib.poly_utils as pu
from shapely.geometry import Polygon
# SET THESE PARAMETERS -----------------
station_num = 33
# --------------------------------------
plt.close('all')
# load the Sandia blade
m = bl.MonoplaneBlade('Sandia blade SNL100-00', 'sandia_blade')
# pre-process the station dimensions
station = m.list_of_stations[station_num-1]
station.airfoil.create_polygon()
station.structure.create_all_layers()
station.structure.save_all_layer_edges()
station.structure.write_all_part_polygons()
# plot the parts
station.plot_parts()
# access the structure for this station
st = station.structure
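# Each section below follows the same pattern: build a shapely bounding
# Polygon from hand-picked coordinates, plot it for visual checking, then cut
# every affected layer with pu.cut_plot_and_write_alt_layer so the resulting
# pieces can be written out and meshed by TrueGrid at the end of the script.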
# upper spar cap -----------------------------------------------------------
label = 'upper spar cap'
# create the bounding polygon
usc = st.spar_cap.layer['upper']
is1 = st.internal_surface_1.layer['resin']
points_usc = [
tuple(usc.left[0]), # SparCap_upper.txt
(usc.left[0][0], 0.05),
is1.polygon.interiors[0].coords[364-188], # InternalSurface1_resin.txt
tuple(usc.right[1]), # SparCap_upper.txt
(usc.right[1][0], 0.16),
(usc.left[0][0], 0.16)
]
bounding_polygon = Polygon(points_usc)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
# lower spar cap -----------------------------------------------------------
label = 'lower spar cap'
# create the bounding polygon
lsc = st.spar_cap.layer['lower']
points_lsc = [
tuple(lsc.left[1]),
(lsc.left[1][0], 0.0),
is1.polygon.interiors[0].coords[225-188], # InternalSurface1_resin.txt
tuple(lsc.right[0]), # SparCap_lower.txt
(lsc.right[0][0], -0.15),
(lsc.left[1][0], -0.15)
]
bounding_polygon = Polygon(points_lsc)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
# TE reinforcement, upper 1 ------------------------------------------------
label = 'TE reinforcement, upper 1'
# create the bounding polygon
ter = st.TE_reinforcement.layer['foam']
points_teu1 = [
(ter.top[0][0], 0.16), # TE_Reinforcement_foam.txt
tuple(ter.top[0]), # TE_Reinforcement_foam.txt
(0.30, 0.07),
is1.polygon.interiors[0].coords[325-188], # InternalSurface1_resin.txt
(is1.polygon.interiors[0].coords[325-188][0], 0.16) # InternalSurface1_resin.txt
]
bounding_polygon = Polygon(points_teu1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, lower 1 ------------------------------------------------
label = 'TE reinforcement, lower 1'
# create the bounding polygon
points_tel1 = [
(ter.bottom[0][0], -0.15), # TE_Reinforcement_foam.txt
tuple(ter.bottom[1]), # TE_Reinforcement_foam.txt
(0.30, 0.0),
(0.4, 0.03),
points_teu1[-2], # InternalSurface1_resin.txt
(points_teu1[-1][0], -0.15) # InternalSurface1_resin.txt
]
bounding_polygon = Polygon(points_tel1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, upper 2 ------------------------------------------------
label = 'TE reinforcement, upper 2'
# create the bounding polygon
is1t = st.internal_surface_1.layer['triax']
points_teu2 = [
points_teu1[-1],
points_teu1[-2],
is1t.polygon.interiors[0].coords[289-153], # InternalSurface1_triax.txt
is1t.polygon.exterior.coords[37-3], # InternalSurface1_triax.txt
(is1t.polygon.exterior.coords[37-3][0], 0.16) # InternalSurface1_triax.txt
]
bounding_polygon = Polygon(points_teu2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, lower 2 ------------------------------------------------
label = 'TE reinforcement, lower 2'
# create the bounding polygon
points_tel2 = [
(points_teu2[0][0], -0.1),
points_teu2[1],
points_teu2[2],
points_teu2[3],
(points_teu2[3][0], -0.1)
]
bounding_polygon = Polygon(points_tel2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, upper 3 ------------------------------------------------
label = 'TE reinforcement, upper 3'
# create the bounding polygon
teru = st.TE_reinforcement.layer['uniax']
est = st.external_surface.layer['triax']
esg = st.external_surface.layer['gelcoat']
points_teu3 = [
points_teu2[-1],
points_teu2[-2],
ter.polygon.exterior.coords[0],
teru.polygon.exterior.coords[0],
(est.polygon.exterior.coords[-1][0], 0.00135),
est.polygon.exterior.coords[-2],
esg.polygon.exterior.coords[-2],
(esg.polygon.exterior.coords[-2][0], 0.16)
]
bounding_polygon = Polygon(points_teu3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, lower 3 ------------------------------------------------
label = 'TE reinforcement, lower 3'
# create the bounding polygon
points_tel3 = [
(points_teu3[0][0], -0.1),
points_teu3[1],
points_teu3[2],
points_teu3[3],
points_teu3[4],
est.polygon.exterior.coords[-1],
esg.polygon.exterior.coords[-1],
(points_teu3[4][0], -0.1)
]
bounding_polygon = Polygon(points_tel3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# LE panel -----------------------------------------------------------------
label = 'LE panel'
# create the bounding polygon
lep = st.LE_panel.layer['foam']
is1 = st.internal_surface_1.layer['resin']
points_le = [
(-0.7,-0.1),
(lep.bottom[0][0],-0.1),
(lep.bottom[0][0],0.16),
(-0.7, 0.16)
]
bounding_polygon = Polygon(points_le)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
# show the plot
plt.show()
# write the TrueGrid input file for mesh generation ---------------------
st.write_truegrid_inputfile(
interrupt_flag=True,
additional_layers=[
st.spar_cap.layer['upper'],
st.spar_cap.layer['lower'],
st.LE_panel.layer['foam']
],
alt_TE_reinforcement=True,
soft_warning=False)
| gpl-3.0 |
mdboom/freetypy | examples/rendering_modes.py | 1 | 3436 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# Copyright (c) 2015, Michael Droettboom
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
# Original example from freetype-py:
# FreeType high-level python API - Copyright 2011 Nicolas P. Rougier
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
'''
Demonstrates glyph bitmap rendering modes.
'''
from __future__ import print_function, unicode_literals, absolute_import
import argparse
import freetypy as ft
import freetypy.util as ft_util
import numpy as np
import matplotlib.pyplot as plt
def rendering_modes(char):
face = ft.Face(ft_util.vera_path())
face.select_charmap(ft.ENCODING.UNICODE)
face.set_char_size(48)
for i, mode in enumerate([ft.LOAD.TARGET_MONO,
ft.LOAD.TARGET_NORMAL,
ft.LOAD.TARGET_LCD,
ft.LOAD.TARGET_LCD_V]):
glyph = face.load_char_unicode(char, ft.LOAD.RENDER | mode)
bitmap = glyph.bitmap
if mode == ft.LOAD.TARGET_MONO:
bitmap = bitmap.convert()
array = np.array(bitmap)
plt.subplot(1, 4, i+1)
plt.xticks([])
plt.yticks([])
if i < 2:
plt.imshow(array, interpolation='nearest', cmap=plt.cm.gray)
else:
plt.imshow(array, interpolation='nearest')
plt.xlabel(repr(bitmap.pixel_mode))
plt.show()
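# Note (assumption, not verified against the freetypy docs): TARGET_MONO
# yields a 1-bit bitmap, which is why it is passed through bitmap.convert()
# above before being turned into an array, while the LCD / LCD_V targets carry
# per-subpixel coverage, which is why those panels are drawn without the
# greyscale colormap used for the first two.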
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Displays a character using different rendering modes.')
parser.add_argument(
'-c', type=str, default='S',
help='The character to display')
args = parser.parse_args()
char = args.c
if isinstance(char, bytes):
char = char.decode('utf-8')
rendering_modes(char)
| bsd-2-clause |
calliope-project/calliope | calliope/test/test_model_data.py | 1 | 16612 | import pytest
import os
import logging
import numpy as np
import pandas as pd
import calliope
from calliope.preprocess.model_data import ModelDataFactory
from calliope.core.attrdict import AttrDict
from calliope._version import __version__
import calliope.exceptions as exceptions
from calliope.preprocess import model_run_from_yaml
from calliope.test.common.util import check_error_or_warning
@pytest.fixture(scope="module")
def model_run():
filepath = os.path.join(
os.path.dirname(calliope.__file__),
"test",
"common",
"test_model",
"model.yaml",
)
return model_run_from_yaml(filepath, scenario="simple_supply")[0]
class TestModelData:
@pytest.fixture(scope="class")
def model_data(self, model_run):
return ModelDataFactory(model_run)
def test_model_data_init(self, model_data):
for attr in [
"LOOKUP_STR",
"UNWANTED_TECH_KEYS",
"node_dict",
"tech_dict",
"model_data",
"template_config",
"link_techs",
]:
assert hasattr(model_data, attr)
for var in ["node_tech", "link_remote_techs", "link_remote_nodes"]:
assert var in model_data.model_data.data_vars.keys()
assert model_data.model_data.attrs != {}
def test_model_data_init_vars(self, model_data):
non_link_node_techs = [
("a", "test_demand_elec"),
("a", "test_supply_elec"),
("b", "test_demand_elec"),
("b", "test_supply_elec"),
]
assert len(model_data.model_data.node_tech.to_series().dropna()) == 8
assert len(model_data.model_data.link_remote_techs.to_series().dropna()) == 4
assert len(model_data.model_data.link_remote_nodes.to_series().dropna()) == 4
assert (
model_data.model_data.node_tech.to_series()
.dropna()
.index.difference(
model_data.model_data.link_remote_techs.to_series().dropna().index
)
.difference(non_link_node_techs)
.empty
)
assert (
model_data.model_data.link_remote_nodes.to_series()
.dropna()
.index.difference(
model_data.model_data.link_remote_techs.to_series().dropna().index
)
.empty
)
@pytest.mark.parametrize(
("var", "invalid"),
[
(None, True),
([], True),
((), True),
(set(), True),
(dict(), True),
(np.nan, True),
("foo", False),
(1, False),
(0, False),
([1], False),
((1,), False),
({1}, False),
({"foo": "bar"}, False),
],
)
def test_empty_or_invalid(self, model_data, var, invalid):
assert model_data._empty_or_invalid(var) == invalid
def test_strip_unwanted_keys(self, model_data, model_run):
model_data.tech_dict = model_run.techs.as_dict_flat()
model_data.node_dict = model_run.nodes.as_dict_flat()
initial_node_length = len(model_data.node_dict)
assert all(
any(tech_info.endswith(key) for tech_info in model_data.tech_dict.keys())
for key in model_data.UNWANTED_TECH_KEYS
)
model_data._strip_unwanted_keys()
assert initial_node_length == len(model_data.node_dict)
assert not any(
any(tech_info.endswith(key) for tech_info in model_data.tech_dict.keys())
for key in model_data.UNWANTED_TECH_KEYS
)
assert len(model_data.stripped_keys) > 0
@pytest.mark.parametrize(
("initial_string", "result"),
[("{}", "[\\w\\-]*"), ("{0}", "[\\w\\-]*"), ("{0}{0}", "[\\w\\-]*[\\w\\-]*")],
)
def test_format_lookup(self, model_data, initial_string, result):
assert model_data._format_lookup(initial_string) == result
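    # Illustrative note (an assumption about intent, not taken from the
    # source): the "{0}" placeholders are expanded to the "[\w\-]*" pattern so
    # that arbitrary node/tech names can be matched when keys such as
    # "A.techs.B.constraints.C" are parsed by _get_key_matching_nesting below.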
@pytest.mark.parametrize(
("nesting", "key_to_check", "search_result"),
[
(["techs"], "aA.techs.bB", ["aA", "bB"]),
(["techs", "constraints"], "A.techs.B.constraints.C", ["A", "B", "C"]),
(["techs", "?{0}"], "A.techs.B.constraints.C", ["A", "B", "C"]),
(["techs", "?({0})"], "A.techs.B.C.D", ["A", "B", "C", "D"]),
(["techs", "?{0}"], "A.techs.B.constraints.C.other.D", None),
(["techs", "constraints"], "A.techs.B_.constraints.C", ["A", "B_", "C"]),
(["techs", "constraints"], "A1.techs.B1.constraints.C", ["A1", "B1", "C"]),
(["techs", "con_2"], "A_-1.techs.B-2.con_2.C_D", ["A_-1", "B-2", "C_D"]),
(["techs"], "A.techs.B.constraints.C", None),
(["techs.links.B"], "A.techs.links.B.C", ["A", "C"]),
],
)
def test_get_key_matching_nesting_default_start_end(
self, model_data, nesting, key_to_check, search_result
):
matching_search_results = model_data._get_key_matching_nesting(
nesting, key_to_check
)
if search_result is not None:
assert matching_search_results.groups() == tuple(search_result)
else:
assert matching_search_results is None
@pytest.mark.parametrize(
("start", "end", "search_result"),
[
("({0})\\.", "\\.({0})", ["aA", "bB"]),
("({0})", "({0})", None),
("[a-z]({0})\\.", "\\.({0})", ["A", "bB"]),
("({0})\\.", "\\.techs.({0})", None),
],
)
def test_get_key_matching_nesting_new_start_end(
self, model_data, start, end, search_result
):
nesting = ["techs"]
key_to_check = "aA.techs.bB"
matching_search_results = model_data._get_key_matching_nesting(
nesting, key_to_check, start, end
)
if search_result is not None:
assert matching_search_results.groups() == tuple(search_result)
else:
assert matching_search_results is None
@pytest.mark.parametrize(
("model_run_dict", "nesting", "expected_data_dict"),
[
(
{"A.techs.B.constraints.C": "D"},
["techs", "constraints"],
{("A", "B", "C"): "D"},
),
({"A.techs.B.constraints.C": 2}, ["techs", "?{0}"], {("A", "B", "C"): 2}),
({"A.techs.C": ["a", "b"]}, ["techs"], {("A", "C"): "a.b"}),
({"A.techs.C": 2, "A.foo.C": 2}, ["techs"], {("A", "C"): 2}),
({"A.techs.C": 2, "A.foo.C": 3}, ["foo"], {("A", "C"): 3}),
],
)
@pytest.mark.parametrize("get_method", ["get", "pop"])
def test_reformat_model_run_dict(
self, model_data, model_run_dict, nesting, expected_data_dict, get_method
):
init_model_run_dict = model_run_dict.copy()
data_dict = model_data._reformat_model_run_dict(
model_run_dict, nesting, get_method
)
assert data_dict == expected_data_dict
if get_method == "pop":
assert len(model_run_dict) == len(init_model_run_dict) - 1
elif get_method == "get":
assert model_run_dict == init_model_run_dict
@pytest.mark.parametrize(
("model_run_dict", "nesting", "expected_data_dict"),
[
(
{"A.techs.B.constraints.C": "D"},
["techs", "constraints"],
{("A", "B", "C", "D"): 1},
),
(
{"A.techs.C": ["D", "E"]},
["techs"],
{("A", "C", "D"): 1, ("A", "C", "E"): 1},
),
],
)
def test_reformat_model_run_dict_values_as_dim(
self, model_data, model_run_dict, nesting, expected_data_dict
):
data_dict = model_data._reformat_model_run_dict(
model_run_dict, nesting, get_method="get", values_as_dimension=True
)
assert data_dict == expected_data_dict
def test_reformat_model_run_dict_no_match(self, model_data):
data_dict = model_data._reformat_model_run_dict(
{"A.techs.B": 2}, ["foo"], get_method="get"
)
assert data_dict is None
def test_dict_to_df_basic(self, model_data):
data_dict = {("A", "B", "C"): 1}
dims = ["a", "b"]
df = model_data._dict_to_df(data_dict, dims)
assert df.index.names == dims
assert df.index[0] == list(data_dict.keys())[0][:-1]
assert df.columns[0] == list(data_dict.keys())[0][-1]
assert df.values[0] == list(data_dict.values())[0]
def test_dict_to_df_var_name(self, model_data):
data_dict = {("A", "B", "C"): 1}
dims = ["a", "b", "c"]
df = model_data._dict_to_df(data_dict, dims, var_name="foo")
assert df.index.names == dims
assert df.index[0] == list(data_dict.keys())[0]
assert df.columns[0] == "foo"
assert df.values[0] == list(data_dict.values())[0]
def test_dict_to_df_var_name_in_dims(self, model_data):
data_dict = {("A", "B", "C"): 1}
dims = ["a", "var_name", "c"]
df = model_data._dict_to_df(data_dict, dims)
assert df.index.names == ("a", "c")
assert df.index[0] == ("A", "C")
assert df.columns[0] == "B"
assert df.values[0] == list(data_dict.values())[0]
def test_dict_to_df_var_name_prefix(self, model_data):
data_dict = {("A", "B", "C"): 1}
dims = ["a", "b"]
df = model_data._dict_to_df(data_dict, dims, var_name_prefix="foo")
assert df.index.names == dims
assert df.index[0] == list(data_dict.keys())[0][:-1]
assert df.columns[0] == "foo_" + list(data_dict.keys())[0][-1]
assert df.values[0] == list(data_dict.values())[0]
def test_dict_to_df_is_link(self, model_data):
data_dict = {("A", "B", "C", "D"): 1}
dims = ["techs", "nodes", "node_to"]
df = model_data._dict_to_df(data_dict, dims, is_link=True)
assert df.index.names == ("nodes", "techs")
assert df.index[0] == ("B", "A:C")
assert df.columns[0] == list(data_dict.keys())[0][-1]
assert df.values[0] == list(data_dict.values())[0]
def test_model_run_dict_to_dataset_no_match(self, caplog, model_data):
with caplog.at_level(logging.INFO):
model_data._model_run_dict_to_dataset(
"foo", "node", ["foobar"], ["nodes", "foobar"]
)
records = [r.msg for r in caplog.records]
assert "No relevant data found for `foo` group of parameters" in records
@pytest.mark.parametrize(
("data", "idx", "cols", "out_idx"),
[
(
["foo"],
["test_transmission_elec"],
["foobar"],
["test_transmission_elec:a", "test_transmission_elec:b"],
),
(
["foo", "bar"],
["test_transmission_elec", "test_transmission_heat"],
["foobar"],
[
"test_transmission_elec:a",
"test_transmission_elec:b",
"test_transmission_heat:a",
"test_transmission_heat:b",
],
),
(["foo"], ["bar"], ["foobar"], ["bar"]),
],
)
def test_update_link_tech_names(self, model_data, data, idx, cols, out_idx):
df = pd.DataFrame(data=data, index=idx, columns=cols)
new_df = model_data._update_link_tech_names(df)
assert new_df.index.difference(out_idx).empty
@pytest.mark.parametrize(
("data", "idx", "cols", "out_idx"),
[
(
["foo"],
[("test_transmission_elec", "elec")],
["foobar"],
[
("test_transmission_elec:a", "elec"),
("test_transmission_elec:b", "elec"),
],
),
(["foo"], [("bar", "baz")], ["foobar"], [("bar", "baz")]),
],
)
def test_update_link_tech_names_multiindex(
self, model_data, data, idx, cols, out_idx
):
multiindex = pd.MultiIndex.from_tuples(idx, names=["techs", "blah"])
df = pd.DataFrame(data=data, index=multiindex, columns=cols)
new_df = model_data._update_link_tech_names(df)
assert new_df.index.difference(out_idx).empty
def test_update_link_idx_levels(self, model_data):
idx = pd.MultiIndex.from_tuples(
[("foo", "bar", "baz", "blah"), ("foo1", "bar1", "baz1", "blah")],
names=["techs", "node_to", "nodes", "blah"],
)
df = pd.DataFrame(data=[1, 2], index=idx, columns=["foobar"])
new_df = model_data._update_link_idx_levels(df)
assert new_df.index.names == ["nodes", "blah", "techs"]
assert new_df.index.difference(
[("baz", "blah", "foo:bar"), ("baz1", "blah", "foo1:bar1")]
).empty
def test_all_df_to_true(self, model_data):
df = pd.DataFrame(data=["a", "b"], index=["foo", "bar"], columns=["foobar"])
new_df = model_data._all_df_to_true(df)
assert new_df.foobar.dtype.kind == "b"
assert new_df.foobar.sum() == len(new_df)
def test_extract_node_tech_data(self, model_data, model_run):
assert not set(model_data.model_data.data_vars.keys()).difference(
["node_tech", "link_remote_techs", "link_remote_nodes"]
)
model_data_init = model_data.model_data.copy()
model_data._extract_node_tech_data()
model_data_new = model_data.model_data
for data_var in ["node_tech", "link_remote_techs", "link_remote_nodes"]:
assert model_data_init[data_var].equals(model_data_new[data_var])
for coord in ["carriers", "carrier_tiers"]:
assert coord not in model_data_init
assert coord in model_data.model_data
for var in model_data_new.data_vars.values():
assert "timesteps" not in var.dims
for key in model_run.nodes.as_dict_flat().keys():
if "constraints" in key or "switches" in key:
assert key.split(".")[-1] in model_data_new.data_vars.keys()
def test_add_time_dimension(self, model_data):
model_data._extract_node_tech_data()
assert not hasattr(model_data, "data_pre_time")
assert not hasattr(model_data, "model_data_pre_clustering")
model_data._add_time_dimension()
assert hasattr(model_data, "data_pre_time")
assert hasattr(model_data, "model_data_pre_clustering")
assert "timesteps" in model_data.model_data.resource.dims
assert "max_demand_timesteps" in model_data.model_data.data_vars.keys()
for var in model_data.model_data.data_vars.values():
assert (
var.to_series()
.astype(str)
.where(lambda x: (x.str.find("file=") > -1) | (x.str.find("df=") > -1))
.dropna()
.empty
)
def test_clean_model_data(self, model_data):
model_data._extract_node_tech_data()
model_data._add_time_dimension()
for var in model_data.model_data.data_vars.values():
assert var.attrs == {}
model_data._clean_model_data()
for var in model_data.model_data.data_vars.values():
assert var.attrs == {"parameters": 1, "is_result": 0}
@pytest.mark.parametrize("subdict", ["tech", "node"])
def test_check_data(self, model_data, subdict):
model_data._extract_node_tech_data()
setattr(model_data, f"{subdict}_dict", {"foo": 1})
with pytest.raises(exceptions.ModelError) as errmsg:
model_data._check_data()
assert check_error_or_warning(
errmsg, "Some data not extracted from inputs into model dataset"
)
def test_add_attributes(self, model_data):
model_data.model_data.attrs = {}
model_run = AttrDict({"applied_overrides": "foo", "scenario": "bar"})
model_data._add_attributes(model_run)
attr_dict = model_data.model_data.attrs
assert set(attr_dict.keys()) == set(
["calliope_version", "applied_overrides", "scenario", "defaults"]
)
        assert attr_dict["calliope_version"] == __version__
assert attr_dict["applied_overrides"] == "foo"
assert attr_dict["scenario"] == "bar"
assert "\ncost_energy_cap" in attr_dict["defaults"]
assert "\nenergy_cap_max" in attr_dict["defaults"]
assert "\navailable_area" in attr_dict["defaults"]
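    def _demo_tuple_keyed_dict_to_df(self):
        # Illustrative sketch (not part of the original tests, and not
        # Calliope's implementation): the ``_dict_to_df`` tests above boil
        # down to turning a tuple-keyed dict into a DataFrame whose index is
        # every tuple element but the last and whose column is the last
        # element.  Plain pandas shows the idea; the data and level names
        # here are made up for the example.
        import pandas as pd
        data_dict = {("A", "B", "C"): 1, ("A", "D", "C"): 2}
        ser = pd.Series(data_dict)
        ser.index.names = ["a", "b", "var_name"]
        return ser.unstack("var_name")  # index levels ("a", "b"), column "C"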
| apache-2.0 |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/pandas/computation/expr.py | 9 | 25483 | """:func:`~pandas.eval` parsers
"""
import ast
import operator
import sys
import inspect
import tokenize
import datetime
from functools import partial
import pandas as pd
from pandas import compat
from pandas.compat import StringIO, lmap, zip, reduce, string_types
from pandas.core.base import StringMixin
from pandas.core import common as com
from pandas.tools.util import compose
from pandas.computation.ops import (_cmp_ops_syms, _bool_ops_syms,
_arith_ops_syms, _unary_ops_syms, is_term)
from pandas.computation.ops import _reductions, _mathops, _LOCAL_TAG
from pandas.computation.ops import Op, BinOp, UnaryOp, Term, Constant, Div
from pandas.computation.ops import UndefinedVariableError, FuncNode
from pandas.computation.scope import Scope, _ensure_scope
def tokenize_string(source):
"""Tokenize a Python source code string.
Parameters
----------
source : str
A Python source code string
"""
line_reader = StringIO(source).readline
for toknum, tokval, _, _, _ in tokenize.generate_tokens(line_reader):
yield toknum, tokval
def _rewrite_assign(tok):
"""Rewrite the assignment operator for PyTables expressions that use ``=``
as a substitute for ``==``.
Parameters
----------
tok : tuple of int, str
ints correspond to the all caps constants in the tokenize module
Returns
-------
t : tuple of int, str
Either the input or token or the replacement values
"""
toknum, tokval = tok
return toknum, '==' if tokval == '=' else tokval
def _replace_booleans(tok):
"""Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise
precedence is changed to boolean precedence.
Parameters
----------
tok : tuple of int, str
ints correspond to the all caps constants in the tokenize module
Returns
-------
t : tuple of int, str
Either the input or token or the replacement values
"""
toknum, tokval = tok
if toknum == tokenize.OP:
if tokval == '&':
return tokenize.NAME, 'and'
elif tokval == '|':
return tokenize.NAME, 'or'
return toknum, tokval
return toknum, tokval
def _replace_locals(tok):
"""Replace local variables with a syntactically valid name.
Parameters
----------
tok : tuple of int, str
ints correspond to the all caps constants in the tokenize module
Returns
-------
t : tuple of int, str
Either the input or token or the replacement values
Notes
-----
This is somewhat of a hack in that we rewrite a string such as ``'@a'`` as
``'__pd_eval_local_a'`` by telling the tokenizer that ``__pd_eval_local_``
is a ``tokenize.OP`` and to replace the ``'@'`` symbol with it.
"""
toknum, tokval = tok
if toknum == tokenize.OP and tokval == '@':
return tokenize.OP, _LOCAL_TAG
return toknum, tokval
def _preparse(source, f=compose(_replace_locals, _replace_booleans,
_rewrite_assign)):
"""Compose a collection of tokenization functions
Parameters
----------
source : str
A Python source code string
f : callable
This takes a tuple of (toknum, tokval) as its argument and returns a
tuple with the same structure but possibly different elements. Defaults
to the composition of ``_rewrite_assign``, ``_replace_booleans``, and
``_replace_locals``.
Returns
-------
s : str
Valid Python source code
Notes
-----
The `f` parameter can be any callable that takes *and* returns input of the
form ``(toknum, tokval)``, where ``toknum`` is one of the constants from
the ``tokenize`` module and ``tokval`` is a string.
"""
assert callable(f), 'f must be callable'
return tokenize.untokenize(lmap(f, tokenize_string(source)))
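# --- Illustrative sketch (not part of the original module) -------------------
# The three token rewrites above turn a "query-style" expression into plain
# Python before it is handed to ``ast``: ``=`` becomes ``==``, ``&``/``|``
# become ``and``/``or``, and a ``@name`` local reference is prefixed with
# ``_LOCAL_TAG``.  Exact whitespace in the round-tripped source depends on
# ``tokenize.untokenize``, so only the token content is checked here.
def _demo_preparse():
    rewritten = _preparse("a = 1 & @b")
    assert "==" in rewritten
    assert "and" in rewritten
    assert _LOCAL_TAG + "b" in rewritten.replace(" ", "")
    return rewritten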
def _is_type(t):
"""Factory for a type checking function of type ``t`` or tuple of types."""
return lambda x: isinstance(x.value, t)
_is_list = _is_type(list)
_is_str = _is_type(string_types)
# partition all AST nodes
_all_nodes = frozenset(filter(lambda x: isinstance(x, type) and
issubclass(x, ast.AST),
(getattr(ast, node) for node in dir(ast))))
def _filter_nodes(superclass, all_nodes=_all_nodes):
"""Filter out AST nodes that are subclasses of ``superclass``."""
node_names = (node.__name__ for node in all_nodes
if issubclass(node, superclass))
return frozenset(node_names)
_all_node_names = frozenset(map(lambda x: x.__name__, _all_nodes))
_mod_nodes = _filter_nodes(ast.mod)
_stmt_nodes = _filter_nodes(ast.stmt)
_expr_nodes = _filter_nodes(ast.expr)
_expr_context_nodes = _filter_nodes(ast.expr_context)
_slice_nodes = _filter_nodes(ast.slice)
_boolop_nodes = _filter_nodes(ast.boolop)
_operator_nodes = _filter_nodes(ast.operator)
_unary_op_nodes = _filter_nodes(ast.unaryop)
_cmp_op_nodes = _filter_nodes(ast.cmpop)
_comprehension_nodes = _filter_nodes(ast.comprehension)
_handler_nodes = _filter_nodes(ast.excepthandler)
_arguments_nodes = _filter_nodes(ast.arguments)
_keyword_nodes = _filter_nodes(ast.keyword)
_alias_nodes = _filter_nodes(ast.alias)
# nodes that we don't support directly but are needed for parsing
_hacked_nodes = frozenset(['Assign', 'Module', 'Expr'])
_unsupported_expr_nodes = frozenset(['Yield', 'GeneratorExp', 'IfExp',
'DictComp', 'SetComp', 'Repr', 'Lambda',
'Set', 'AST', 'Is', 'IsNot'])
# these nodes are low priority or won't ever be supported (e.g., AST)
_unsupported_nodes = ((_stmt_nodes | _mod_nodes | _handler_nodes |
_arguments_nodes | _keyword_nodes | _alias_nodes |
_expr_context_nodes | _unsupported_expr_nodes) -
_hacked_nodes)
# we're adding a different assignment in some cases to be equality comparison
# and we don't want `stmt` and friends in their so get only the class whose
# names are capitalized
_base_supported_nodes = (_all_node_names - _unsupported_nodes) | _hacked_nodes
_msg = 'cannot both support and not support {0}'.format(_unsupported_nodes &
_base_supported_nodes)
assert not _unsupported_nodes & _base_supported_nodes, _msg
def _node_not_implemented(node_name, cls):
"""Return a function that raises a NotImplementedError with a passed node
name.
"""
def f(self, *args, **kwargs):
raise NotImplementedError("{0!r} nodes are not "
"implemented".format(node_name))
return f
def disallow(nodes):
"""Decorator to disallow certain nodes from parsing. Raises a
NotImplementedError instead.
Returns
-------
disallowed : callable
"""
def disallowed(cls):
cls.unsupported_nodes = ()
for node in nodes:
new_method = _node_not_implemented(node, cls)
name = 'visit_{0}'.format(node)
cls.unsupported_nodes += (name,)
setattr(cls, name, new_method)
return cls
return disallowed
def _op_maker(op_class, op_symbol):
"""Return a function to create an op class with its symbol already passed.
Returns
-------
f : callable
"""
def f(self, node, *args, **kwargs):
"""Return a partial function with an Op subclass with an operator
already passed.
Returns
-------
f : callable
"""
return partial(op_class, op_symbol, *args, **kwargs)
return f
_op_classes = {'binary': BinOp, 'unary': UnaryOp}
def add_ops(op_classes):
"""Decorator to add default implementation of ops."""
def f(cls):
for op_attr_name, op_class in compat.iteritems(op_classes):
ops = getattr(cls, '{0}_ops'.format(op_attr_name))
ops_map = getattr(cls, '{0}_op_nodes_map'.format(op_attr_name))
for op in ops:
op_node = ops_map[op]
if op_node is not None:
made_op = _op_maker(op_class, op)
setattr(cls, 'visit_{0}'.format(op_node), made_op)
return cls
return f
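# Illustrative note (not part of the original module): for the default op
# tables this wires, e.g., ``visit_Add`` to a factory that returns
# ``partial(BinOp, '+')`` and ``visit_USub`` to one returning
# ``partial(UnaryOp, '-')`` on the decorated visitor class.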
@disallow(_unsupported_nodes)
@add_ops(_op_classes)
class BaseExprVisitor(ast.NodeVisitor):
"""Custom ast walker. Parsers of other engines should subclass this class
if necessary.
Parameters
----------
env : Scope
engine : str
parser : str
preparser : callable
"""
const_type = Constant
term_type = Term
binary_ops = _cmp_ops_syms + _bool_ops_syms + _arith_ops_syms
binary_op_nodes = ('Gt', 'Lt', 'GtE', 'LtE', 'Eq', 'NotEq', 'In', 'NotIn',
'BitAnd', 'BitOr', 'And', 'Or', 'Add', 'Sub', 'Mult',
None, 'Pow', 'FloorDiv', 'Mod')
binary_op_nodes_map = dict(zip(binary_ops, binary_op_nodes))
unary_ops = _unary_ops_syms
unary_op_nodes = 'UAdd', 'USub', 'Invert', 'Not'
unary_op_nodes_map = dict(zip(unary_ops, unary_op_nodes))
rewrite_map = {
ast.Eq: ast.In,
ast.NotEq: ast.NotIn,
ast.In: ast.In,
ast.NotIn: ast.NotIn
}
def __init__(self, env, engine, parser, preparser=_preparse):
self.env = env
self.engine = engine
self.parser = parser
self.preparser = preparser
self.assigner = None
def visit(self, node, **kwargs):
if isinstance(node, string_types):
clean = self.preparser(node)
node = ast.fix_missing_locations(ast.parse(clean))
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method)
return visitor(node, **kwargs)
def visit_Module(self, node, **kwargs):
if len(node.body) != 1:
raise SyntaxError('only a single expression is allowed')
expr = node.body[0]
return self.visit(expr, **kwargs)
def visit_Expr(self, node, **kwargs):
return self.visit(node.value, **kwargs)
def _rewrite_membership_op(self, node, left, right):
# the kind of the operator (is actually an instance)
op_instance = node.op
op_type = type(op_instance)
# must be two terms and the comparison operator must be ==/!=/in/not in
if is_term(left) and is_term(right) and op_type in self.rewrite_map:
left_list, right_list = map(_is_list, (left, right))
left_str, right_str = map(_is_str, (left, right))
# if there are any strings or lists in the expression
if left_list or right_list or left_str or right_str:
op_instance = self.rewrite_map[op_type]()
# pop the string variable out of locals and replace it with a list
# of one string, kind of a hack
if right_str:
name = self.env.add_tmp([right.value])
right = self.term_type(name, self.env)
if left_str:
name = self.env.add_tmp([left.value])
left = self.term_type(name, self.env)
op = self.visit(op_instance)
return op, op_instance, left, right
def _possibly_transform_eq_ne(self, node, left=None, right=None):
if left is None:
left = self.visit(node.left, side='left')
if right is None:
right = self.visit(node.right, side='right')
op, op_class, left, right = self._rewrite_membership_op(node, left,
right)
return op, op_class, left, right
def _possibly_eval(self, binop, eval_in_python):
# eval `in` and `not in` (for now) in "partial" python space
# things that can be evaluated in "eval" space will be turned into
# temporary variables. for example,
# [1,2] in a + 2 * b
# in that case a + 2 * b will be evaluated using numexpr, and the "in"
# call will be evaluated using isin (in python space)
return binop.evaluate(self.env, self.engine, self.parser,
self.term_type, eval_in_python)
def _possibly_evaluate_binop(self, op, op_class, lhs, rhs,
eval_in_python=('in', 'not in'),
maybe_eval_in_python=('==', '!=', '<', '>',
'<=', '>=')):
res = op(lhs, rhs)
if res.has_invalid_return_type:
raise TypeError("unsupported operand type(s) for {0}:"
" '{1}' and '{2}'".format(res.op, lhs.type,
rhs.type))
if self.engine != 'pytables':
if (res.op in _cmp_ops_syms
and getattr(lhs, 'is_datetime', False)
or getattr(rhs, 'is_datetime', False)):
# all date ops must be done in python bc numexpr doesn't work
# well with NaT
return self._possibly_eval(res, self.binary_ops)
if res.op in eval_in_python:
# "in"/"not in" ops are always evaluated in python
return self._possibly_eval(res, eval_in_python)
elif self.engine != 'pytables':
if (getattr(lhs, 'return_type', None) == object
or getattr(rhs, 'return_type', None) == object):
# evaluate "==" and "!=" in python if either of our operands
# has an object return type
return self._possibly_eval(res, eval_in_python +
maybe_eval_in_python)
return res
def visit_BinOp(self, node, **kwargs):
op, op_class, left, right = self._possibly_transform_eq_ne(node)
return self._possibly_evaluate_binop(op, op_class, left, right)
def visit_Div(self, node, **kwargs):
truediv = self.env.scope['truediv']
return lambda lhs, rhs: Div(lhs, rhs, truediv)
def visit_UnaryOp(self, node, **kwargs):
op = self.visit(node.op)
operand = self.visit(node.operand)
return op(operand)
def visit_Name(self, node, **kwargs):
return self.term_type(node.id, self.env, **kwargs)
def visit_NameConstant(self, node, **kwargs):
return self.const_type(node.value, self.env)
def visit_Num(self, node, **kwargs):
return self.const_type(node.n, self.env)
def visit_Str(self, node, **kwargs):
name = self.env.add_tmp(node.s)
return self.term_type(name, self.env)
def visit_List(self, node, **kwargs):
name = self.env.add_tmp([self.visit(e)(self.env) for e in node.elts])
return self.term_type(name, self.env)
visit_Tuple = visit_List
def visit_Index(self, node, **kwargs):
""" df.index[4] """
return self.visit(node.value)
def visit_Subscript(self, node, **kwargs):
value = self.visit(node.value)
slobj = self.visit(node.slice)
result = pd.eval(slobj, local_dict=self.env, engine=self.engine,
parser=self.parser)
try:
# a Term instance
v = value.value[result]
except AttributeError:
# an Op instance
lhs = pd.eval(value, local_dict=self.env, engine=self.engine,
parser=self.parser)
v = lhs[result]
name = self.env.add_tmp(v)
return self.term_type(name, env=self.env)
def visit_Slice(self, node, **kwargs):
""" df.index[slice(4,6)] """
lower = node.lower
if lower is not None:
lower = self.visit(lower).value
upper = node.upper
if upper is not None:
upper = self.visit(upper).value
step = node.step
if step is not None:
step = self.visit(step).value
return slice(lower, upper, step)
def visit_Assign(self, node, **kwargs):
"""
support a single assignment node, like
c = a + b
set the assigner at the top level, must be a Name node which
might or might not exist in the resolvers
"""
if len(node.targets) != 1:
raise SyntaxError('can only assign a single expression')
if not isinstance(node.targets[0], ast.Name):
raise SyntaxError('left hand side of an assignment must be a '
'single name')
if self.env.target is None:
raise ValueError('cannot assign without a target object')
try:
assigner = self.visit(node.targets[0], **kwargs)
except UndefinedVariableError:
assigner = node.targets[0].id
self.assigner = getattr(assigner, 'name', assigner)
if self.assigner is None:
raise SyntaxError('left hand side of an assignment must be a '
'single resolvable name')
return self.visit(node.value, **kwargs)
def visit_Attribute(self, node, **kwargs):
attr = node.attr
value = node.value
ctx = node.ctx
if isinstance(ctx, ast.Load):
# resolve the value
resolved = self.visit(value).value
try:
v = getattr(resolved, attr)
name = self.env.add_tmp(v)
return self.term_type(name, self.env)
except AttributeError:
# something like datetime.datetime where scope is overridden
if isinstance(value, ast.Name) and value.id == attr:
return resolved
raise ValueError("Invalid Attribute context {0}".format(ctx.__name__))
def visit_Call_35(self, node, side=None, **kwargs):
""" in 3.5 the starargs attribute was changed to be more flexible, #11097 """
if isinstance(node.func, ast.Attribute):
res = self.visit_Attribute(node.func)
elif not isinstance(node.func, ast.Name):
raise TypeError("Only named functions are supported")
else:
try:
res = self.visit(node.func)
except UndefinedVariableError:
# Check if this is a supported function name
try:
res = FuncNode(node.func.id)
except ValueError:
# Raise original error
raise
if res is None:
raise ValueError("Invalid function call {0}".format(node.func.id))
if hasattr(res, 'value'):
res = res.value
if isinstance(res, FuncNode):
            new_args = [self.visit(arg) for arg in node.args]
if node.keywords:
raise TypeError("Function \"{0}\" does not support keyword "
"arguments".format(res.name))
return res(*new_args, **kwargs)
else:
            new_args = [self.visit(arg).value for arg in node.args]
            # evaluate keyword arguments and pass them through as a plain
            # dict, mirroring visit_Call_legacy below
            keywords = {}
            for key in node.keywords:
                if not isinstance(key, ast.keyword):
                    raise ValueError("keyword error in function call "
                                     "'{0}'".format(node.func.id))
                if key.arg:
                    keywords[key.arg] = self.visit(key.value).value
            return self.const_type(res(*new_args, **keywords), self.env)
def visit_Call_legacy(self, node, side=None, **kwargs):
# this can happen with: datetime.datetime
if isinstance(node.func, ast.Attribute):
res = self.visit_Attribute(node.func)
elif not isinstance(node.func, ast.Name):
raise TypeError("Only named functions are supported")
else:
try:
res = self.visit(node.func)
except UndefinedVariableError:
# Check if this is a supported function name
try:
res = FuncNode(node.func.id)
except ValueError:
# Raise original error
raise
if res is None:
raise ValueError("Invalid function call {0}".format(node.func.id))
if hasattr(res, 'value'):
res = res.value
if isinstance(res, FuncNode):
args = [self.visit(targ) for targ in node.args]
if node.starargs is not None:
args += self.visit(node.starargs)
if node.keywords or node.kwargs:
raise TypeError("Function \"{0}\" does not support keyword "
"arguments".format(res.name))
return res(*args, **kwargs)
else:
args = [self.visit(targ).value for targ in node.args]
if node.starargs is not None:
args += self.visit(node.starargs).value
keywords = {}
for key in node.keywords:
if not isinstance(key, ast.keyword):
raise ValueError("keyword error in function call "
"'{0}'".format(node.func.id))
keywords[key.arg] = self.visit(key.value).value
if node.kwargs is not None:
keywords.update(self.visit(node.kwargs).value)
return self.const_type(res(*args, **keywords), self.env)
def translate_In(self, op):
return op
def visit_Compare(self, node, **kwargs):
ops = node.ops
comps = node.comparators
# base case: we have something like a CMP b
if len(comps) == 1:
op = self.translate_In(ops[0])
binop = ast.BinOp(op=op, left=node.left, right=comps[0])
return self.visit(binop)
# recursive case: we have a chained comparison, a CMP b CMP c, etc.
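        # e.g. ``1 < a <= 3`` is split into ``(1 < a) and (a <= 3)``; the
        # pairwise comparisons are then combined by the BoolOp visitor below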
left = node.left
values = []
for op, comp in zip(ops, comps):
new_node = self.visit(ast.Compare(comparators=[comp], left=left,
ops=[self.translate_In(op)]))
left = comp
values.append(new_node)
return self.visit(ast.BoolOp(op=ast.And(), values=values))
def _try_visit_binop(self, bop):
if isinstance(bop, (Op, Term)):
return bop
return self.visit(bop)
def visit_BoolOp(self, node, **kwargs):
def visitor(x, y):
lhs = self._try_visit_binop(x)
rhs = self._try_visit_binop(y)
op, op_class, lhs, rhs = self._possibly_transform_eq_ne(node, lhs,
rhs)
return self._possibly_evaluate_binop(op, node.op, lhs, rhs)
operands = node.values
return reduce(visitor, operands)
# ast.Call signature changed on 3.5,
# conditionally change which methods is named
# visit_Call depending on Python version, #11097
if compat.PY35:
BaseExprVisitor.visit_Call = BaseExprVisitor.visit_Call_35
else:
BaseExprVisitor.visit_Call = BaseExprVisitor.visit_Call_legacy
_python_not_supported = frozenset(['Dict', 'BoolOp', 'In', 'NotIn'])
_numexpr_supported_calls = frozenset(_reductions + _mathops)
@disallow((_unsupported_nodes | _python_not_supported) -
(_boolop_nodes | frozenset(['BoolOp', 'Attribute', 'In', 'NotIn',
'Tuple'])))
class PandasExprVisitor(BaseExprVisitor):
def __init__(self, env, engine, parser,
preparser=partial(_preparse, f=compose(_replace_locals,
_replace_booleans))):
super(PandasExprVisitor, self).__init__(env, engine, parser, preparser)
@disallow(_unsupported_nodes | _python_not_supported | frozenset(['Not']))
class PythonExprVisitor(BaseExprVisitor):
def __init__(self, env, engine, parser, preparser=lambda x: x):
super(PythonExprVisitor, self).__init__(env, engine, parser,
preparser=preparser)
class Expr(StringMixin):
"""Object encapsulating an expression.
Parameters
----------
expr : str
engine : str, optional, default 'numexpr'
parser : str, optional, default 'pandas'
env : Scope, optional, default None
truediv : bool, optional, default True
level : int, optional, default 2
"""
def __init__(self, expr, engine='numexpr', parser='pandas', env=None,
truediv=True, level=0):
self.expr = expr
self.env = env or Scope(level=level + 1)
self.engine = engine
self.parser = parser
self.env.scope['truediv'] = truediv
self._visitor = _parsers[parser](self.env, self.engine, self.parser)
self.terms = self.parse()
@property
def assigner(self):
return getattr(self._visitor, 'assigner', None)
def __call__(self):
return self.terms(self.env)
def __unicode__(self):
return com.pprint_thing(self.terms)
def __len__(self):
return len(self.expr)
def parse(self):
"""Parse an expression"""
return self._visitor.visit(self.expr)
@property
def names(self):
"""Get the names in an expression"""
if is_term(self.terms):
return frozenset([self.terms.name])
return frozenset(term.name for term in com.flatten(self.terms))
_parsers = {'python': PythonExprVisitor, 'pandas': PandasExprVisitor}
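# --- Illustrative sketch (not part of the original module) -------------------
# ``Expr`` is what ``pd.eval`` builds internally: it creates a ``Scope``,
# picks the visitor registered in ``_parsers`` for the requested parser and
# turns the (pre-processed) string into a tree of ``Term``/``Op`` nodes.
# A minimal end-to-end use through the public API; engine='python' is used
# here only so the sketch does not depend on numexpr being installed.
def _demo_eval():
    df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    mask = pd.eval("df.a + df.b > 5", engine='python')
    return df[mask]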
| apache-2.0 |
PatrickOReilly/scikit-learn | sklearn/gaussian_process/tests/test_kernels.py | 3 | 12397 | """Testing for kernels for Gaussian processes."""
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
from collections import Hashable
from sklearn.externals.funcsigs import signature
import numpy as np
from sklearn.gaussian_process.kernels import _approx_fprime
from sklearn.metrics.pairwise \
import PAIRWISE_KERNEL_FUNCTIONS, euclidean_distances, pairwise_kernels
from sklearn.gaussian_process.kernels \
import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct,
ConstantKernel, WhiteKernel, PairwiseKernel, KernelOperator,
Exponentiation)
from sklearn.base import clone
from sklearn.utils.testing import (assert_equal, assert_almost_equal,
assert_not_equal, assert_array_equal,
assert_array_almost_equal)
X = np.random.RandomState(0).normal(0, 1, (5, 2))
Y = np.random.RandomState(0).normal(0, 1, (6, 2))
kernel_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0)
kernels = [RBF(length_scale=2.0), RBF(length_scale_bounds=(0.5, 2.0)),
ConstantKernel(constant_value=10.0),
2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * RBF(length_scale=0.5), kernel_white,
2.0 * RBF(length_scale=[0.5, 2.0]),
2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * Matern(length_scale=0.5, nu=0.5),
2.0 * Matern(length_scale=1.5, nu=1.5),
2.0 * Matern(length_scale=2.5, nu=2.5),
2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5),
3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5),
4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5),
RationalQuadratic(length_scale=0.5, alpha=1.5),
ExpSineSquared(length_scale=0.5, periodicity=1.5),
DotProduct(sigma_0=2.0), DotProduct(sigma_0=2.0) ** 2]
for metric in PAIRWISE_KERNEL_FUNCTIONS:
if metric in ["additive_chi2", "chi2"]:
continue
kernels.append(PairwiseKernel(gamma=1.0, metric=metric))
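# --- Illustrative sketch (not part of the original test module) --------------
# Several tests below rely on ``kernel.theta`` holding the *logarithms* of the
# free hyperparameters, in the order in which they are declared.  A minimal
# standalone check of that convention:
def _demo_theta_is_log():
    k = 2.0 * RBF(length_scale=0.5)
    # theta is [log(constant_value), log(length_scale)] for this product kernel
    assert np.allclose(k.theta, np.log([2.0, 0.5]))
    return k.theta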
def test_kernel_gradient():
""" Compare analytic and numeric gradient of kernels. """
for kernel in kernels:
K, K_gradient = kernel(X, eval_gradient=True)
assert_equal(K_gradient.shape[0], X.shape[0])
assert_equal(K_gradient.shape[1], X.shape[0])
assert_equal(K_gradient.shape[2], kernel.theta.shape[0])
def eval_kernel_for_theta(theta):
kernel_clone = kernel.clone_with_theta(theta)
K = kernel_clone(X, eval_gradient=False)
return K
K_gradient_approx = \
_approx_fprime(kernel.theta, eval_kernel_for_theta, 1e-10)
assert_almost_equal(K_gradient, K_gradient_approx, 4)
def test_kernel_theta():
""" Check that parameter vector theta of kernel is set correctly. """
for kernel in kernels:
if isinstance(kernel, KernelOperator) \
or isinstance(kernel, Exponentiation): # skip non-basic kernels
continue
theta = kernel.theta
_, K_gradient = kernel(X, eval_gradient=True)
# Determine kernel parameters that contribute to theta
init_sign = signature(kernel.__class__.__init__).parameters.values()
args = [p.name for p in init_sign if p.name != 'self']
theta_vars = map(lambda s: s.rstrip("_bounds"),
filter(lambda s: s.endswith("_bounds"), args))
assert_equal(
set(hyperparameter.name
for hyperparameter in kernel.hyperparameters),
set(theta_vars))
# Check that values returned in theta are consistent with
# hyperparameter values (being their logarithms)
for i, hyperparameter in enumerate(kernel.hyperparameters):
assert_equal(theta[i],
np.log(getattr(kernel, hyperparameter.name)))
# Fixed kernel parameters must be excluded from theta and gradient.
for i, hyperparameter in enumerate(kernel.hyperparameters):
# create copy with certain hyperparameter fixed
params = kernel.get_params()
params[hyperparameter.name + "_bounds"] = "fixed"
kernel_class = kernel.__class__
new_kernel = kernel_class(**params)
# Check that theta and K_gradient are identical with the fixed
# dimension left out
_, K_gradient_new = new_kernel(X, eval_gradient=True)
assert_equal(theta.shape[0], new_kernel.theta.shape[0] + 1)
assert_equal(K_gradient.shape[2], K_gradient_new.shape[2] + 1)
if i > 0:
assert_equal(theta[:i], new_kernel.theta[:i])
assert_array_equal(K_gradient[..., :i],
K_gradient_new[..., :i])
if i + 1 < len(kernel.hyperparameters):
assert_equal(theta[i+1:], new_kernel.theta[i:])
assert_array_equal(K_gradient[..., i+1:],
K_gradient_new[..., i:])
# Check that values of theta are modified correctly
for i, hyperparameter in enumerate(kernel.hyperparameters):
theta[i] = np.log(42)
kernel.theta = theta
assert_almost_equal(getattr(kernel, hyperparameter.name), 42)
setattr(kernel, hyperparameter.name, 43)
assert_almost_equal(kernel.theta[i], np.log(43))
def test_auto_vs_cross():
""" Auto-correlation and cross-correlation should be consistent. """
for kernel in kernels:
if kernel == kernel_white:
continue # Identity is not satisfied on diagonal
K_auto = kernel(X)
K_cross = kernel(X, X)
assert_almost_equal(K_auto, K_cross, 5)
def test_kernel_diag():
""" Test that diag method of kernel returns consistent results. """
for kernel in kernels:
K_call_diag = np.diag(kernel(X))
K_diag = kernel.diag(X)
assert_almost_equal(K_call_diag, K_diag, 5)
def test_kernel_operator_commutative():
""" Adding kernels and multiplying kernels should be commutative. """
# Check addition
assert_almost_equal((RBF(2.0) + 1.0)(X),
(1.0 + RBF(2.0))(X))
# Check multiplication
assert_almost_equal((3.0 * RBF(2.0))(X),
(RBF(2.0) * 3.0)(X))
def test_kernel_anisotropic():
""" Anisotropic kernel should be consistent with isotropic kernels."""
kernel = 3.0 * RBF([0.5, 2.0])
K = kernel(X)
X1 = np.array(X)
X1[:, 0] *= 4
K1 = 3.0 * RBF(2.0)(X1)
assert_almost_equal(K, K1)
X2 = np.array(X)
X2[:, 1] /= 4
K2 = 3.0 * RBF(0.5)(X2)
assert_almost_equal(K, K2)
# Check getting and setting via theta
kernel.theta = kernel.theta + np.log(2)
assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))
assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])
def test_kernel_stationary():
""" Test stationarity of kernels."""
for kernel in kernels:
if not kernel.is_stationary():
continue
K = kernel(X, X + 1)
assert_almost_equal(K[0, 0], np.diag(K))
def check_hyperparameters_equal(kernel1, kernel2):
"""Check that hyperparameters of two kernels are equal"""
for attr in set(dir(kernel1) + dir(kernel2)):
if attr.startswith("hyperparameter_"):
attr_value1 = getattr(kernel1, attr)
attr_value2 = getattr(kernel2, attr)
assert_equal(attr_value1, attr_value2)
def test_kernel_clone():
""" Test that sklearn's clone works correctly on kernels. """
bounds = (1e-5, 1e5)
for kernel in kernels:
kernel_cloned = clone(kernel)
# XXX: Should this be fixed?
# This differs from the sklearn's estimators equality check.
assert_equal(kernel, kernel_cloned)
assert_not_equal(id(kernel), id(kernel_cloned))
# Check that all constructor parameters are equal.
assert_equal(kernel.get_params(), kernel_cloned.get_params())
# Check that all hyperparameters are equal.
yield check_hyperparameters_equal, kernel, kernel_cloned
# This test is to verify that using set_params does not
# break clone on kernels.
# This used to break because in kernels such as the RBF, non-trivial
# logic that modified the length scale used to be in the constructor
# See https://github.com/scikit-learn/scikit-learn/issues/6961
# for more details.
params = kernel.get_params()
# RationalQuadratic kernel is isotropic.
isotropic_kernels = (ExpSineSquared, RationalQuadratic)
if 'length_scale' in params and not isinstance(kernel, isotropic_kernels):
length_scale = params['length_scale']
if np.iterable(length_scale):
params['length_scale'] = length_scale[0]
params['length_scale_bounds'] = bounds
else:
params['length_scale'] = [length_scale] * 2
params['length_scale_bounds'] = bounds * 2
kernel_cloned.set_params(**params)
kernel_cloned_clone = clone(kernel_cloned)
assert_equal(kernel_cloned_clone.get_params(),
kernel_cloned.get_params())
assert_not_equal(id(kernel_cloned_clone), id(kernel_cloned))
yield check_hyperparameters_equal, kernel_cloned, kernel_cloned_clone
def test_matern_kernel():
""" Test consistency of Matern kernel for special values of nu. """
K = Matern(nu=1.5, length_scale=1.0)(X)
# the diagonal elements of a matern kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(X.shape[0]))
# matern kernel for coef0==0.5 is equal to absolute exponential kernel
K_absexp = np.exp(-euclidean_distances(X, X, squared=False))
K = Matern(nu=0.5, length_scale=1.0)(X)
assert_array_almost_equal(K, K_absexp)
# test that special cases of matern kernel (coef0 in [0.5, 1.5, 2.5])
# result in nearly identical results as the general case for coef0 in
# [0.5 + tiny, 1.5 + tiny, 2.5 + tiny]
tiny = 1e-10
for nu in [0.5, 1.5, 2.5]:
K1 = Matern(nu=nu, length_scale=1.0)(X)
K2 = Matern(nu=nu + tiny, length_scale=1.0)(X)
assert_array_almost_equal(K1, K2)
def test_kernel_versus_pairwise():
"""Check that GP kernels can also be used as pairwise kernels."""
for kernel in kernels:
# Test auto-kernel
if kernel != kernel_white:
# For WhiteKernel: k(X) != k(X,X). This is assumed by
# pairwise_kernels
K1 = kernel(X)
K2 = pairwise_kernels(X, metric=kernel)
assert_array_almost_equal(K1, K2)
# Test cross-kernel
K1 = kernel(X, Y)
K2 = pairwise_kernels(X, Y, metric=kernel)
assert_array_almost_equal(K1, K2)
def test_set_get_params():
"""Check that set_params()/get_params() is consistent with kernel.theta."""
for kernel in kernels:
# Test get_params()
index = 0
params = kernel.get_params()
for hyperparameter in kernel.hyperparameters:
if hyperparameter.bounds is "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
assert_almost_equal(np.exp(kernel.theta[index:index+size]),
params[hyperparameter.name])
index += size
else:
assert_almost_equal(np.exp(kernel.theta[index]),
params[hyperparameter.name])
index += 1
# Test set_params()
index = 0
value = 10 # arbitrary value
for hyperparameter in kernel.hyperparameters:
if hyperparameter.bounds is "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
kernel.set_params(**{hyperparameter.name: [value]*size})
assert_almost_equal(np.exp(kernel.theta[index:index+size]),
[value]*size)
index += size
else:
kernel.set_params(**{hyperparameter.name: value})
assert_almost_equal(np.exp(kernel.theta[index]), value)
index += 1
| bsd-3-clause |
kazemakase/scikit-learn | examples/svm/plot_svm_scale_c.py | 223 | 5375 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different number of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the number of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different number of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grows.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
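# --- Illustrative note (not part of the original example) --------------------
# The "1/n_samples" scaling compared below simply multiplies each candidate C
# by the number of training samples used in a fold, so that the data-fit term
# (which grows with n) and the penalty term stay balanced.  For instance, with
# n_samples = 100 and train_size = 0.5, a candidate C = 0.01 is plotted at
#     C_scaled = 0.01 * (100 * 0.5) = 0.5
# which is exactly what ``grid_cs = cs * float(scaler)`` computes further down.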
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
# Jaques Grobler <jaques.grobler@inria.fr>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features // 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(n=n_samples, train_size=train_size,
n_iter=250, random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
CallaJun/hackprince | indico/networkx/drawing/nx_pylab.py | 6 | 29703 | """
**********
Matplotlib
**********
Draw networks with matplotlib.
See Also
--------
matplotlib: http://matplotlib.sourceforge.net/
pygraphviz: http://networkx.lanl.gov/pygraphviz/
"""
# Copyright (C) 2004-2012 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.drawing.layout import shell_layout,\
circular_layout,spectral_layout,spring_layout,random_layout
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
__all__ = ['draw',
'draw_networkx',
'draw_networkx_nodes',
'draw_networkx_edges',
'draw_networkx_labels',
'draw_networkx_edge_labels',
'draw_circular',
'draw_random',
'draw_spectral',
'draw_spring',
'draw_shell',
'draw_graphviz']
def draw(G, pos=None, ax=None, hold=None, **kwds):
"""Draw the graph G with Matplotlib.
Draw the graph as a simple representation with no node
labels or edge labels and using the full Matplotlib figure area
and no axis labels by default. See draw_networkx() for more
full-featured drawing that allows title, axis labels etc.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
ax : Matplotlib Axes object, optional
Draw the graph in specified Matplotlib axes.
hold : bool, optional
Set the Matplotlib hold state. If True subsequent draw
commands will be added to the current axes.
**kwds : optional keywords
See networkx.draw_networkx() for a description of optional keywords.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
See Also
--------
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
Notes
-----
This function has the same name as pylab.draw and pyplot.draw
so beware when using
>>> from networkx import *
since you might overwrite the pylab.draw function.
With pyplot use
>>> import matplotlib.pyplot as plt
>>> import networkx as nx
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G) # networkx draw()
>>> plt.draw() # pyplot draw()
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
cf = plt.gcf()
else:
cf = ax.get_figure()
cf.set_facecolor('w')
if ax is None:
if cf._axstack() is None:
ax = cf.add_axes((0, 0, 1, 1))
else:
ax = cf.gca()
# allow callers to override the hold state by passing hold=True|False
if 'with_labels' not in kwds:
kwds['with_labels'] = False
b = plt.ishold()
h = kwds.pop('hold', None)
if h is not None:
plt.hold(h)
try:
draw_networkx(G, pos=pos, ax=ax, **kwds)
ax.set_axis_off()
plt.draw_if_interactive()
except:
plt.hold(b)
raise
plt.hold(b)
return
def draw_networkx(G, pos=None, with_labels=True, **kwds):
"""Draw the graph G using Matplotlib.
Draw the graph with Matplotlib with options for node positions,
labeling, titles, and many other drawing features.
See draw() for simple drawing without labels or axes.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
with_labels : bool, optional (default=True)
Set to True to draw labels on the nodes.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional (default G.nodes())
Draw only specified nodes
edgelist : list, optional (default=G.edges())
Draw only specified edges
node_size : scalar or array, optional (default=300)
Size of nodes. If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats, (default='r')
Node color. Can be a single color format string,
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string, optional (default='o')
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8'.
alpha : float, optional (default=1.0)
The node transparency
cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of nodes
vmin,vmax : float, optional (default=None)
Minimum and maximum for node colormap scaling
linewidths : [None | scalar | sequence]
Line width of symbol border (default =1.0)
width : float, optional (default=1.0)
Line width of edges
edge_color : color string, or array of floats (default='r')
Edge color. Can be a single color format string,
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
edge_cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of edges
edge_vmin,edge_vmax : floats, optional (default=None)
Minimum and maximum for edge colormap scaling
style : string, optional (default='solid')
Edge line style (solid|dashed|dotted,dashdot)
labels : dictionary, optional (default=None)
Node labels in a dictionary keyed by node of text labels
font_size : int, optional (default=12)
Font size for text labels
font_color : string, optional (default='k' black)
Font color string
font_weight : string, optional (default='normal')
Font weight
font_family : string, optional (default='sans-serif')
Font family
label : string, optional
Label for graph legend
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
>>> import matplotlib.pyplot as plt
    >>> limits=plt.axis('off') # turn off axis
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if pos is None:
pos = nx.drawing.spring_layout(G) # default to spring layout
node_collection = draw_networkx_nodes(G, pos, **kwds)
edge_collection = draw_networkx_edges(G, pos, **kwds)
if with_labels:
draw_networkx_labels(G, pos, **kwds)
plt.draw_if_interactive()
def draw_networkx_nodes(G, pos,
nodelist=None,
node_size=300,
node_color='r',
node_shape='o',
alpha=1.0,
cmap=None,
vmin=None,
vmax=None,
ax=None,
linewidths=None,
label=None,
**kwds):
"""Draw the nodes of the graph G.
This draws only the nodes of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional
Draw only specified nodes (default G.nodes())
node_size : scalar or array
Size of nodes (default=300). If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats
Node color. Can be a single color format string (default='r'),
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8' (default='o').
alpha : float
The node transparency (default=1.0)
cmap : Matplotlib colormap
Colormap for mapping intensities of nodes (default=None)
vmin,vmax : floats
Minimum and maximum for node colormap scaling (default=None)
linewidths : [None | scalar | sequence]
Line width of symbol border (default =1.0)
label : [None| string]
Label for legend
Returns
-------
matplotlib.collections.PathCollection
`PathCollection` of the nodes.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nodes=nx.draw_networkx_nodes(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pyplot as plt
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if nodelist is None:
nodelist = G.nodes()
if not nodelist or len(nodelist) == 0: # empty nodelist, no drawing
return None
try:
xy = numpy.asarray([pos[v] for v in nodelist])
except KeyError as e:
raise nx.NetworkXError('Node %s has no position.'%e)
except ValueError:
raise nx.NetworkXError('Bad value in node positions.')
node_collection = ax.scatter(xy[:, 0], xy[:, 1],
s=node_size,
c=node_color,
marker=node_shape,
cmap=cmap,
vmin=vmin,
vmax=vmax,
alpha=alpha,
linewidths=linewidths,
label=label)
node_collection.set_zorder(2)
return node_collection
def draw_networkx_edges(G, pos,
edgelist=None,
width=1.0,
edge_color='k',
style='solid',
alpha=None,
edge_cmap=None,
edge_vmin=None,
edge_vmax=None,
ax=None,
arrows=True,
label=None,
**kwds):
"""Draw the edges of the graph G.
This draws only the edges of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
edgelist : collection of edge tuples
Draw only specified edges(default=G.edges())
width : float
Line width of edges (default =1.0)
edge_color : color string, or array of floats
Edge color. Can be a single color format string (default='r'),
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
style : string
Edge line style (default='solid') (solid|dashed|dotted,dashdot)
alpha : float
The edge transparency (default=1.0)
    edge_cmap : Matplotlib colormap
Colormap for mapping intensities of edges (default=None)
edge_vmin,edge_vmax : floats
Minimum and maximum for edge colormap scaling (default=None)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
arrows : bool, optional (default=True)
For directed graphs, if True draw arrowheads.
label : [None| string]
Label for legend
Returns
-------
matplotlib.collection.LineCollection
`LineCollection` of the edges
Notes
-----
For directed graphs, "arrows" (actually just thicker stubs) are drawn
at the head end. Arrows can be turned off with keyword arrows=False.
Yes, it is ugly but drawing proper arrows with Matplotlib this
way is tricky.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> edges=nx.draw_networkx_edges(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
from matplotlib.colors import colorConverter, Colormap
from matplotlib.collections import LineCollection
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if edgelist is None:
edgelist = G.edges()
if not edgelist or len(edgelist) == 0: # no edges!
return None
# set edge positions
edge_pos = numpy.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist])
if not cb.iterable(width):
lw = (width,)
else:
lw = width
if not cb.is_string_like(edge_color) \
and cb.iterable(edge_color) \
and len(edge_color) == len(edge_pos):
if numpy.alltrue([cb.is_string_like(c)
for c in edge_color]):
# (should check ALL elements)
# list of color letters such as ['k','r','k',...]
edge_colors = tuple([colorConverter.to_rgba(c, alpha)
for c in edge_color])
elif numpy.alltrue([not cb.is_string_like(c)
for c in edge_color]):
# If color specs are given as (rgb) or (rgba) tuples, we're OK
if numpy.alltrue([cb.iterable(c) and len(c) in (3, 4)
for c in edge_color]):
edge_colors = tuple(edge_color)
else:
# numbers (which are going to be mapped with a colormap)
edge_colors = None
else:
raise ValueError('edge_color must consist of either color names or numbers')
else:
if cb.is_string_like(edge_color) or len(edge_color) == 1:
edge_colors = (colorConverter.to_rgba(edge_color, alpha), )
else:
raise ValueError('edge_color must be a single color or list of exactly m colors where m is the number or edges')
edge_collection = LineCollection(edge_pos,
colors=edge_colors,
linewidths=lw,
antialiaseds=(1,),
linestyle=style,
transOffset = ax.transData,
)
edge_collection.set_zorder(1) # edges go behind nodes
edge_collection.set_label(label)
ax.add_collection(edge_collection)
# Note: there was a bug in mpl regarding the handling of alpha values for
# each line in a LineCollection. It was fixed in matplotlib in r7184 and
# r7189 (June 6 2009). We should then not set the alpha value globally,
# since the user can instead provide per-edge alphas now. Only set it
# globally if provided as a scalar.
if cb.is_numlike(alpha):
edge_collection.set_alpha(alpha)
if edge_colors is None:
if edge_cmap is not None:
assert(isinstance(edge_cmap, Colormap))
edge_collection.set_array(numpy.asarray(edge_color))
edge_collection.set_cmap(edge_cmap)
if edge_vmin is not None or edge_vmax is not None:
edge_collection.set_clim(edge_vmin, edge_vmax)
else:
edge_collection.autoscale()
arrow_collection = None
if G.is_directed() and arrows:
# a directed graph hack
# draw thick line segments at head end of edge
# waiting for someone else to implement arrows that will work
arrow_colors = edge_colors
a_pos = []
p = 1.0-0.25 # make head segment 25 percent of edge length
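        # the stub therefore runs from the point a fraction p (75%) of the way
        # along the edge to the target node, i.e.
        # (xa, ya) = (x1 + p*dx, y1 + p*dy)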
for src, dst in edge_pos:
x1, y1 = src
x2, y2 = dst
dx = x2-x1 # x offset
dy = y2-y1 # y offset
d = numpy.sqrt(float(dx**2 + dy**2)) # length of edge
if d == 0: # source and target at same position
continue
if dx == 0: # vertical edge
xa = x2
ya = dy*p+y1
if dy == 0: # horizontal edge
ya = y2
xa = dx*p+x1
else:
theta = numpy.arctan2(dy, dx)
xa = p*d*numpy.cos(theta)+x1
ya = p*d*numpy.sin(theta)+y1
a_pos.append(((xa, ya), (x2, y2)))
arrow_collection = LineCollection(a_pos,
colors=arrow_colors,
linewidths=[4*ww for ww in lw],
antialiaseds=(1,),
transOffset = ax.transData,
)
arrow_collection.set_zorder(1) # edges go behind nodes
arrow_collection.set_label(label)
ax.add_collection(arrow_collection)
# update view
minx = numpy.amin(numpy.ravel(edge_pos[:, :, 0]))
maxx = numpy.amax(numpy.ravel(edge_pos[:, :, 0]))
miny = numpy.amin(numpy.ravel(edge_pos[:, :, 1]))
maxy = numpy.amax(numpy.ravel(edge_pos[:, :, 1]))
w = maxx-minx
h = maxy-miny
padx, pady = 0.05*w, 0.05*h
corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
ax.update_datalim(corners)
ax.autoscale_view()
    return edge_collection
def draw_networkx_labels(G, pos,
labels=None,
font_size=12,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
ax=None,
**kwds):
"""Draw node labels on the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
labels : dictionary, optional (default=None)
Node labels in a dictionary keyed by node of text labels
font_size : int
Font size for text labels (default=12)
font_color : string
Font color string (default='k' black)
font_family : string
Font family (default='sans-serif')
font_weight : string
Font weight (default='normal')
alpha : float
The text transparency (default=1.0)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
Returns
-------
dict
`dict` of labels keyed on the nodes
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> labels=nx.draw_networkx_labels(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if labels is None:
labels = dict((n, n) for n in G.nodes())
# set optional alignment
horizontalalignment = kwds.get('horizontalalignment', 'center')
verticalalignment = kwds.get('verticalalignment', 'center')
text_items = {} # there is no text collection so we'll fake one
for n, label in labels.items():
(x, y) = pos[n]
if not cb.is_string_like(label):
label = str(label) # this will cause "1" and 1 to be labeled the same
t = ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
transform=ax.transData,
clip_on=True,
)
text_items[n] = t
return text_items
def draw_networkx_edge_labels(G, pos,
edge_labels=None,
label_pos=0.5,
font_size=10,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
bbox=None,
ax=None,
rotate=True,
**kwds):
"""Draw edge labels.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
alpha : float
The text transparency (default=1.0)
edge_labels : dictionary
Edge labels in a dictionary keyed by edge two-tuple of text
labels (default=None). Only labels for the keys in the dictionary
are drawn.
label_pos : float
Position of edge label along edge (0=head, 0.5=center, 1=tail)
font_size : int
        Font size for text labels (default=10)
font_color : string
Font color string (default='k' black)
font_weight : string
Font weight (default='normal')
font_family : string
Font family (default='sans-serif')
bbox : Matplotlib bbox
Specify text box shape and colors.
clip_on : bool
Turn on clipping at axis boundaries (default=True)
Returns
-------
dict
`dict` of labels keyed on the edges
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> edge_labels=nx.draw_networkx_edge_labels(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.lanl.gov/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
"""
try:
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if edge_labels is None:
labels = dict(((u, v), d) for u, v, d in G.edges(data=True))
else:
labels = edge_labels
text_items = {}
for (n1, n2), label in labels.items():
(x1, y1) = pos[n1]
(x2, y2) = pos[n2]
(x, y) = (x1 * label_pos + x2 * (1.0 - label_pos),
y1 * label_pos + y2 * (1.0 - label_pos))
if rotate:
angle = numpy.arctan2(y2-y1, x2-x1)/(2.0*numpy.pi)*360 # degrees
# make label orientation "right-side-up"
if angle > 90:
angle -= 180
if angle < - 90:
angle += 180
# transform data coordinate angle to screen coordinate angle
xy = numpy.array((x, y))
trans_angle = ax.transData.transform_angles(numpy.array((angle,)),
xy.reshape((1, 2)))[0]
else:
trans_angle = 0.0
# use default box of white with white border
if bbox is None:
bbox = dict(boxstyle='round',
ec=(1.0, 1.0, 1.0),
fc=(1.0, 1.0, 1.0),
)
if not cb.is_string_like(label):
label = str(label) # this will cause "1" and 1 to be labeled the same
# set optional alignment
horizontalalignment = kwds.get('horizontalalignment', 'center')
verticalalignment = kwds.get('verticalalignment', 'center')
t = ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
rotation=trans_angle,
transform=ax.transData,
bbox=bbox,
zorder=1,
clip_on=True,
)
text_items[(n1, n2)] = t
return text_items
def draw_circular(G, **kwargs):
"""Draw the graph G with a circular layout.
Parameters
----------
G : graph
A networkx graph
**kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
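    Examples
    --------
    A minimal call, mirroring the examples above:
    >>> G = nx.dodecahedral_graph()
    >>> nx.draw_circular(G)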
"""
draw(G, circular_layout(G), **kwargs)
def draw_random(G, **kwargs):
"""Draw the graph G with a random layout.
Parameters
----------
G : graph
A networkx graph
**kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, random_layout(G), **kwargs)
def draw_spectral(G, **kwargs):
"""Draw the graph G with a spectral layout.
Parameters
----------
G : graph
A networkx graph
**kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, spectral_layout(G), **kwargs)
def draw_spring(G, **kwargs):
"""Draw the graph G with a spring layout.
Parameters
----------
G : graph
A networkx graph
**kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, spring_layout(G), **kwargs)
def draw_shell(G, **kwargs):
"""Draw networkx graph with shell layout.
Parameters
----------
G : graph
A networkx graph
**kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
nlist = kwargs.get('nlist', None)
if nlist is not None:
del(kwargs['nlist'])
draw(G, shell_layout(G, nlist=nlist), **kwargs)
def draw_graphviz(G, prog="neato", **kwargs):
"""Draw networkx graph with graphviz layout.
Parameters
----------
G : graph
A networkx graph
prog : string, optional
Name of Graphviz layout program
**kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords.
"""
pos = nx.drawing.graphviz_layout(G, prog)
draw(G, pos, **kwargs)
def draw_nx(G, pos, **kwds):
"""For backward compatibility; use draw or draw_networkx."""
draw(G, pos, **kwds)
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import matplotlib as mpl
mpl.use('PS', warn=False)
import matplotlib.pyplot as plt
except:
raise SkipTest("matplotlib not available")
| lgpl-3.0 |
alasdairtran/digbeta | python/digbeta/heuristics.py | 3 | 18253 | """ Heuristics used to query the most uncertain candidate out of the unlabelled pool. """
import numpy as np
import copy
from joblib import Parallel, delayed
from numpy.random import permutation
from sklearn.preprocessing import LabelBinarizer
from sklearn.base import clone
def random_h(candidate_mask, n_candidates, **kwargs):
""" Return a random candidate.
Parameters
----------
candidate_mask : boolean array
The boolean array that tells us which data points the heuristic should look at.
n_candidates : int
The number of best candidates to be selected at each iteration.
Returns
-------
best_candidates : int
The indices of the best candidates (here it is random).
"""
candidate_index = np.where(candidate_mask)[0]
random_index = np.random.choice(candidate_index, n_candidates, replace=False)
return random_index
def entropy_h(X, candidate_mask, classifier, n_candidates, **kwargs):
""" Return the candidate whose prediction vector displays the greatest Shannon entropy.
Parameters
----------
X : array
The feature matrix of all the data points.
candidate_mask : boolean array
The boolean array that tells us which data points the heuristic should look at.
classifier : Classifier object
A classifier object that will be used to make predictions.
It should have the same interface as scikit-learn classifiers.
n_candidates : int
The number of best candidates to be selected at each iteration.
Returns
-------
best_candidates : int
The indices of the best candidates.
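    Examples
    --------
    A minimal, illustrative sketch (the data and classifier below are made
    up; any estimator exposing predict_proba should work):
    >>> import numpy as np
    >>> from sklearn.linear_model import LogisticRegression
    >>> X = np.random.randn(100, 5)
    >>> y = np.array([0, 1] * 50)
    >>> clf = LogisticRegression().fit(X[:50], y[:50])
    >>> candidate_mask = np.zeros(100, dtype=bool)
    >>> candidate_mask[50:] = True
    >>> best = entropy_h(X, candidate_mask, clf, n_candidates=3)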
"""
# predict probabilities
probs = classifier.predict_proba(X[candidate_mask])
    # compute Shannon entropy
candidate_shannon = -np.sum(probs * np.log(probs), axis=1)
# index the results properly
shannon = np.empty(len(candidate_mask))
shannon[:] = -np.inf
shannon[candidate_mask] = candidate_shannon
# pick the candidate with the greatest Shannon entropy
best_candidates = np.argsort(-shannon)[:n_candidates]
return best_candidates
def margin_h(X, candidate_mask, classifier, n_candidates, **kwargs):
""" Return the candidate with the smallest margin.
The margin is defined as the difference between the two largest values
in the prediction vector.
Parameters
----------
X : array
The feature matrix of all the data points.
candidate_mask : boolean array
The boolean array that tells us which data points the heuristic should look at.
classifier : Classifier object
A classifier object that will be used to make predictions.
It should have the same interface as scikit-learn classifiers.
n_candidates : int
The number of best candidates to be selected at each iteration.
Returns
-------
best_candidates : int
The indices of the best candidates.
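    Examples
    --------
    The margin score itself, on a toy prediction row (the values are
    illustrative; inside margin_h the probabilities come from
    classifier.predict_proba):
    >>> import numpy as np
    >>> probs = np.sort(np.array([[0.25, 0.25, 0.5]]), axis=1)
    >>> margin = np.abs(probs[:, -1] - probs[:, -2])  # 0.25 for this row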
"""
# predict probabilities
probs = classifier.predict_proba(X[candidate_mask])
# sort the probabilities from smallest to largest
probs = np.sort(probs, axis=1)
# compute the margin (difference between two largest values)
candidate_margin = np.abs(probs[:,-1] - probs[:,-2])
# index the results properly
margin = np.empty(len(candidate_mask))
margin[:] = +np.inf
margin[candidate_mask] = candidate_margin
# pick the candidate with the smallest margin
best_candidates = np.argsort(margin)[:n_candidates]
return best_candidates
def qbb_margin_h(X, y, candidate_mask, train_mask, n_candidates, committee,
committee_samples, **kwargs):
""" Return the candidate with the smallest average margin.
We first use bagging to train k classifiers. The margin is then defined as
the average difference between the two largest values in the prediction vector.
Parameters
----------
X : array
The feature matrix of all the data points.
y : array
The target vector of all the data points.
candidate_mask : boolean array
The boolean array that tells us which data points the heuristic should look at.
train_mask : boolean array
The boolean array that tells us which data points are currently in the training set.
n_candidates : int
The number of best candidates to be selected at each iteration.
committee : BaggingClassifier object
The committee should have the same interface as scikit-learn BaggingClassifier.
Returns
-------
best_candidates : int
The indices of the best candidates.
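    Examples
    --------
    A hedged sketch of how a committee might be set up (the data, sizes and
    base estimator below are illustrative only):
    >>> import numpy as np
    >>> from sklearn.ensemble import BaggingClassifier
    >>> from sklearn.linear_model import LogisticRegression
    >>> X = np.random.randn(60, 4)
    >>> y = np.array([0, 1] * 30)
    >>> train_mask = np.zeros(60, dtype=bool)
    >>> train_mask[:20] = True
    >>> committee = BaggingClassifier(LogisticRegression(), n_estimators=5)
    >>> best = qbb_margin_h(X, y, ~train_mask, train_mask, 2, committee,
    ...                     committee_samples=15)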
"""
# check that the max bagging sample is not too big
committee.max_samples = min(committee_samples, len(y[train_mask]))
# train and predict
committee.fit(X[train_mask], y[train_mask])
# predict
n_samples = len(X[candidate_mask])
n_classes = len(committee.classes_)
probs = np.zeros((n_samples, n_classes))
for member in committee.estimators_:
        member_prob = member.predict_proba(X[candidate_mask])
        if n_classes == len(member.classes_):
            probs += member_prob
        else:
            probs[:, member.classes_] += member_prob[:, range(len(member.classes_))]
# average out the probabilities
probs /= len(committee.estimators_)
# sort the probabilities from smallest to largest
probs = np.sort(probs, axis=1)
# compute the margin (difference between two largest values)
candidate_margin = np.abs(probs[:,-1] - probs[:,-2])
# index the results properly
margin = np.empty(len(candidate_mask))
margin[:] = +np.inf
margin[candidate_mask] = candidate_margin
# pick the candidate with the smallest margin
best_candidates = np.argsort(margin)[:n_candidates]
return best_candidates
def qbb_kl_h(X, y, candidate_mask, train_mask, n_candidates, committee, committee_samples,
**kwargs):
""" Return the candidate with the largest average KL divergence from the mean.
We first use bagging to train k classifiers. We then choose the candidate
that has the largest Kullback–Leibler divergence from the average.
Parameters
----------
X : array
The feature matrix of all the data points.
y : array
The target vector of all the data points.
candidate_mask : boolean array
The boolean array that tells us which data points the heuristic should look at.
train_mask : boolean array
The boolean array that tells us which data points are currently in the training set.
n_candidates : int
The number of best candidates to be selected at each iteration.
committee : BaggingClassifier object
The committee should have the same interface as scikit-learn BaggingClassifier.
Returns
-------
best_candidates : int
The indices of the best candidates.
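    Examples
    --------
    The per-member KL term used below, on toy probabilities (the values are
    illustrative only):
    >>> import numpy as np
    >>> p = np.array([[0.8, 0.2]])
    >>> avg = np.array([[0.6, 0.4]])
    >>> member_kl = np.sum(np.nan_to_num(p * np.log(p / avg)), axis=1)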
"""
# check that the max bagging sample is not too big
committee.max_samples = min(committee_samples, len(y[train_mask]))
# train the committee
committee.fit(X[train_mask], y[train_mask])
# predict
n_samples = len(X[candidate_mask])
n_classes = len(committee.classes_)
avg_probs = np.zeros((n_samples, n_classes))
prob_list = []
for member in committee.estimators_:
member_prob = member.predict_proba(X[candidate_mask])
if n_classes == len(member.classes_):
avg_probs += member_prob
prob_list.append(member_prob)
else:
full_member_prob = np.zeros((n_samples, n_classes))
full_member_prob[:, member.classes_] += member_prob[:, range(len(member.classes_))]
avg_probs += full_member_prob
prob_list.append(full_member_prob)
# average out the probabilities
avg_probs /= len(committee.estimators_)
# compute the KL divergence
avg_kl = np.zeros(avg_probs.shape[0])
for p in prob_list:
inner = np.nan_to_num(p * np.log(p / avg_probs))
member_kl = np.sum(inner, axis=1)
avg_kl += member_kl
# average out the KL divergence
avg_kl /= len(committee)
# index the results properly
kl = np.empty(len(candidate_mask))
kl[:] = -np.inf
kl[candidate_mask] = avg_kl
    # pick the candidates with the largest average KL divergence
best_candidates = np.argsort(-kl)[:n_candidates]
return best_candidates
def _compute_A(X, pi, classes):
""" Compute the A matrix in the variance estimation technique.
Parameters
----------
X : array
The feature matrix.
pi : array
The probability matrix predicted by the classifier.
classes : array
The list of class names ordered lexicographically.
Returns
-------
A : array
        The A matrix as part of the variance calculation.
"""
n_classes = len(classes)
n_features = X.shape[1]
n_samples = X.shape[0]
width = n_classes * n_features
one_in_k = LabelBinarizer(pos_label=1, neg_label=0).fit_transform(classes)
I_same = one_in_k.repeat(n_features, axis=0)
I_same = np.tile(I_same, n_samples)
I_diff = 1 - I_same
A = np.tile(pi.flatten(), (width, 1))
B = 1 - A
C = -A
D = pi.transpose().repeat(n_features, axis=0).repeat(n_classes, axis=1)
E = X.transpose().repeat(n_classes, axis=1)
E = np.tile(E, (n_classes, 1))
G = A * B * I_same + C * D * I_diff
G = E * G
outer = np.dot(G, G.transpose())
return outer
def _compute_F(X, pi, classes, C=1):
""" Compute the F matrix in the variance estimation technqiue.
Parameters
----------
X : array
The feature matrix.
pi : array
The probability matrix predicted by the classifier.
classes : array
The list of class names ordered lexicographically.
C : float
The regularisation parameter in logistic regression.
Returns
-------
F : array
        The F matrix as part of the variance calculation.
"""
n_classes = len(classes)
n_features = X.shape[1]
n_samples = X.shape[0]
width = n_classes * n_features
I_diag = np.eye(width)
mini_off_diag = 1 - np.eye(n_features)
mini_zeros = np.zeros((n_features, n_features * n_classes))
I_mini_off_diag = np.hstack((mini_off_diag, mini_zeros))
I_mini_off_diag = np.tile(I_mini_off_diag, n_classes - 1)
I_mini_off_diag = np.hstack((I_mini_off_diag, mini_off_diag))
I_mini_off_diag = np.hsplit(I_mini_off_diag, n_classes)
I_mini_off_diag = np.vstack(I_mini_off_diag)
I_main_off_diag = 1 - I_diag - I_mini_off_diag
M = np.tile(X.transpose(), (n_classes, 1))
N = pi.transpose().repeat(n_features, axis=0)
F_1 = np.dot(M * N * (1 - N), M.transpose()) + C
F_2 = np.dot(M * N * (1 - N), M.transpose())
F_3 = np.dot(M * N, M.transpose() * N.transpose())
F = F_1 * I_diag + F_2 * I_mini_off_diag + F_3 * I_main_off_diag
F = F / n_samples
return F
def compute_pool_variance(X, pi, classes, C=1):
""" Estimate the variance of the pool.
Parameters
----------
X : array
The feature matrix.
pi : array
The probability matrix predicted by the classifier.
classes : array
The list of class names ordered lexicographically.
C : float
The regularisation parameter in logistic regression.
Returns
-------
variance : float
The estimated variance on the pool X.
"""
A = _compute_A(X, pi, classes)
F = _compute_F(X, pi, classes, C=C)
return np.trace(np.dot(A, np.linalg.inv(F)))
def pool_variance_h(X, y, candidate_mask, train_mask, classifier, n_candidates,
pool_n, C, n_jobs=-1, **kwargs):
""" Return the candidate that will minimise the expected variance of the predictions.
Parameters
----------
    X : array
        The feature matrix of all the data points.
    y : array
        The target vector of all the data points.
    candidate_mask : boolean array
        The boolean array that tells us which data points the heuristic should look at.
    train_mask : boolean array
        The boolean array that tells us which data points are currently in the training set.
    classifier : Classifier object
        A classifier object that will be used to make predictions.
    n_candidates : int
        The number of best candidates to be selected at each iteration.
    pool_n : int
        The size of the sample pool used to estimate the variance.
    C : float
        The regularisation parameter of Logistic Regression.
    n_jobs : int
        The number of parallel jobs (-1 to use all cores).
    Returns
    -------
    best_candidates : array of int
        The indices of the best candidates.
"""
classes = classifier.classes_ # sorted lexicographically
n_classes = len(classes)
candidate_size = np.sum(train_mask)
n_features = X.shape[1]
variance = np.empty(len(candidate_mask))
variance[:] = np.inf
# the probabilities used to calculate expected value of pool
probs = classifier.predict_proba(X[candidate_mask])
# copy the classifier (avoid modifying the original classifier)
classifier_plus = clone(classifier)
# construct the sample pool (used to estimate the variance)
    unlabelled_indices = np.where(~train_mask)[0]
pool_indices = permutation(unlabelled_indices)[:pool_n]
pool_mask = np.zeros(len(candidate_mask), dtype=bool)
pool_mask[pool_indices] = True
# let's look at each candidate
candidate_indices = np.where(candidate_mask)[0]
results = Parallel(n_jobs=n_jobs)(delayed(_parallel_variance_estimate)(
X, y.copy(), train_mask.copy(), pool_mask,
clone(classifier_plus), classes, n_classes, probs, i, index, C)
for i, index in enumerate(candidate_indices))
indices, expected = zip(*results)
indices, expected = np.asarray(indices), np.asarray(expected)
assert not np.isnan(expected).any(), 'Some expected values are undefined.'
variance[indices] = expected
# pick the candidate with the smallest expected variance
best_candidates = np.argsort(variance)[:n_candidates]
return best_candidates
def _parallel_variance_estimate(X, y, train_mask, pool_mask, classifier,
classes, n_classes, probs, i, index, C):
""" Helper function. """
    # assume a label and compute the resulting pool variance
potential_variance = np.zeros(n_classes)
train_mask[index] = True
for cls_idx, cls in enumerate(classes):
y[index] = cls
classifier.fit(X[train_mask], y[train_mask])
pi = classifier.predict_proba(X[pool_mask])
potential_variance[cls_idx] = compute_pool_variance(X[pool_mask], pi, classes, C)
    # calculate the expected variance and save the result
expected = np.dot(probs[i], potential_variance)
return index, expected
def compute_pool_entropy(pi):
""" Estimate the variance of the pool.
Parameters
----------
pi : array
The probability matrix predicted by the classifier.
Returns
-------
entropy : float
The estimated entropy on the pool.
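    Examples
    --------
    Toy illustration (uniform predictions over two classes for three samples):
    >>> import numpy as np
    >>> pi = np.full((3, 2), 0.5)
    >>> h = compute_pool_entropy(pi)  # 3 * 2 * 0.5 * log(2), roughly 2.08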
"""
return np.nan_to_num(-np.sum(pi * np.log(pi)))
def pool_entropy_h(X, y, candidate_mask, train_mask, classifier, n_candidates,
pool_n, n_jobs=-1, **kwargs):
""" Return the candidate that will minimise the expected entropy of the predictions.
Parameters
----------
    X : array
        The feature matrix of all the data points.
    y : array
        The target vector of all the data points.
    candidate_mask : boolean array
        The boolean array that tells us which data points the heuristic should look at.
    train_mask : boolean array
        The boolean array that tells us which data points are currently in the training set.
    classifier : Classifier object
        A classifier object that will be used to make predictions.
    n_candidates : int
        The number of best candidates to be selected at each iteration.
    pool_n : int
        The size of the sample pool used to estimate the entropy.
    n_jobs : int
        The number of parallel jobs (-1 to use all cores).
    Returns
    -------
    best_candidates : array of int
        The indices of the best candidates.
"""
classes = classifier.classes_ # sorted lexicographically
n_classes = len(classes)
candidate_size = np.sum(train_mask)
n_features = X.shape[1]
entropy = np.empty(len(candidate_mask))
entropy[:] = np.inf
# the probabilities used to calculate expected value of pool
probs = classifier.predict_proba(X[candidate_mask])
# copy the classifier (avoid modifying the original classifier)
classifier_plus = clone(classifier)
# construct the sample pool (used to estimate the entropy)
    unlabelled_indices = np.where(~train_mask)[0]
pool_indices = permutation(unlabelled_indices)[:pool_n]
pool_mask = np.zeros(len(candidate_mask), dtype=bool)
pool_mask[pool_indices] = True
# let's look at each candidate
candidate_indices = np.where(candidate_mask)[0]
results = Parallel(n_jobs=n_jobs)(delayed(_parallel_entropy_estimate)(
X, y.copy(), train_mask.copy(), pool_mask,
clone(classifier_plus), classes, n_classes, probs, i, index)
for i, index in enumerate(candidate_indices))
indices, expected = zip(*results)
indices, expected = np.asarray(indices), np.asarray(expected)
assert not np.isnan(expected).any(), 'Some expected values are undefined.'
entropy[indices] = expected
# pick the candidate with the smallest expected entropy
best_candidates = np.argsort(entropy)[:n_candidates]
return best_candidates
def _parallel_entropy_estimate(X, y, train_mask, pool_mask, classifier,
classes, n_classes, probs, i, index):
""" Helper function. """
# assume a label and compute entropy
potential_entropy = np.zeros(n_classes)
train_mask[index] = True
for cls_idx, cls in enumerate(classes):
y[index] = cls
classifier.fit(X[train_mask], y[train_mask])
pi = classifier.predict_proba(X[pool_mask])
potential_entropy[cls_idx] = compute_pool_entropy(pi)
# calculate expected entropy and save result
expected = np.dot(probs[i], potential_entropy)
return index, expected
| gpl-3.0 |
effigies/mne-python | mne/io/base.py | 1 | 76744 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Denis Engemann <denis.engemann@gmail.com>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
from math import floor, ceil
import copy
from copy import deepcopy
import warnings
import os
import os.path as op
import numpy as np
from scipy.signal import hilbert
from scipy import linalg
from .constants import FIFF
from .pick import pick_types, channel_type, pick_channels
from .meas_info import write_meas_info
from .proj import (setup_proj, activate_proj, proj_equal, ProjMixin,
_has_eeg_average_ref_proj, make_eeg_average_ref_proj)
from ..channels.channels import (ContainsMixin, PickDropChannelsMixin,
SetChannelsMixin)
from ..channels.layout import read_montage, apply_montage, Montage
from .compensator import set_current_comp
from .write import (start_file, end_file, start_block, end_block,
write_dau_pack16, write_float, write_double,
write_complex64, write_complex128, write_int,
write_id, write_string)
from ..filter import (low_pass_filter, high_pass_filter, band_pass_filter,
notch_filter, band_stop_filter, resample)
from ..parallel import parallel_func
from ..utils import (_check_fname, estimate_rank, _check_pandas_installed,
check_fname, _get_stim_channel, object_hash,
logger, verbose)
from ..viz import plot_raw, plot_raw_psds, _mutable_defaults
from ..externals.six import string_types
from ..event import concatenate_events
class _BaseRaw(ProjMixin, ContainsMixin, PickDropChannelsMixin,
SetChannelsMixin):
"""Base class for Raw data"""
@verbose
def __init__(self, *args, **kwargs):
raise NotImplementedError
    def _read_segment(self, start, stop, sel, projector, verbose):
raise NotImplementedError
def __del__(self):
# remove file for memmap
if hasattr(self, '_data') and hasattr(self._data, 'filename'):
# First, close the file out; happens automatically on del
filename = self._data.filename
del self._data
# Now file can be removed
os.remove(filename)
def __enter__(self):
""" Entering with block """
return self
def __exit__(self, exception_type, exception_val, trace):
""" Exiting with block """
try:
self.close()
except:
return exception_type, exception_val, trace
def __hash__(self):
if not self.preload:
raise RuntimeError('Cannot hash raw unless preloaded')
return object_hash(dict(info=self.info, data=self._data))
def _add_eeg_ref(self, add_eeg_ref):
"""Helper to add an average EEG reference"""
if add_eeg_ref:
eegs = pick_types(self.info, meg=False, eeg=True, ref_meg=False)
projs = self.info['projs']
if len(eegs) > 0 and not _has_eeg_average_ref_proj(projs):
eeg_ref = make_eeg_average_ref_proj(self.info, activate=False)
projs.append(eeg_ref)
def _parse_get_set_params(self, item):
# make sure item is a tuple
if not isinstance(item, tuple): # only channel selection passed
item = (item, slice(None, None, None))
if len(item) != 2: # should be channels and time instants
raise RuntimeError("Unable to access raw data (need both channels "
"and time)")
if isinstance(item[0], slice):
start = item[0].start if item[0].start is not None else 0
nchan = self.info['nchan']
stop = item[0].stop if item[0].stop is not None else nchan
step = item[0].step if item[0].step is not None else 1
sel = list(range(start, stop, step))
else:
sel = item[0]
if isinstance(item[1], slice):
time_slice = item[1]
start, stop, step = (time_slice.start, time_slice.stop,
time_slice.step)
else:
item1 = item[1]
# Let's do automated type conversion to integer here
if np.array(item[1]).dtype.kind == 'i':
item1 = int(item1)
if isinstance(item1, (int, np.integer)):
start, stop, step = item1, item1 + 1, 1
else:
raise ValueError('Must pass int or slice to __getitem__')
if start is None:
start = 0
        if (step is not None) and (step != 1):
raise ValueError('step needs to be 1 : %d given' % step)
if isinstance(sel, (int, np.integer)):
sel = np.array([sel])
if sel is not None and len(sel) == 0:
raise ValueError("Empty channel list")
return sel, start, stop
def __getitem__(self, item):
"""getting raw data content with python slicing"""
sel, start, stop = self._parse_get_set_params(item)
if self.preload:
data, times = self._data[sel, start:stop], self._times[start:stop]
else:
data, times = self._read_segment(start=start, stop=stop, sel=sel,
projector=self._projector,
verbose=self.verbose)
return data, times
def __setitem__(self, item, value):
"""setting raw data content with python slicing"""
if not self.preload:
raise RuntimeError('Modifying data of Raw is only supported '
'when preloading is used. Use preload=True '
'(or string) in the constructor.')
sel, start, stop = self._parse_get_set_params(item)
# set the data
self._data[sel, start:stop] = value
def anonymize(self):
"""Anonymize data
This function will remove info['subject_info'] if it exists."""
self.info._anonymize()
@verbose
def apply_function(self, fun, picks, dtype, n_jobs, verbose=None, *args,
**kwargs):
""" Apply a function to a subset of channels.
The function "fun" is applied to the channels defined in "picks". The
data of the Raw object is modified inplace. If the function returns
a different data type (e.g. numpy.complex) it must be specified using
the dtype parameter, which causes the data type used for representing
the raw data to change.
The Raw object has to be constructed using preload=True (or string).
        Note: If n_jobs > 1, more memory is required as "len(picks) * n_times"
        additional time points need to be temporarily stored in memory.
        Note: If the data type changes (dtype != None), more memory is required
        since the original and the converted data need to be stored in
        memory.
Parameters
----------
fun : function
A function to be applied to the channels. The first argument of
fun has to be a timeseries (numpy.ndarray). The function must
return an numpy.ndarray with the same size as the input.
picks : array-like of int
Indices of channels to apply the function to.
dtype : numpy.dtype
Data type to use for raw data after applying the function. If None
the data type is not modified.
n_jobs: int
Number of jobs to run in parallel.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
*args :
Additional positional arguments to pass to fun (first pos. argument
of fun is the timeseries of a channel).
**kwargs :
Keyword arguments to pass to fun.
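        Examples
        --------
        A hedged sketch that squares the picked channels in place (assumes
        ``raw`` is a preloaded Raw instance; the choice of np.square is
        illustrative):
        >>> import numpy as np
        >>> from mne import pick_types
        >>> picks = pick_types(raw.info, meg=True, eeg=False)
        >>> raw.apply_function(np.square, picks, dtype=None, n_jobs=1)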
"""
if not self.preload:
raise RuntimeError('Raw data needs to be preloaded. Use '
'preload=True (or string) in the constructor.')
if not callable(fun):
raise ValueError('fun needs to be a function')
data_in = self._data
if dtype is not None and dtype != self._data.dtype:
self._data = self._data.astype(dtype)
if n_jobs == 1:
# modify data inplace to save memory
for idx in picks:
self._data[idx, :] = fun(data_in[idx, :], *args, **kwargs)
else:
# use parallel function
parallel, p_fun, _ = parallel_func(fun, n_jobs)
data_picks_new = parallel(p_fun(data_in[p], *args, **kwargs)
for p in picks)
for pp, p in enumerate(picks):
self._data[p, :] = data_picks_new[pp]
@verbose
def apply_hilbert(self, picks, envelope=False, n_jobs=1, verbose=None):
""" Compute analytic signal or envelope for a subset of channels.
If envelope=False, the analytic signal for the channels defined in
"picks" is computed and the data of the Raw object is converted to
a complex representation (the analytic signal is complex valued).
If envelope=True, the absolute value of the analytic signal for the
channels defined in "picks" is computed, resulting in the envelope
signal.
Note: DO NOT use envelope=True if you intend to compute an inverse
solution from the raw data. If you want to compute the
envelope in source space, use envelope=False and compute the
envelope after the inverse solution has been obtained.
Note: If envelope=False, more memory is required since the original
raw data as well as the analytic signal have temporarily to
be stored in memory.
Note: If n_jobs > 1 and envelope=True, more memory is required as
"len(picks) * n_times" additional time points need to be
              temporarily stored in memory.
Parameters
----------
picks : array-like of int
Indices of channels to apply the function to.
envelope : bool (default: False)
Compute the envelope signal of each channel.
n_jobs: int
Number of jobs to run in parallel.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Notes
-----
The analytic signal "x_a(t)" of "x(t)" is::
x_a = F^{-1}(F(x) 2U) = x + i y
where "F" is the Fourier transform, "U" the unit step function,
and "y" the Hilbert transform of "x". One usage of the analytic
signal is the computation of the envelope signal, which is given by
"e(t) = abs(x_a(t))". Due to the linearity of Hilbert transform and the
        MNE inverse solution, the envelope in source space can be obtained
by computing the analytic signal in sensor space, applying the MNE
inverse, and computing the envelope in source space.
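        Examples
        --------
        Hedged usage sketch (assumes ``raw`` is a preloaded Raw instance; the
        pick selection is illustrative):
        >>> from mne import pick_types
        >>> picks = pick_types(raw.info, meg='grad', eeg=False)
        >>> raw.apply_hilbert(picks, envelope=True)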
"""
if envelope:
self.apply_function(_envelope, picks, None, n_jobs)
else:
self.apply_function(hilbert, picks, np.complex64, n_jobs)
@verbose
def filter(self, l_freq, h_freq, picks=None, filter_length='10s',
l_trans_bandwidth=0.5, h_trans_bandwidth=0.5, n_jobs=1,
method='fft', iir_params=None, verbose=None):
"""Filter a subset of channels.
Applies a zero-phase low-pass, high-pass, band-pass, or band-stop
filter to the channels selected by "picks". The data of the Raw
object is modified inplace.
The Raw object has to be constructed using preload=True (or string).
l_freq and h_freq are the frequencies below which and above which,
respectively, to filter out of the data. Thus the uses are:
l_freq < h_freq: band-pass filter
l_freq > h_freq: band-stop filter
l_freq is not None, h_freq is None: low-pass filter
l_freq is None, h_freq is not None: high-pass filter
Note: If n_jobs > 1, more memory is required as "len(picks) * n_times"
additional time points need to be temporarily stored in memory.
Note: self.info['lowpass'] and self.info['highpass'] are only updated
with picks=None.
Parameters
----------
l_freq : float | None
Low cut-off frequency in Hz. If None the data are only low-passed.
h_freq : float | None
High cut-off frequency in Hz. If None the data are only
high-passed.
picks : array-like of int | None
Indices of channels to filter. If None only the data (MEG/EEG)
channels will be filtered.
filter_length : str (Default: '10s') | int | None
Length of the filter to use. If None or "len(x) < filter_length",
the filter length used is len(x). Otherwise, if int, overlap-add
            filtering with a filter of the specified length (in samples) is
used (faster for long signals). If str, a human-readable time in
units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
to the shortest power-of-two length at least that duration.
Not used for 'iir' filters.
l_trans_bandwidth : float
Width of the transition band at the low cut-off frequency in Hz
(high pass or cutoff 1 in bandpass). Not used if 'order' is
specified in iir_params.
h_trans_bandwidth : float
Width of the transition band at the high cut-off frequency in Hz
(low pass or cutoff 2 in bandpass). Not used if 'order' is
specified in iir_params.
n_jobs : int | str
Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
is installed properly, CUDA is initialized, and method='fft'.
method : str
'fft' will use overlap-add FIR filtering, 'iir' will use IIR
forward-backward filtering (via filtfilt).
iir_params : dict | None
Dictionary of parameters to use for IIR filtering.
See mne.filter.construct_iir_filter for details. If iir_params
is None and method="iir", 4th order Butterworth will be used.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
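        Examples
        --------
        A hedged band-pass sketch (assumes ``raw`` is a preloaded Raw
        instance; the 1-40 Hz band is arbitrary):
        >>> raw.filter(l_freq=1.0, h_freq=40.0)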
"""
if verbose is None:
verbose = self.verbose
fs = float(self.info['sfreq'])
if l_freq == 0:
l_freq = None
if h_freq is not None and h_freq > (fs / 2.):
h_freq = None
if l_freq is not None and not isinstance(l_freq, float):
l_freq = float(l_freq)
if h_freq is not None and not isinstance(h_freq, float):
h_freq = float(h_freq)
if not self.preload:
raise RuntimeError('Raw data needs to be preloaded to filter. Use '
'preload=True (or string) in the constructor.')
if picks is None:
if 'ICA ' in ','.join(self.ch_names):
pick_parameters = dict(misc=True, ref_meg=False)
else:
pick_parameters = dict(meg=True, eeg=True, ref_meg=False)
picks = pick_types(self.info, exclude=[], **pick_parameters)
# let's be safe.
if len(picks) < 1:
raise RuntimeError('Could not find any valid channels for '
'your Raw object. Please contact the '
'MNE-Python developers.')
# update info if filter is applied to all data channels,
# and it's not a band-stop filter
if h_freq is not None and (l_freq is None or l_freq < h_freq) and \
h_freq < self.info['lowpass']:
self.info['lowpass'] = h_freq
if l_freq is not None and (h_freq is None or l_freq < h_freq) and \
l_freq > self.info['highpass']:
self.info['highpass'] = l_freq
if l_freq is None and h_freq is not None:
logger.info('Low-pass filtering at %0.2g Hz' % h_freq)
low_pass_filter(self._data, fs, h_freq,
filter_length=filter_length,
trans_bandwidth=h_trans_bandwidth, method=method,
iir_params=iir_params, picks=picks, n_jobs=n_jobs,
copy=False)
if l_freq is not None and h_freq is None:
logger.info('High-pass filtering at %0.2g Hz' % l_freq)
high_pass_filter(self._data, fs, l_freq,
filter_length=filter_length,
trans_bandwidth=l_trans_bandwidth, method=method,
iir_params=iir_params, picks=picks, n_jobs=n_jobs,
copy=False)
if l_freq is not None and h_freq is not None:
if l_freq < h_freq:
logger.info('Band-pass filtering from %0.2g - %0.2g Hz'
% (l_freq, h_freq))
self._data = band_pass_filter(self._data, fs, l_freq, h_freq,
filter_length=filter_length,
l_trans_bandwidth=l_trans_bandwidth,
h_trans_bandwidth=h_trans_bandwidth,
method=method, iir_params=iir_params, picks=picks,
n_jobs=n_jobs, copy=False)
else:
logger.info('Band-stop filtering from %0.2g - %0.2g Hz'
% (h_freq, l_freq))
self._data = band_stop_filter(self._data, fs, h_freq, l_freq,
filter_length=filter_length,
l_trans_bandwidth=h_trans_bandwidth,
h_trans_bandwidth=l_trans_bandwidth, method=method,
iir_params=iir_params, picks=picks, n_jobs=n_jobs,
copy=False)
@verbose
def notch_filter(self, freqs, picks=None, filter_length='10s',
notch_widths=None, trans_bandwidth=1.0, n_jobs=1,
method='fft', iir_params=None,
mt_bandwidth=None, p_value=0.05, verbose=None):
"""Notch filter a subset of channels.
Applies a zero-phase notch filter to the channels selected by
"picks". The data of the Raw object is modified inplace.
The Raw object has to be constructed using preload=True (or string).
Note: If n_jobs > 1, more memory is required as "len(picks) * n_times"
        additional time points need to be temporarily stored in memory.
Parameters
----------
freqs : float | array of float | None
Specific frequencies to filter out from data, e.g.,
np.arange(60, 241, 60) in the US or np.arange(50, 251, 50) in
Europe. None can only be used with the mode 'spectrum_fit',
where an F test is used to find sinusoidal components.
picks : array-like of int | None
Indices of channels to filter. If None only the data (MEG/EEG)
channels will be filtered.
filter_length : str (Default: '10s') | int | None
Length of the filter to use. If None or "len(x) < filter_length",
the filter length used is len(x). Otherwise, if int, overlap-add
            filtering with a filter of the specified length (in samples) is
used (faster for long signals). If str, a human-readable time in
units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
to the shortest power-of-two length at least that duration.
Not used for 'iir' filters.
notch_widths : float | array of float | None
Width of each stop band (centred at each freq in freqs) in Hz.
If None, freqs / 200 is used.
trans_bandwidth : float
Width of the transition band in Hz.
n_jobs : int | str
Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
is installed properly, CUDA is initialized, and method='fft'.
method : str
'fft' will use overlap-add FIR filtering, 'iir' will use IIR
forward-backward filtering (via filtfilt). 'spectrum_fit' will
use multi-taper estimation of sinusoidal components.
iir_params : dict | None
Dictionary of parameters to use for IIR filtering.
See mne.filter.construct_iir_filter for details. If iir_params
is None and method="iir", 4th order Butterworth will be used.
mt_bandwidth : float | None
The bandwidth of the multitaper windowing function in Hz.
Only used in 'spectrum_fit' mode.
p_value : float
p-value to use in F-test thresholding to determine significant
sinusoidal components to remove when method='spectrum_fit' and
freqs=None. Note that this will be Bonferroni corrected for the
number of frequencies, so large p-values may be justified.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Notes
-----
For details, see mne.filter.notch_filter.
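        Examples
        --------
        Illustrative only (assumes ``raw`` is preloaded and the recording
        contains 50 Hz mains interference):
        >>> import numpy as np
        >>> raw.notch_filter(np.arange(50, 251, 50))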
"""
if verbose is None:
verbose = self.verbose
fs = float(self.info['sfreq'])
if picks is None:
if 'ICA ' in ','.join(self.ch_names):
pick_parameters = dict(misc=True)
else:
pick_parameters = dict(meg=True, eeg=True)
picks = pick_types(self.info, exclude=[], **pick_parameters)
# let's be safe.
if len(picks) < 1:
raise RuntimeError('Could not find any valid channels for '
'your Raw object. Please contact the '
'MNE-Python developers.')
if not self.preload:
raise RuntimeError('Raw data needs to be preloaded to filter. Use '
'preload=True (or string) in the constructor.')
self._data = notch_filter(self._data, fs, freqs,
filter_length=filter_length,
notch_widths=notch_widths,
trans_bandwidth=trans_bandwidth,
method=method, iir_params=iir_params,
mt_bandwidth=mt_bandwidth, p_value=p_value,
picks=picks, n_jobs=n_jobs, copy=False)
@verbose
def resample(self, sfreq, npad=100, window='boxcar',
stim_picks=None, n_jobs=1, verbose=None):
"""Resample data channels.
Resamples all channels. The data of the Raw object is modified inplace.
The Raw object has to be constructed using preload=True (or string).
WARNING: The intended purpose of this function is primarily to speed
up computations (e.g., projection calculation) when precise timing
of events is not required, as downsampling raw data effectively
jitters trigger timings. It is generally recommended not to epoch
downsampled data, but instead epoch and then downsample, as epoching
downsampled data jitters triggers.
Parameters
----------
sfreq : float
New sample rate to use.
npad : int
Amount to pad the start and end of the data.
window : string or tuple
Window to use in resampling. See scipy.signal.resample.
stim_picks : array of int | None
Stim channels. These channels are simply subsampled or
supersampled (without applying any filtering). This reduces
resampling artifacts in stim channels, but may lead to missing
triggers. If None, stim channels are automatically chosen using
mne.pick_types(raw.info, meg=False, stim=True, exclude=[]).
n_jobs : int | str
Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
is installed properly and CUDA is initialized.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Notes
-----
For some data, it may be more accurate to use npad=0 to reduce
artifacts. This is dataset dependent -- check your data!
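        Examples
        --------
        Hedged sketch (assumes ``raw`` is preloaded; 100 Hz is an arbitrary
        target rate):
        >>> raw.resample(100., npad=100)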
"""
if not self.preload:
raise RuntimeError('Can only resample preloaded data')
sfreq = float(sfreq)
o_sfreq = float(self.info['sfreq'])
offsets = np.concatenate(([0], np.cumsum(self._raw_lengths)))
new_data = list()
# set up stim channel processing
if stim_picks is None:
stim_picks = pick_types(self.info, meg=False, ref_meg=False,
stim=True, exclude=[])
stim_picks = np.asanyarray(stim_picks)
ratio = sfreq / o_sfreq
for ri in range(len(self._raw_lengths)):
data_chunk = self._data[:, offsets[ri]:offsets[ri + 1]]
new_data.append(resample(data_chunk, sfreq, o_sfreq, npad,
n_jobs=n_jobs))
new_ntimes = new_data[ri].shape[1]
# Now deal with the stim channels. In empirical testing, it was
# faster to resample all channels (above) and then replace the
# stim channels than it was to only resample the proper subset
# of channels and then use np.insert() to restore the stims
# figure out which points in old data to subsample
# protect against out-of-bounds, which can happen (having
# one sample more than expected) due to padding
stim_inds = np.minimum(np.floor(np.arange(new_ntimes)
/ ratio).astype(int),
data_chunk.shape[1] - 1)
for sp in stim_picks:
new_data[ri][sp] = data_chunk[[sp]][:, stim_inds]
self._first_samps[ri] = int(self._first_samps[ri] * ratio)
self._last_samps[ri] = self._first_samps[ri] + new_ntimes - 1
self._raw_lengths[ri] = new_ntimes
# adjust affected variables
self._data = np.concatenate(new_data, axis=1)
self.first_samp = self._first_samps[0]
self.last_samp = self.first_samp + self._data.shape[1] - 1
self.info['sfreq'] = sfreq
self._times = (np.arange(self.n_times, dtype=np.float64)
/ self.info['sfreq'])
def crop(self, tmin=0.0, tmax=None, copy=True):
"""Crop raw data file.
Limit the data from the raw file to go between specific times. Note
that the new tmin is assumed to be t=0 for all subsequently called
functions (e.g., time_as_index, or Epochs). New first_samp and
last_samp are set accordingly. And data are modified in-place when
called with copy=False.
Parameters
----------
tmin : float
New start time (must be >= 0).
tmax : float | None
New end time of the data (cannot exceed data duration).
copy : bool
If False Raw is cropped in place.
Returns
-------
raw : instance of Raw
The cropped raw object.
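        Examples
        --------
        Keep the first ten seconds (illustrative; assumes the recording is at
        least that long):
        >>> raw_short = raw.crop(tmin=0.0, tmax=10.0, copy=True)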
"""
raw = self.copy() if copy is True else self
max_time = (raw.n_times - 1) / raw.info['sfreq']
if tmax is None:
tmax = max_time
if tmin > tmax:
raise ValueError('tmin must be less than tmax')
if tmin < 0.0:
raise ValueError('tmin must be >= 0')
elif tmax > max_time:
raise ValueError('tmax must be less than or equal to the max raw '
'time (%0.4f sec)' % max_time)
smin = raw.time_as_index(tmin)[0]
smax = raw.time_as_index(tmax)[0]
cumul_lens = np.concatenate(([0], np.array(raw._raw_lengths,
dtype='int')))
cumul_lens = np.cumsum(cumul_lens)
keepers = np.logical_and(np.less(smin, cumul_lens[1:]),
np.greater_equal(smax, cumul_lens[:-1]))
keepers = np.where(keepers)[0]
raw._first_samps = np.atleast_1d(raw._first_samps[keepers])
# Adjust first_samp of first used file!
raw._first_samps[0] += smin - cumul_lens[keepers[0]]
raw._last_samps = np.atleast_1d(raw._last_samps[keepers])
raw._last_samps[-1] -= cumul_lens[keepers[-1] + 1] - 1 - smax
raw._raw_lengths = raw._last_samps - raw._first_samps + 1
raw.rawdirs = [r for ri, r in enumerate(raw.rawdirs)
if ri in keepers]
raw.first_samp = raw._first_samps[0]
raw.last_samp = raw.first_samp + (smax - smin)
if raw.preload:
raw._data = raw._data[:, smin:smax + 1]
raw._times = np.arange(raw.n_times) / raw.info['sfreq']
return raw
@verbose
def save(self, fname, picks=None, tmin=0, tmax=None, buffer_size_sec=10,
drop_small_buffer=False, proj=False, format='single',
overwrite=False, split_size='2GB', verbose=None):
"""Save raw data to file
Parameters
----------
fname : string
File name of the new dataset. This has to be a new filename
unless data have been preloaded. Filenames should end with
raw.fif, raw.fif.gz, raw_sss.fif, raw_sss.fif.gz, raw_tsss.fif
or raw_tsss.fif.gz.
picks : array-like of int | None
Indices of channels to include. If None all channels are kept.
tmin : float | None
Time in seconds of first sample to save. If None first sample
is used.
tmax : float | None
Time in seconds of last sample to save. If None last sample
is used.
buffer_size_sec : float | None
Size of data chunks in seconds. If None, the buffer size of
the original file is used.
drop_small_buffer : bool
            Whether to drop the last buffer. Dropping it is required by
            maxfilter (SSS), which only accepts raw files with buffers of the
            same size.
proj : bool
If True the data is saved with the projections applied (active).
Note: If apply_proj() was used to apply the projections,
            the projections will be active even if proj is False.
format : str
Format to use to save raw data. Valid options are 'double',
'single', 'int', and 'short' for 64- or 32-bit float, or 32- or
16-bit integers, respectively. It is STRONGLY recommended to use
'single', as this is backward-compatible, and is standard for
maintaining precision. Note that using 'short' or 'int' may result
in loss of precision, complex data cannot be saved as 'short',
and neither complex data types nor real data stored as 'double'
can be loaded with the MNE command-line tools. See raw.orig_format
to determine the format the original data were stored in.
overwrite : bool
If True, the destination file (if it exists) will be overwritten.
If False (default), an error will be raised if the file exists.
split_size : string | int
Large raw files are automatically split into multiple pieces. This
parameter specifies the maximum size of each piece. If the
parameter is an integer, it specifies the size in Bytes. It is
also possible to pass a human-readable string, e.g., 100MB.
Note: Due to FIFF file limitations, the maximum split size is 2GB.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Notes
-----
If Raw is a concatenation of several raw files, *be warned* that only
the measurement information from the first raw file is stored. This
likely means that certain operations with external tools may not
work properly on a saved concatenated file (e.g., probably some
or all forms of SSS). It is recommended not to concatenate and
then save raw files for this reason.
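        Examples
        --------
        Hedged sketch (the filename below is made up; it must end in an
        accepted suffix such as raw.fif):
        >>> raw.save('subject01_raw.fif', overwrite=True)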
"""
check_fname(fname, 'raw', ('raw.fif', 'raw_sss.fif', 'raw_tsss.fif',
'raw.fif.gz', 'raw_sss.fif.gz',
'raw_tsss.fif.gz'))
if isinstance(split_size, string_types):
exp = dict(MB=20, GB=30).get(split_size[-2:], None)
if exp is None:
raise ValueError('split_size has to end with either'
'"MB" or "GB"')
split_size = int(float(split_size[:-2]) * 2 ** exp)
if split_size > 2147483648:
raise ValueError('split_size cannot be larger than 2GB')
fname = op.realpath(fname)
if not self.preload and fname in self._filenames:
raise ValueError('You cannot save data to the same file.'
' Please use a different filename.')
if self.preload:
if np.iscomplexobj(self._data):
warnings.warn('Saving raw file with complex data. Loading '
'with command-line MNE tools will not work.')
type_dict = dict(short=FIFF.FIFFT_DAU_PACK16,
int=FIFF.FIFFT_INT,
single=FIFF.FIFFT_FLOAT,
double=FIFF.FIFFT_DOUBLE)
        if format not in type_dict:
raise ValueError('format must be "short", "int", "single", '
'or "double"')
reset_dict = dict(short=False, int=False, single=True, double=True)
reset_range = reset_dict[format]
data_type = type_dict[format]
data_test = self[0, 0][0]
if format == 'short' and np.iscomplexobj(data_test):
raise ValueError('Complex data must be saved as "single" or '
'"double", not "short"')
# check for file existence
_check_fname(fname, overwrite)
if proj:
info = copy.deepcopy(self.info)
projector, info = setup_proj(info)
activate_proj(info['projs'], copy=False)
else:
info = self.info
projector = None
# set the correct compensation grade and make inverse compensator
inv_comp = None
if self.comp is not None:
inv_comp = linalg.inv(self.comp)
set_current_comp(info, self._orig_comp_grade)
#
# Set up the reading parameters
#
# Convert to samples
start = int(floor(tmin * self.info['sfreq']))
if tmax is None:
stop = self.last_samp + 1 - self.first_samp
else:
stop = int(floor(tmax * self.info['sfreq']))
if buffer_size_sec is None:
if 'buffer_size_sec' in self.info:
buffer_size_sec = self.info['buffer_size_sec']
else:
buffer_size_sec = 10.0
buffer_size = int(ceil(buffer_size_sec * self.info['sfreq']))
# write the raw file
_write_raw(fname, self, info, picks, format, data_type, reset_range,
start, stop, buffer_size, projector, inv_comp,
drop_small_buffer, split_size, 0, None)
def plot(self, events=None, duration=10.0, start=0.0, n_channels=20,
bgcolor='w', color=None, bad_color=(0.8, 0.8, 0.8),
event_color='cyan', scalings=None, remove_dc=True, order='type',
show_options=False, title=None, show=True, block=False,
highpass=None, lowpass=None, filtorder=4, clipping=None):
"""Plot raw data
Parameters
----------
events : array | None
Events to show with vertical bars.
duration : float
            Time window (sec) to plot at a time.
start : float
Initial time to show (can be changed dynamically once plotted).
n_channels : int
Number of channels to plot at once.
bgcolor : color object
Color of the background.
color : dict | color object | None
Color for the data traces. If None, defaults to:
`dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='r', emg='k',
ref_meg='steelblue', misc='k', stim='k', resp='k', chpi='k')`
bad_color : color object
Color to make bad channels.
event_color : color object
Color to use for events.
scalings : dict | None
Scale factors for the traces. If None, defaults to:
`dict(mag=1e-12, grad=4e-11, eeg=20e-6,
eog=150e-6, ecg=5e-4, emg=1e-3,
ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4)`
remove_dc : bool
If True remove DC component when plotting data.
order : 'type' | 'original' | array
Order in which to plot data. 'type' groups by channel type,
'original' plots in the order of ch_names, array gives the
indices to use in plotting.
show_options : bool
If True, a dialog for options related to projection is shown.
title : str | None
            The title of the window. If None, the filename of the raw object
            (or '<unknown>' if unavailable) will be displayed as the title.
show : bool
Show figures if True
block : bool
Whether to halt program execution until the figure is closed.
Useful for setting bad channels on the fly (click on line).
highpass : float | None
Highpass to apply when displaying data.
lowpass : float | None
Lowpass to apply when displaying data.
filtorder : int
Filtering order. Note that for efficiency and simplicity,
filtering during plotting uses forward-backward IIR filtering,
so the effective filter order will be twice ``filtorder``.
Filtering the lines for display may also produce some edge
artifacts (at the left and right edges) of the signals
during display. Filtering requires scipy >= 0.10.
clipping : str | None
If None, channels are allowed to exceed their designated bounds in
the plot. If "clamp", then values are clamped to the appropriate
range for display, creating step-like artifacts. If "transparent",
then excessive values are not shown, creating gaps in the traces.
Returns
-------
fig : Instance of matplotlib.figure.Figure
Raw traces.
Notes
-----
The arrow keys (up/down/left/right) can typically be used to navigate
between channels and time ranges, but this depends on the backend
matplotlib is configured to use (e.g., mpl.use('TkAgg') should work).
To mark or un-mark a channel as bad, click on the rather flat segments
of a channel's time series. The changes will be reflected immediately
in the raw object's ``raw.info['bads']`` entry.
"""
return plot_raw(self, events, duration, start, n_channels, bgcolor,
color, bad_color, event_color, scalings, remove_dc,
order, show_options, title, show, block, highpass,
lowpass, filtorder, clipping)
@verbose
def plot_psds(self, tmin=0.0, tmax=60.0, fmin=0, fmax=np.inf,
proj=False, n_fft=2048, picks=None, ax=None, color='black',
area_mode='std', area_alpha=0.33, n_jobs=1, verbose=None):
"""Plot the power spectral density across channels
Parameters
----------
tmin : float
Start time for calculations.
tmax : float
End time for calculations.
fmin : float
Start frequency to consider.
fmax : float
End frequency to consider.
proj : bool
Apply projection.
n_fft : int
Number of points to use in Welch FFT calculations.
picks : array-like of int | None
List of channels to use. Cannot be None if `ax` is supplied. If
both `picks` and `ax` are None, separate subplots will be created
for each standard channel type (`mag`, `grad`, and `eeg`).
ax : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
color : str | tuple
A matplotlib-compatible color to use.
area_mode : str | None
How to plot area. If 'std', the mean +/- 1 STD (across channels)
will be plotted. If 'range', the min and max (across channels)
will be plotted. Bad channels will be excluded from these
calculations. If None, no area will be plotted.
area_alpha : float
Alpha for the area.
n_jobs : int
Number of jobs to run in parallel.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
"""
return plot_raw_psds(self, tmin, tmax, fmin, fmax, proj, n_fft, picks,
ax, color, area_mode, area_alpha, n_jobs)
def time_as_index(self, times, use_first_samp=False):
"""Convert time to indices
Parameters
----------
times : list-like | float | int
List of numbers or a number representing points in time.
use_first_samp : boolean
If True, time is treated as relative to the session onset, else
as relative to the recording onset.
Returns
-------
index : ndarray
Indices corresponding to the times supplied.
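        Examples
        --------
        Illustrative sketch, assuming ``raw`` was sampled at 1000 Hz::

            >>> raw.time_as_index([0.0, 1.5])  # doctest: +SKIP
            array([   0, 1500])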
"""
return _time_as_index(times, self.info['sfreq'], self.first_samp,
use_first_samp)
def index_as_time(self, index, use_first_samp=False):
"""Convert indices to time
Parameters
----------
index : list-like | int
            List of ints or a single int representing sample indices.
use_first_samp : boolean
If True, the time returned is relative to the session onset, else
relative to the recording onset.
Returns
-------
times : ndarray
Times corresponding to the index supplied.
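        Examples
        --------
        Illustrative sketch, again assuming a 1000 Hz sampling rate::

            >>> raw.index_as_time([0, 1500])  # doctest: +SKIP
            array([ 0. ,  1.5])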
"""
return _index_as_time(index, self.info['sfreq'], self.first_samp,
use_first_samp)
def estimate_rank(self, tstart=0.0, tstop=30.0, tol=1e-4,
return_singular=False, picks=None):
"""Estimate rank of the raw data
This function is meant to provide a reasonable estimate of the rank.
The true rank of the data depends on many factors, so use at your
own risk.
Parameters
----------
tstart : float
Start time to use for rank estimation. Default is 0.0.
tstop : float | None
End time to use for rank estimation. Default is 30.0.
If None, the end time of the raw file is used.
tol : float
Tolerance for singular values to consider non-zero in
calculating the rank. The singular values are calculated
in this method such that independent data are expected to
have singular value around one.
return_singular : bool
If True, also return the singular values that were used
to determine the rank.
picks : array_like of int, shape (n_selected_channels,)
The channels to be considered for rank estimation.
If None (default) meg and eeg channels are included.
Returns
-------
rank : int
Estimated rank of the data.
s : array
If return_singular is True, the singular values that were
thresholded to determine the rank are also returned.
Notes
-----
If data are not pre-loaded, the appropriate data will be loaded
by this function (can be memory intensive).
Projectors are not taken into account unless they have been applied
to the data using apply_proj(), since it is not always possible
to tell whether or not projectors have been applied previously.
Bad channels will be excluded from calculations.
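        Examples
        --------
        Illustrative sketch (the reported rank depends on the data and any
        projections or processing applied to it)::

            >>> rank = raw.estimate_rank(tstart=0., tstop=30.)  # doctest: +SKIP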
"""
start = max(0, self.time_as_index(tstart)[0])
if tstop is None:
stop = self.n_times - 1
else:
stop = min(self.n_times - 1, self.time_as_index(tstop)[0])
tslice = slice(start, stop + 1)
if picks is None:
picks = pick_types(self.info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
# ensure we don't get a view of data
if len(picks) == 1:
return 1.0, 1.0
# this should already be a copy, so we can overwrite it
data = self[picks, tslice][0]
return estimate_rank(data, tol, return_singular, copy=False)
@property
def ch_names(self):
"""Channel names"""
return self.info['ch_names']
@property
def n_times(self):
"""Number of time points"""
return self.last_samp - self.first_samp + 1
def __len__(self):
return self.n_times
def load_bad_channels(self, bad_file=None, force=False):
"""
Mark channels as bad from a text file, in the style
(mostly) of the C function mne_mark_bad_channels
Parameters
----------
bad_file : string
File name of the text file containing bad channels
If bad_file = None, bad channels are cleared, but this
is more easily done directly as raw.info['bads'] = [].
force : boolean
Whether or not to force bad channel marking (of those
that exist) if channels are not found, instead of
raising an error.
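        Examples
        --------
        Illustrative sketch, assuming ``bads.txt`` lists one channel name per
        line (the channel name shown is hypothetical)::

            >>> raw.load_bad_channels('bads.txt')  # doctest: +SKIP
            >>> raw.info['bads']  # doctest: +SKIP
            ['MEG 2443']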
"""
if bad_file is not None:
# Check to make sure bad channels are there
names = frozenset(self.info['ch_names'])
with open(bad_file) as fid:
bad_names = [l for l in fid.read().splitlines() if l]
names_there = [ci for ci in bad_names if ci in names]
count_diff = len(bad_names) - len(names_there)
if count_diff > 0:
if not force:
raise ValueError('Bad channels from:\n%s\n not found '
'in:\n%s' % (bad_file,
self._filenames[0]))
else:
warnings.warn('%d bad channels from:\n%s\nnot found '
'in:\n%s' % (count_diff, bad_file,
self._filenames[0]))
self.info['bads'] = names_there
else:
self.info['bads'] = []
def append(self, raws, preload=None):
"""Concatenate raw instances as if they were continuous
Parameters
----------
raws : list, or Raw instance
list of Raw instances to concatenate to the current instance
(in order), or a single raw instance to concatenate.
preload : bool, str, or None (default None)
Preload data into memory for data manipulation and faster indexing.
If True, the data will be preloaded into memory (fast, requires
large amount of memory). If preload is a string, preload is the
file name of a memory-mapped file which is used to store the data
on the hard drive (slower, requires less memory). If preload is
None, preload=True or False is inferred using the preload status
of the raw files passed in.
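        Examples
        --------
        Illustrative sketch, assuming ``raw2`` is a compatible continuation
        of the same recording::

            >>> raw.append(raw2)  # doctest: +SKIP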
"""
if not isinstance(raws, list):
raws = [raws]
# make sure the raws are compatible
all_raws = [self]
all_raws += raws
_check_raw_compatibility(all_raws)
# deal with preloading data first (while files are separate)
all_preloaded = self.preload and all(r.preload for r in raws)
if preload is None:
if all_preloaded:
preload = True
else:
preload = False
if preload is False:
if self.preload:
self._data = None
self._times = None
self.preload = False
else:
# do the concatenation ourselves since preload might be a string
nchan = self.info['nchan']
c_ns = np.cumsum([rr.n_times for rr in ([self] + raws)])
nsamp = c_ns[-1]
if not self.preload:
this_data = self._read_segment()[0]
else:
this_data = self._data
# allocate the buffer
if isinstance(preload, string_types):
_data = np.memmap(preload, mode='w+', dtype=this_data.dtype,
shape=(nchan, nsamp))
else:
_data = np.empty((nchan, nsamp), dtype=this_data.dtype)
_data[:, 0:c_ns[0]] = this_data
for ri in range(len(raws)):
if not raws[ri].preload:
# read the data directly into the buffer
data_buffer = _data[:, c_ns[ri]:c_ns[ri + 1]]
raws[ri]._read_segment(data_buffer=data_buffer)
else:
_data[:, c_ns[ri]:c_ns[ri + 1]] = raws[ri]._data
self._data = _data
self.preload = True
# now combine information from each raw file to construct new self
for r in raws:
self._first_samps = np.r_[self._first_samps, r._first_samps]
self._last_samps = np.r_[self._last_samps, r._last_samps]
self._raw_lengths = np.r_[self._raw_lengths, r._raw_lengths]
self.rawdirs += r.rawdirs
self._filenames += r._filenames
self.last_samp = self.first_samp + sum(self._raw_lengths) - 1
# this has to be done after first and last sample are set appropriately
if self.preload:
self._times = np.arange(self.n_times) / self.info['sfreq']
def close(self):
"""Clean up the object.
Does nothing for now.
"""
pass
def copy(self):
""" Return copy of Raw instance
"""
return deepcopy(self)
def as_data_frame(self, picks=None, start=None, stop=None, scale_time=1e3,
scalings=None, use_time_index=True, copy=True):
"""Get the epochs as Pandas DataFrame
Export raw data in tabular structure with MEG channels.
        Caveat! To save memory, depending on the selected data size, consider
        setting ``copy`` to False.
Parameters
----------
picks : array-like of int | None
If None only MEG and EEG channels are kept
otherwise the channels indices in picks are kept.
start : int | None
Data-extraction start index. If None, data will be exported from
the first sample.
stop : int | None
Data-extraction stop index. If None, data will be exported to the
last index.
scale_time : float
Scaling to be applied to time units.
scalings : dict | None
Scaling to be applied to the channels picked. If None, defaults to
            ``scalings=dict(eeg=1e6, grad=1e13, mag=1e15, misc=1.0)``.
use_time_index : bool
            If False, times will be included as a column in the data table,
            else they will be used as the index object.
copy : bool
If true, data will be copied. Else data may be modified in place.
Returns
-------
df : instance of pandas.core.DataFrame
Raw data exported into tabular data structure.
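        Examples
        --------
        Illustrative sketch (column names follow the channel names of the
        instance)::

            >>> df = raw.as_data_frame(start=0, stop=1000)  # doctest: +SKIP
            >>> df.head()  # doctest: +SKIP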
"""
pd = _check_pandas_installed()
if picks is None:
picks = list(range(self.info['nchan']))
data, times = self[picks, start:stop]
if copy:
data = data.copy()
types = [channel_type(self.info, idx) for idx in picks]
n_channel_types = 0
ch_types_used = []
scalings = _mutable_defaults(('scalings', scalings))[0]
for t in scalings.keys():
if t in types:
n_channel_types += 1
ch_types_used.append(t)
for t in ch_types_used:
scaling = scalings[t]
idx = [picks[i] for i in range(len(picks)) if types[i] == t]
if len(idx) > 0:
data[idx] *= scaling
assert times.shape[0] == data.shape[1]
col_names = [self.ch_names[k] for k in picks]
df = pd.DataFrame(data.T, columns=col_names)
df.insert(0, 'time', times * scale_time)
if use_time_index is True:
if 'time' in df:
df['time'] = df['time'].astype(np.int64)
with warnings.catch_warnings(record=True):
df.set_index('time', inplace=True)
return df
def to_nitime(self, picks=None, start=None, stop=None,
use_first_samp=False, copy=True):
""" Raw data as nitime TimeSeries
Parameters
----------
picks : array-like of int | None
Indices of channels to apply. If None, all channels will be
exported.
start : int | None
Data-extraction start index. If None, data will be exported from
the first sample.
stop : int | None
Data-extraction stop index. If None, data will be exported to the
last index.
        use_first_samp : bool
If True, the time returned is relative to the session onset, else
relative to the recording onset.
copy : bool
Whether to copy the raw data or not.
Returns
-------
raw_ts : instance of nitime.TimeSeries
"""
try:
from nitime import TimeSeries # to avoid strong dependency
except ImportError:
raise Exception('the nitime package is missing')
        if picks is None:
            picks = list(range(self.info['nchan']))
        data, _ = self[picks, start:stop]
if copy:
data = data.copy()
start_time = self.index_as_time(start if start else 0, use_first_samp)
raw_ts = TimeSeries(data, sampling_rate=self.info['sfreq'],
t0=start_time)
raw_ts.ch_names = [self.ch_names[k] for k in picks]
return raw_ts
def __repr__(self):
s = "n_channels x n_times : %s x %s" % (len(self.info['ch_names']),
self.n_times)
return "<Raw | %s>" % s
def add_events(self, events, stim_channel=None):
"""Add events to stim channel
Parameters
----------
events : ndarray, shape (n_events, 3)
Events to add. The first column specifies the sample number of
each event, the second column is ignored, and the third column
provides the event value. If events already exist in the Raw
instance at the given sample numbers, the event values will be
added together.
stim_channel : str | None
Name of the stim channel to add to. If None, the config variable
'MNE_STIM_CHANNEL' is used. If this is not found, it will default
to 'STI 014'.
Notes
-----
Data must be preloaded in order to add events.
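        Examples
        --------
        Illustrative sketch, adding a single event with value 5 one second
        after the start of the recording (assumes a 1000 Hz sampling rate and
        a preloaded instance)::

            >>> events = np.array([[raw.first_samp + 1000, 0, 5]])  # doctest: +SKIP
            >>> raw.add_events(events, stim_channel='STI 014')  # doctest: +SKIP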
"""
if not self.preload:
raise RuntimeError('cannot add events unless data are preloaded')
events = np.asarray(events)
if events.ndim != 2 or events.shape[1] != 3:
raise ValueError('events must be shape (n_events, 3)')
stim_channel = _get_stim_channel(stim_channel)
pick = pick_channels(self.ch_names, stim_channel)
if len(pick) == 0:
raise ValueError('Channel %s not found' % stim_channel)
pick = pick[0]
idx = events[:, 0].astype(int)
if np.any(idx < self.first_samp) or np.any(idx > self.last_samp):
raise ValueError('event sample numbers must be between %s and %s'
% (self.first_samp, self.last_samp))
if not all(idx == events[:, 0]):
raise ValueError('event sample numbers must be integers')
self._data[pick, idx - self.first_samp] += events[:, 2]
def set_eeg_reference(raw, ref_channels, copy=True):
"""Rereference eeg channels to new reference channel(s).
If multiple reference channels are specified, they will be averaged.
Parameters
----------
raw : instance of Raw
Instance of Raw with eeg channels and reference channel(s).
ref_channels : list of str
The name(s) of the reference channel(s).
copy : bool
Specifies whether instance of Raw will be copied or modified in place.
Returns
-------
raw : instance of Raw
Instance of Raw with eeg channels rereferenced.
ref_data : array
Array of reference data subtracted from eeg channels.
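    Examples
    --------
    Illustrative sketch (the channel names are hypothetical; the data must be
    preloaded)::

        >>> raw_reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'])  # doctest: +SKIP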
"""
# Check to see that raw data is preloaded
if not raw.preload:
raise RuntimeError('Raw data needs to be preloaded. Use '
'preload=True (or string) in the constructor.')
# Make sure that reference channels are loaded as list of string
if not isinstance(ref_channels, list):
raise IOError('Reference channel(s) must be a list of string. '
'If using a single reference channel, enter as '
'a list with one element.')
# Find the indices to the reference electrodes
ref_idx = [raw.ch_names.index(c) for c in ref_channels]
# Get the reference array
ref_data = raw._data[ref_idx].mean(0)
# Get the indices to the eeg channels using the pick_types function
eeg_idx = pick_types(raw.info, exclude="bads", eeg=True, meg=False,
ref_meg=False)
# Copy raw data or modify raw data in place
if copy: # copy data
raw = raw.copy()
# Rereference the eeg channels
raw._data[eeg_idx] -= ref_data
# Return rereferenced data and reference array
return raw, ref_data
def _allocate_data(data, data_buffer, data_shape, dtype):
if data is None:
# if not already done, allocate array with right type
if isinstance(data_buffer, string_types):
# use a memmap
data = np.memmap(data_buffer, mode='w+',
dtype=dtype, shape=data_shape)
else:
data = np.zeros(data_shape, dtype=dtype)
return data
def _time_as_index(times, sfreq, first_samp=0, use_first_samp=False):
"""Convert time to indices
Parameters
----------
times : list-like | float | int
List of numbers or a number representing points in time.
use_first_samp : boolean
If True, time is treated as relative to the session onset, else
as relative to the recording onset.
Returns
-------
index : ndarray
Indices corresponding to the times supplied.
"""
index = np.atleast_1d(times) * sfreq
index -= (first_samp if use_first_samp else 0)
return index.astype(int)
def _index_as_time(index, sfreq, first_samp=0, use_first_samp=False):
"""Convert indices to time
Parameters
----------
index : list-like | int
        List of ints or a single int representing sample indices.
use_first_samp : boolean
If True, the time returned is relative to the session onset, else
relative to the recording onset.
Returns
-------
times : ndarray
Times corresponding to the index supplied.
"""
times = np.atleast_1d(index) + (first_samp if use_first_samp else 0)
return times / sfreq
class _RawShell():
"""Used for creating a temporary raw object"""
def __init__(self):
self.first_samp = None
self.last_samp = None
self.cals = None
self.rawdir = None
self._projector = None
@property
def n_times(self):
return self.last_samp - self.first_samp + 1
###############################################################################
# Writing
def _write_raw(fname, raw, info, picks, format, data_type, reset_range, start,
stop, buffer_size, projector, inv_comp, drop_small_buffer,
split_size, part_idx, prev_fname):
"""Write raw file with splitting
"""
if part_idx > 0:
# insert index in filename
path, base = op.split(fname)
idx = base.find('.')
use_fname = op.join(path, '%s-%d.%s' % (base[:idx], part_idx,
base[idx + 1:]))
else:
use_fname = fname
logger.info('Writing %s' % use_fname)
meas_id = info['meas_id']
fid, cals = _start_writing_raw(use_fname, info, picks, data_type,
reset_range)
first_samp = raw.first_samp + start
if first_samp != 0:
write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first_samp)
# previous file name and id
if part_idx > 0 and prev_fname is not None:
start_block(fid, FIFF.FIFFB_REF)
write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_PREV_FILE)
write_string(fid, FIFF.FIFF_REF_FILE_NAME, prev_fname)
if meas_id is not None:
write_id(fid, FIFF.FIFF_REF_FILE_ID, meas_id)
write_int(fid, FIFF.FIFF_REF_FILE_NUM, part_idx - 1)
end_block(fid, FIFF.FIFFB_REF)
pos_prev = None
for first in range(start, stop, buffer_size):
last = first + buffer_size
if last >= stop:
last = stop + 1
if picks is None:
data, times = raw[:, first:last]
else:
data, times = raw[picks, first:last]
if projector is not None:
data = np.dot(projector, data)
if ((drop_small_buffer and (first > start)
and (len(times) < buffer_size))):
logger.info('Skipping data chunk due to small buffer ... '
'[done]')
break
logger.info('Writing ...')
if pos_prev is None:
pos_prev = fid.tell()
_write_raw_buffer(fid, data, cals, format, inv_comp)
pos = fid.tell()
this_buff_size_bytes = pos - pos_prev
if this_buff_size_bytes > split_size / 2:
            raise ValueError('buffer size is too large for the given '
                             'split size: decrease "buffer_size_sec" or '
                             'increase "split_size".')
if pos > split_size:
            logger.warning('file is larger than "split_size"')
# Split files if necessary, leave some space for next file info
if pos >= split_size - this_buff_size_bytes - 2 ** 20:
next_fname, next_idx = _write_raw(fname, raw, info, picks, format,
data_type, reset_range, first + buffer_size, stop, buffer_size,
projector, inv_comp, drop_small_buffer, split_size,
part_idx + 1, use_fname)
start_block(fid, FIFF.FIFFB_REF)
write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_NEXT_FILE)
write_string(fid, FIFF.FIFF_REF_FILE_NAME, op.basename(next_fname))
if meas_id is not None:
write_id(fid, FIFF.FIFF_REF_FILE_ID, meas_id)
write_int(fid, FIFF.FIFF_REF_FILE_NUM, next_idx)
end_block(fid, FIFF.FIFFB_REF)
break
pos_prev = pos
logger.info('Closing %s [done]' % use_fname)
_finish_writing_raw(fid)
return use_fname, part_idx
def _start_writing_raw(name, info, sel=None, data_type=FIFF.FIFFT_FLOAT,
reset_range=True):
"""Start write raw data in file
Data will be written in float
Parameters
----------
name : string
Name of the file to create.
info : dict
Measurement info.
sel : array of int, optional
Indices of channels to include. By default all channels are included.
data_type : int
The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT),
5 (FIFFT_DOUBLE), 16 (FIFFT_DAU_PACK16), or 3 (FIFFT_INT) for raw data.
reset_range : bool
If True, the info['chs'][k]['range'] parameter will be set to unity.
Returns
-------
fid : file
The file descriptor.
cals : list
calibration factors.
"""
#
# Create the file and save the essentials
#
fid = start_file(name)
start_block(fid, FIFF.FIFFB_MEAS)
write_id(fid, FIFF.FIFF_BLOCK_ID)
if info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])
#
# Measurement info
#
info = copy.deepcopy(info)
if sel is not None:
info['chs'] = [info['chs'][k] for k in sel]
info['nchan'] = len(sel)
ch_names = [c['ch_name'] for c in info['chs']] # name of good channels
comps = copy.deepcopy(info['comps'])
for c in comps:
row_idx = [k for k, n in enumerate(c['data']['row_names'])
if n in ch_names]
row_names = [c['data']['row_names'][i] for i in row_idx]
rowcals = c['rowcals'][row_idx]
c['rowcals'] = rowcals
c['data']['nrow'] = len(row_names)
c['data']['row_names'] = row_names
c['data']['data'] = c['data']['data'][row_idx]
info['comps'] = comps
cals = []
for k in range(info['nchan']):
#
# Scan numbers may have been messed up
#
info['chs'][k]['scanno'] = k + 1 # scanno starts at 1 in FIF format
if reset_range is True:
info['chs'][k]['range'] = 1.0
cals.append(info['chs'][k]['cal'] * info['chs'][k]['range'])
write_meas_info(fid, info, data_type=data_type, reset_range=reset_range)
#
# Start the raw data
#
start_block(fid, FIFF.FIFFB_RAW_DATA)
return fid, cals
def _write_raw_buffer(fid, buf, cals, format, inv_comp):
"""Write raw buffer
Parameters
----------
fid : file descriptor
an open raw data file.
buf : array
The buffer to write.
cals : array
Calibration factors.
format : str
'short', 'int', 'single', or 'double' for 16/32 bit int or 32/64 bit
float for each item. This will be doubled for complex datatypes. Note
that short and int formats cannot be used for complex data.
inv_comp : array | None
The CTF compensation matrix used to revert compensation
change when reading.
"""
if buf.shape[0] != len(cals):
raise ValueError('buffer and calibration sizes do not match')
    if format not in ['short', 'int', 'single', 'double']:
        raise ValueError('format must be "short", "int", "single", or '
                         '"double"')
if np.isrealobj(buf):
if format == 'short':
write_function = write_dau_pack16
elif format == 'int':
write_function = write_int
elif format == 'single':
write_function = write_float
else:
write_function = write_double
else:
if format == 'single':
write_function = write_complex64
elif format == 'double':
write_function = write_complex128
else:
raise ValueError('only "single" and "double" supported for '
'writing complex data')
if inv_comp is not None:
buf = np.dot(inv_comp / np.ravel(cals)[:, None], buf)
else:
buf = buf / np.ravel(cals)[:, None]
write_function(fid, FIFF.FIFF_DATA_BUFFER, buf)
def _finish_writing_raw(fid):
"""Finish writing raw FIF file
Parameters
----------
fid : file descriptor
an open raw data file.
"""
end_block(fid, FIFF.FIFFB_RAW_DATA)
end_block(fid, FIFF.FIFFB_MEAS)
end_file(fid)
def _envelope(x):
""" Compute envelope signal """
return np.abs(hilbert(x))
def _check_raw_compatibility(raw):
"""Check to make sure all instances of Raw
in the input list raw have compatible parameters"""
for ri in range(1, len(raw)):
if not raw[ri].info['nchan'] == raw[0].info['nchan']:
raise ValueError('raw[%d][\'info\'][\'nchan\'] must match' % ri)
if not raw[ri].info['bads'] == raw[0].info['bads']:
raise ValueError('raw[%d][\'info\'][\'bads\'] must match' % ri)
if not raw[ri].info['sfreq'] == raw[0].info['sfreq']:
raise ValueError('raw[%d][\'info\'][\'sfreq\'] must match' % ri)
if not set(raw[ri].info['ch_names']) == set(raw[0].info['ch_names']):
raise ValueError('raw[%d][\'info\'][\'ch_names\'] must match' % ri)
if not all(raw[ri].cals == raw[0].cals):
raise ValueError('raw[%d].cals must match' % ri)
if len(raw[0].info['projs']) != len(raw[ri].info['projs']):
raise ValueError('SSP projectors in raw files must be the same')
if not all(proj_equal(p1, p2) for p1, p2 in
zip(raw[0].info['projs'], raw[ri].info['projs'])):
raise ValueError('SSP projectors in raw files must be the same')
if not all([r.orig_format == raw[0].orig_format for r in raw]):
warnings.warn('raw files do not all have the same data format, '
'could result in precision mismatch. Setting '
'raw.orig_format="unknown"')
raw[0].orig_format = 'unknown'
def concatenate_raws(raws, preload=None, events_list=None):
"""Concatenate raw instances as if they were continuous. Note that raws[0]
is modified in-place to achieve the concatenation.
Parameters
----------
raws : list
list of Raw instances to concatenate (in order).
preload : bool, or None
If None, preload status is inferred using the preload status of the
raw files passed in. True or False sets the resulting raw file to
have or not have data preloaded.
events_list : None | list
The events to concatenate. Defaults to None.
Returns
-------
raw : instance of Raw
The result of the concatenation (first Raw instance passed in).
    events : ndarray of int, shape (n_events, 3)
        The events. Only returned if `events_list` is not None.
"""
if events_list is not None:
if len(events_list) != len(raws):
            raise ValueError('`raws` and `events_list` are required '
                             'to be of the same length')
first, last = zip(*[(r.first_samp, r.last_samp) for r in raws])
events = concatenate_events(events_list, first, last)
raws[0].append(raws[1:], preload)
if events_list is None:
return raws[0]
else:
return raws[0], events
def get_chpi_positions(raw, t_step=None):
"""Extract head positions
Note that the raw instance must have CHPI channels recorded.
Parameters
----------
raw : instance of Raw | str
Raw instance to extract the head positions from. Can also be a
path to a Maxfilter log file (str).
t_step : float | None
Sampling interval to use when converting data. If None, it will
be automatically determined. By default, a sampling interval of
        1 second is used if processing raw data. If processing a
Maxfilter log file, this must be None because the log file
itself will determine the sampling interval.
Returns
-------
translation : array
A 2-dimensional array of head position vectors (n_time x 3).
rotation : array
A 3-dimensional array of rotation matrices (n_time x 3 x 3).
t : array
The time points associated with each position (n_time).
Notes
-----
    The digitized HPI head frame Y is related to the frame position X as:
        Y = np.dot(rotation, X) + translation
Note that if a Maxfilter log file is being processed, the start time
may not use the same reference point as the rest of mne-python (i.e.,
it could be referenced relative to raw.first_samp or something else).
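    Examples
    --------
    Illustrative sketch, sampling the head position once per second from a
    raw instance with CHPI channels::

        >>> translation, rotation, t = get_chpi_positions(raw)  # doctest: +SKIP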
"""
if isinstance(raw, _BaseRaw):
# for simplicity, we'll sample at 1 sec intervals like maxfilter
if t_step is None:
t_step = 1.0
if not np.isscalar(t_step):
raise TypeError('t_step must be a scalar or None')
picks = pick_types(raw.info, meg=False, ref_meg=False,
chpi=True, exclude=[])
if len(picks) == 0:
raise RuntimeError('raw file has no CHPI channels')
time_idx = raw.time_as_index(np.arange(0, raw.n_times
/ raw.info['sfreq'], t_step))
data = [raw[picks, ti] for ti in time_idx]
t = np.array([d[1] for d in data])
data = np.array([d[0][:, 0] for d in data])
data = np.c_[t, data]
else:
if not isinstance(raw, string_types):
raise TypeError('raw must be an instance of Raw or string')
if not op.isfile(raw):
raise IOError('File "%s" does not exist' % raw)
if t_step is not None:
raise ValueError('t_step must be None if processing a log')
data = np.loadtxt(raw, skiprows=1) # first line is header, skip it
t = data[:, 0]
translation = data[:, 4:7].copy()
rotation = _quart_to_rot(data[:, 1:4])
return translation, rotation, t
def _quart_to_rot(q):
"""Helper to convert quarternions to rotations"""
q0 = np.sqrt(1 - np.sum(q[:, 0:3] ** 2, 1))
q1 = q[:, 0]
q2 = q[:, 1]
q3 = q[:, 2]
rotation = np.array((np.c_[(q0 ** 2 + q1 ** 2 - q2 ** 2 - q3 ** 2,
2 * (q1 * q2 - q0 * q3),
2 * (q1 * q3 + q0 * q2))],
np.c_[(2 * (q1 * q2 + q0 * q3),
q0 ** 2 + q2 ** 2 - q1 ** 2 - q3 ** 2,
2 * (q2 * q3 - q0 * q1))],
np.c_[(2 * (q1 * q3 - q0 * q2),
2 * (q2 * q3 + q0 * q1),
q0 ** 2 + q3 ** 2 - q1 ** 2 - q2 ** 2)]
))
rotation = np.swapaxes(rotation, 0, 1).copy()
return rotation
def _check_update_montage(info, montage):
""" Helper function for eeg readers to add montage"""
if montage is not None:
if not isinstance(montage, (str, Montage)):
err = ("Montage must be str, None, or instance of Montage. "
"%s was provided" % type(montage))
raise TypeError(err)
if montage is not None:
if isinstance(montage, str):
montage = read_montage(montage, scale=False)
apply_montage(info, montage)
missing_positions = []
exclude = (FIFF.FIFFV_EOG_CH, FIFF.FIFFV_MISC_CH,
FIFF.FIFFV_STIM_CH)
for ch in info['chs']:
if not ch['kind'] in exclude:
if np.unique(ch['loc']).size == 1:
missing_positions.append(ch['ch_name'])
# raise error if positions are missing
if missing_positions:
err = ("The following positions are missing from the montage "
"definitions: %s. If those channels lack positions "
"because they are EOG channels use the eog parameter."
% str(missing_positions))
raise KeyError(err)
| bsd-3-clause |
yishayv/lyacorr | graphs/plot_comoving_diff.py | 1 | 2687 | """
Plot the differences in comoving distance at redshift 0-5, using various cosmologies.
"""
import lmfit
import matplotlib.pyplot as plt
import numpy as np
import scipy.misc
from astropy.cosmology import Planck13, WMAP5, WMAP7, WMAP9
ar_z = np.arange(0, 5, 0.1)
def delta_dist(params, z1, cosmology, bao_scale):
return [(cosmology.comoving_distance(z=params['delta_z'].value + z1) -
cosmology.comoving_distance(z=z1)).value - bao_scale]
def find_bao_redshift(z1, cosmology):
params = lmfit.Parameters()
params.add('delta_z', 0.1)
result = lmfit.minimize(delta_dist, params, kws={'z1': z1, 'cosmology': cosmology, 'bao_scale': 100.})
delta_z = result.params['delta_z'].value
return delta_z
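# Example use of the helper above (a sketch; the loops below apply it over the
# full redshift grid): the redshift separation corresponding to a 100 Mpc
# comoving distance at z=2 in the Planck13 cosmology would be obtained with
#     delta_z_at_2 = find_bao_redshift(2.0, Planck13)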
print(ar_z)
ar_delta_z_planck13 = [find_bao_redshift(z, Planck13) for z in ar_z]
ar_delta_z_wmap5 = [find_bao_redshift(z, WMAP5) for z in ar_z]
ar_delta_z_wmap7 = [find_bao_redshift(z, WMAP7) for z in ar_z]
ar_delta_z_wmap9 = [find_bao_redshift(z, WMAP9) for z in ar_z]
# print(ar_delta_z_planck13, Planck13.comoving_distance(ar_z + ar_delta_z_planck13) -
# Planck13.comoving_distance(ar_z))
#
# plt.plot(ar_z, Planck13.comoving_distance(ar_z) / Planck13.comoving_distance(ar_z))
# plt.plot(ar_z, Planck13.comoving_distance(ar_z + ar_delta_z_planck13 - ar_delta_z_wmap7) /
# Planck13.comoving_distance(ar_z))
# plt.show()
# print(scipy.misc.derivative(func=Planck13.comoving_distance, x0=2, dx=0.1))
# ar_dcmv_dz_planck13 = np.array([scipy.misc.derivative(
# func=lambda x: Planck13.comoving_distance(x).value, x0=z, dx=0.01) for z in ar_z])
# ar_dcmv_dz_wmap7 = np.array([scipy.misc.derivative(
# func=lambda x: WMAP7.comoving_distance(x).value, x0=z, dx=0.01) for z in ar_z])
# plt.plot(ar_z, -(ar_dcmv_dz_planck13 - ar_dcmv_dz_wmap7) * ar_delta_z_planck13)
# plt.show()
del scipy.misc
ar_base_cmvd_planck13 = Planck13.comoving_distance(ar_z)
ar_true_planck13_cmvd = Planck13.comoving_distance(ar_z + ar_delta_z_planck13)
ar_base_cmvd_wmap5 = WMAP5.comoving_distance(ar_z)
ar_wmap5_apparent_cmvd = WMAP5.comoving_distance(ar_z + ar_delta_z_planck13)
ar_base_cmvd_wmap7 = WMAP7.comoving_distance(ar_z)
ar_wmap7_apparent_cmvd = WMAP7.comoving_distance(ar_z + ar_delta_z_planck13)
ar_base_cmvd_wmap9 = WMAP9.comoving_distance(ar_z)
ar_wmap9_apparent_cmvd = WMAP9.comoving_distance(ar_z + ar_delta_z_planck13)
plt.plot(ar_z, ar_true_planck13_cmvd - ar_base_cmvd_planck13)
plt.plot(ar_z, ar_wmap5_apparent_cmvd - ar_base_cmvd_wmap5)
plt.plot(ar_z, ar_wmap7_apparent_cmvd - ar_base_cmvd_wmap7)
plt.plot(ar_z, ar_wmap9_apparent_cmvd - ar_base_cmvd_wmap9)
# plt.plot(ar_z, ar_wmap7_apparent_cmvd - ar_true_planck13_cmvd)
plt.show()
| mit |
dashmoment/facerecognition | py/apps/scripts/lpq_experiment.py | 2 | 10667 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) Philipp Wagner. All rights reserved.
# Licensed under the BSD license. See LICENSE file in the project root for full license information.
import numpy as np
from scipy import ndimage
import os
import sys
sys.path.append("../..")
# try to import the PIL Image
try:
from PIL import Image
except ImportError:
import Image
import matplotlib.pyplot as plt
import textwrap
import logging
from facerec.feature import SpatialHistogram
from facerec.distance import ChiSquareDistance
from facerec.classifier import NearestNeighbor
from facerec.model import PredictableModel
from facerec.lbp import LPQ, ExtendedLBP
from facerec.validation import SimpleValidation, precision
from facerec.util import shuffle_array
EXPERIMENT_NAME = "LocalPhaseQuantizationExperiment"
# ITER_MAX is the number of experimental runs, as described in the
# original paper. For testing purposes, it was set to 1, but it
# should be set to a higher value to get at least a little confidence
# in the results.
ITER_MAX = 1
class FileNameFilter:
"""
Base class used for filtering files.
"""
def __init__(self, name):
self._name = name
def __call__(self, filename):
return True
def __repr__(self):
return "FileNameFilter (name=%s)" % (self._name)
class YaleBaseFilter(FileNameFilter):
"""
This Filter filters files, based on their filetype ending (.pgm) and
their azimuth and elevation. The higher the angle, the more shadows in
the face. This is useful for experiments with illumination and
preprocessing.
"""
def __init__(self, min_azimuth, max_azimuth, min_elevation, max_elevation):
FileNameFilter.__init__(self, "Filter YaleFDB Subset1")
self._min_azimuth = min_azimuth
self._max_azimuth = max_azimuth
self._min_elevation = min_elevation
self._max_elevation = max_elevation
def __call__(self, filename):
# We only want the PGM files:
filetype = filename[-4:]
if filetype != ".pgm":
return False
# There are "Ambient" PGM files, ignore them:
if "Ambient" in filename:
return False
azimuth = abs(int(filename[12:16]))
elevation = abs(int(filename[17:20]))
# Now filter based on angles:
if azimuth < self._min_azimuth or azimuth > self._max_azimuth:
return False
if elevation < self._min_elevation or elevation > self._max_elevation:
return False
return True
def __repr__(self):
return "Yale FDB Filter (min_azimuth=%s, max_azimuth=%s, min_elevation=%s, max_elevation=%s)" % (min_azimuth, max_azimuth, min_elevation, max_elevation)
def read_images(path, fileNameFilter=FileNameFilter("None"), sz=None):
"""Reads the images in a given folder, resizes images on the fly if size is given.
Args:
path: Path to a folder with subfolders representing the subjects (persons).
sz: A tuple with the size Resizes
Returns:
A list [X,y]
X: The images, which is a Python list of numpy arrays.
y: The corresponding labels (the unique number of the subject, person) in a Python list.
"""
c = 0
X,y = [], []
for dirname, dirnames, filenames in os.walk(path):
for subdirname in dirnames:
subject_path = os.path.join(dirname, subdirname)
for filename in os.listdir(subject_path):
if fileNameFilter(filename):
try:
im = Image.open(os.path.join(subject_path, filename))
im = im.convert("L")
# resize to given size (if given)
if (sz is not None):
im = im.resize(sz, Image.ANTIALIAS)
X.append(np.asarray(im, dtype=np.uint8))
y.append(c)
except IOError, (errno, strerror):
print "I/O error({0}): {1}".format(errno, strerror)
except:
print "Unexpected error:", sys.exc_info()[0]
raise
c = c+1
return [X,y]
def apply_gaussian(X, sigma):
"""A simple function to apply a Gaussian Blur on each image in X.
Args:
X: A list of images.
sigma: sigma to apply
Returns:
Y: The processed images
"""
return np.array([ndimage.gaussian_filter(x, sigma) for x in X])
def results_to_list(validation_results):
return [precision(result.true_positives,result.false_positives) for result in validation_results]
def partition_data(X, y):
"""
Shuffles the input data and splits it into a new set of images. This resembles the experimental setup
used in the paper on the Local Phase Quantization descriptor in:
"Recognition of Blurred Faces Using Local Phase Quantization", Timo Ahonen, Esa Rahtu, Ville Ojansivu, Janne Heikkila
What it does is to build a subset for each class, so it has 1 image for training and the rest for testing.
The original dataset is shuffled for each call, hence you always get a new partitioning.
"""
Xs,ys = shuffle_array(X,y)
# Maps index to class:
mapping = {}
for i in xrange(len(y)):
yi = ys[i]
try:
mapping[yi].append(i)
except KeyError:
mapping[yi] = [i]
# Get one image for each subject:
Xtrain, ytrain = [], []
Xtest, ytest = [], []
# Finally build partition:
for key, indices in mapping.iteritems():
# Add images:
Xtrain.extend([ Xs[i] for i in indices[:1] ])
ytrain.extend([ ys[i] for i in indices[:1] ])
Xtest.extend([ Xs[i] for i in indices[1:20]])
ytest.extend([ ys[i] for i in indices[1:20]])
# Return shuffled partitions:
return Xtrain, ytrain, Xtest, ytest
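# Illustrative sketch of how partition_data is used below: each call yields a
# fresh random split with exactly one training image per subject and the
# remaining images (up to 19 per subject) for testing:
#
#     Xtrain, ytrain, Xtest, ytest = partition_data(X, y)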
class ModelWrapper:
    def __init__(self, model):
self.model = model
self.result = []
if __name__ == "__main__":
# This is where we write the results to, if an output_dir is given
# in command line:
out_dir = None
# You'll need at least a path to your image data, please see
# the tutorial coming with this source code on how to prepare
# your image data:
if len(sys.argv) < 2:
print "USAGE: lpq_experiment.py </path/to/images>"
sys.exit()
# Define filters for the Dataset:
yale_subset_0_40 = YaleBaseFilter(0, 40, 0, 40)
# Now read in the image data. Apply filters, scale to 128 x 128 pixel:
[X,y] = read_images(sys.argv[1], yale_subset_0_40, sz=(64,64))
# Set up a handler for logging:
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# Add handler to facerec modules, so we see what's going on inside:
logger = logging.getLogger("facerec")
logger.addHandler(handler)
logger.setLevel(logging.INFO)
# The models we want to evaluate:
model0 = PredictableModel(feature=SpatialHistogram(lbp_operator=ExtendedLBP()), classifier=NearestNeighbor(dist_metric=ChiSquareDistance(), k=1))
model1 = PredictableModel(feature=SpatialHistogram(lbp_operator=LPQ()), classifier=NearestNeighbor(dist_metric=ChiSquareDistance(), k=1))
# The sigmas we'll apply for each run:
sigmas = [0]
print 'The experiment will be run %s times!' % ITER_MAX
# Initialize experiments (with empty results):
experiments = {}
experiments['lbp_model'] = { 'model': model0, 'results' : {}, 'color' : 'r', 'linestyle' : '--', 'marker' : '*'}
experiments['lpq_model'] = { 'model': model1, 'results' : {}, 'color' : 'b', 'linestyle' : '--', 'marker' : 's'}
# Loop to acquire the results for each experiment:
for sigma in sigmas:
print "Setting sigma=%s" % sigma
for key, value in experiments.iteritems():
print 'Running experiment for model=%s' % key
# Define the validators for the model:
cv0 = SimpleValidation(value['model'])
for iteration in xrange(ITER_MAX):
print "Repeating experiment %s/%s." % (iteration + 1, ITER_MAX)
# Split dataset according to the papers description:
Xtrain, ytrain, Xtest, ytest = partition_data(X,y)
# Apply a gaussian blur on the images:
Xs = apply_gaussian(Xtest, sigma)
# Run each validator with the given data:
experiment_description = "%s (iteration=%s, sigma=%.2f)" % (EXPERIMENT_NAME, iteration, sigma)
cv0.validate(Xtrain, ytrain, Xs, ytest, experiment_description)
# Get overall results:
true_positives = sum([validation_result.true_positives for validation_result in cv0.validation_results])
false_positives = sum([validation_result.false_positives for validation_result in cv0.validation_results])
# Calculate overall precision:
prec = precision(true_positives,false_positives)
# Store the result:
print key
experiments[key]['results'][sigma] = prec
# Make a nice plot of this textual output:
fig = plt.figure()
# Holds the legend items:
plot_legend = []
# Add the Validation results:
for experiment_name, experiment_definition in experiments.iteritems():
        print experiment_name, experiment_definition
results = experiment_definition['results']
(xvalues, yvalues) = zip(*[(k,v) for k,v in results.iteritems()])
# Add to the legend:
plot_legend.append(experiment_name)
# Put the results into the plot:
plot_color = experiment_definition['color']
plot_linestyle = experiment_definition['linestyle']
plot_marker = experiment_definition['marker']
plt.plot(sigmas, yvalues, linestyle=plot_linestyle, marker=plot_marker, color=plot_color)
# Put the legend below the plot (TODO):
plt.legend(plot_legend, prop={'size':6}, numpoints=1, loc='upper center', bbox_to_anchor=(0.5, -0.2), fancybox=True, shadow=True, ncol=1)
# Scale y-axis between 0,1 to see the Precision:
plt.ylim(0,1)
plt.xlim(-0.2, max(sigmas) + 1)
# Finally add the labels:
plt.title(EXPERIMENT_NAME)
plt.ylabel('Precision')
plt.xlabel('Sigma')
fig.subplots_adjust(bottom=0.5)
    # Save the figure and we are out of here!
plt.savefig("lpq_experiment.png", bbox_inches='tight',dpi=100)
| bsd-3-clause |
openeventdata/Focus_Locality_Extraction | Focus_Locality/Sentence_Embedding_Approach/Testing/SIFpreprocessing_test.py | 1 | 5830 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 10 11:34:46 2017
@author: maryam
"""
import nltk
import numpy as np
import sys
from nltk.corpus import stopwords
from sklearn.decomposition import TruncatedSVD
np.seterr(divide='ignore', invalid='ignore')
#reload(sys)
#sys.setdefaultencoding("utf-8")
stop = set(stopwords.words('english'))
to_filter = [',', '?', '!', ':', ';', '(', ')', '[', ']', '{', '}', "'s",'``', '"', "'", '.' , "''"]
def parse_files(trainlist):
corpus= ''
for trainl in trainlist:
text = trainl.lower().replace('\n', ' ')
#text = unicode(text, errors='ignore')
corpus += text.replace('\n', ' ') +'\n'
vocabDic = nltk.FreqDist(w.lower() for w in nltk.tokenize.word_tokenize(corpus))
vocabDic1 = [(w,v) for (w,v) in vocabDic.items() if (w not in to_filter and not w.isdigit())]
vocabulary = [w for (w,v) in vocabDic1]
vocabFreq = [v for (w,v) in vocabDic1]
return corpus, vocabulary, vocabFreq
def index_vector(trainTextList, vocabulary, vocabFreq, corpus, alpha):
# alpha= 0.001
summ = sum(vocabFreq)
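    # SIF-style weighting used below: each word receives weight
    #     w = alpha / (alpha + p(word))
    # where p(word) = corpus frequency / total token count, so frequent words
    # are down-weighted relative to rare ones.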
lines1 = [line.strip().replace('_',' ') for line in trainTextList]
X_index= []
weight= []
for line in lines1:
if line == '':
continue
word1 = nltk.tokenize.word_tokenize(line)
word = [w for w in word1 if (w not in to_filter and not w.isdigit())]
x = [0] * len(word)
w = [1] * len(word)
for i in range(len(word)):
try:
x[i] = vocabulary.index(word[i].lower())
except Exception as excep:
print (excep)
continue
try:
w[i] = alpha / (alpha + 1.0* vocabFreq[x[i]] / summ) #main formula
except Exception as excep:
print (excep)
continue
X_index.append(x)
weight.append(w)
return X_index , weight
def word2vec(word2vec_Dictionary, vocabulary, lang):
word2vec2= []
for word in vocabulary:
try:
#print (word)
word2vec = word2vec_Dictionary[word.encode('utf-8')]
except Exception:
#print 'error'
word2vec = [0.0000001] * 300
word2vec2.append(word2vec)
return word2vec2
def get_weighted_average(We, x, w):
"""
Compute the weighted average vectors
:param We: We[i,:] is the vector for word i
:param x: x[i, :] are the indices of the words in sentence i
:param w: w[i, :] are the weights for the words in sentence i
:return: emb[i, :] are the weighted average vector for sentence i
"""
WeArr=np.asarray(We)
n_samples = len(x)
emb = np.zeros((n_samples, 300))
for i in xrange(n_samples):
emb[i,:] = np.asarray(w[i]).dot(WeArr[[np.asarray(x[i])],:]) / np.count_nonzero(np.asarray(w[i]))
return emb
def compute_pc(X,npc):
"""
Compute the principal components
:param X: X[i,:] is a data point
:param npc: number of principal components to remove
:return: component_[i,:] is the i-th pc
"""
svd = TruncatedSVD(n_components=npc, n_iter=7, random_state=0)
svd.fit(X)
return svd.components_
def remove_pc(X, npc):
"""
Remove the projection on the principal components
:param X: X[i,:] is a data point
:param npc: number of principal components to remove
:return: XX[i, :] is the data point after removing its projection
"""
pc = compute_pc(X, npc)
if npc==2:
XX = X - X.dot(pc.transpose()) * pc
else:
XX = X - X.dot(pc.transpose()).dot(pc)
return XX
def SIF_embedding(We, x, w, npc):
"""
Compute the scores between pairs of sentences using weighted average + removing the projection on the first principal component
:param We: We[i,:] is the vector for word i
:param x: x[i, :] are the indices of the words in the i-th sentence
:param w: w[i, :] are the weights for the words in the i-th sentence
:param params.rmpc: if >0, remove the projections of the sentence embeddings to their first principal component
:return: emb, emb[i, :] is the embedding for sentence i
"""
emb = get_weighted_average(We, x, w)
if npc > 0:
emb = remove_pc(emb, npc)
return emb
def makingfile(trainTextList, vocabulary, vocabFreq, corpus, alpha, We):
x , w= index_vector(trainTextList, vocabulary, vocabFreq, corpus, alpha)
emb = get_weighted_average(We, x, w)
embList = emb.tolist()
newemb= []
x, y = emb.shape
for i in range (x):
if (not np.isnan(emb[i,0]) and not np.isinf(emb[i,0]) ):
newemb.append(embList[i])
emb = np.asarray(newemb)
emb = remove_pc(emb, npc=1)
return emb
def main(alpha, lang, trainTextList, word2vec_Dictionary):
corpus , vocabulary, vocabFreq = parse_files(trainTextList)
We= word2vec(word2vec_Dictionary, vocabulary, lang)
emb = makingfile(trainTextList, vocabulary, vocabFreq, corpus, alpha, We)
return emb
if __name__ == '__main__':
if len(sys.argv) <3:
sys.exit()
else:
alpha = float(sys.argv[1])
lang= sys.argv[2]
SentenceListTest= sys.argv[3]
emb= main(alpha, lang, SentenceListTest)
# SentenceListTest= ['''A member of the Somali Federal Parliament has been shot dead by unknown gunmen on Thursday morning in Mogadishu, officials said. Ahmed Mohamud Hayd was killed in a drive-by shooting after he left his hotel in a heavily policed area, witnesses said.''',''' His bodyguard was also killed and a parliamentary secretary wounded in the shooting.''']
# emb = main(0.01, 'en', SentenceListTest)
# print emb
| mit |
loretoparisi/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/lines.py | 69 | 48233 | """
This module contains all the 2D line class which can draw with a
variety of line styles, markers and colors.
"""
# TODO: expose cap and join style attrs
from __future__ import division
import warnings
import numpy as np
from numpy import ma
from matplotlib import verbose
import artist
from artist import Artist
from cbook import iterable, is_string_like, is_numlike, ls_mapper, dedent,\
flatten
from colors import colorConverter
from path import Path
from transforms import Affine2D, Bbox, TransformedPath, IdentityTransform
from matplotlib import rcParams
# special-purpose marker identifiers:
(TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN,
CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN) = range(8)
# COVERAGE NOTE: Never called internally or from examples
def unmasked_index_ranges(mask, compressed = True):
warnings.warn("Import this directly from matplotlib.cbook",
DeprecationWarning)
# Warning added 2008/07/22
from matplotlib.cbook import unmasked_index_ranges as _unmasked_index_ranges
return _unmasked_index_ranges(mask, compressed=compressed)
def segment_hits(cx, cy, x, y, radius):
"""
Determine if any line segments are within radius of a
point. Returns the list of line segments that are within that
radius.
"""
# Process single points specially
if len(x) < 2:
res, = np.nonzero( (cx - x)**2 + (cy - y)**2 <= radius**2 )
return res
# We need to lop the last element off a lot.
xr,yr = x[:-1],y[:-1]
# Only look at line segments whose nearest point to C on the line
# lies within the segment.
dx,dy = x[1:]-xr, y[1:]-yr
Lnorm_sq = dx**2+dy**2 # Possibly want to eliminate Lnorm==0
u = ( (cx-xr)*dx + (cy-yr)*dy )/Lnorm_sq
candidates = (u>=0) & (u<=1)
#if any(candidates): print "candidates",xr[candidates]
# Note that there is a little area near one side of each point
# which will be near neither segment, and another which will
# be near both, depending on the angle of the lines. The
# following radius test eliminates these ambiguities.
point_hits = (cx - x)**2 + (cy - y)**2 <= radius**2
#if any(point_hits): print "points",xr[candidates]
candidates = candidates & ~(point_hits[:-1] | point_hits[1:])
# For those candidates which remain, determine how far they lie away
# from the line.
px,py = xr+u*dx,yr+u*dy
line_hits = (cx-px)**2 + (cy-py)**2 <= radius**2
#if any(line_hits): print "lines",xr[candidates]
line_hits = line_hits & candidates
points, = point_hits.ravel().nonzero()
lines, = line_hits.ravel().nonzero()
#print points,lines
return np.concatenate((points,lines))
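# Illustrative sketch of segment_hits: for a two-point horizontal line from
# (0, 0) to (10, 0), a cursor at (5, 1) with a pick radius of 2 pixels hits
# the single segment (index 0):
#
#     >>> segment_hits(5.0, 1.0, np.array([0., 10.]), np.array([0., 0.]), 2.0)  # doctest: +SKIP
#     array([0])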
class Line2D(Artist):
"""
A line - the line can have both a solid linestyle connecting all
the vertices, and a marker at each vertex. Additionally, the
drawing of the solid line is influenced by the drawstyle, eg one
can create "stepped" lines in various styles.
"""
lineStyles = _lineStyles = { # hidden names deprecated
'-' : '_draw_solid',
'--' : '_draw_dashed',
'-.' : '_draw_dash_dot',
':' : '_draw_dotted',
'None' : '_draw_nothing',
' ' : '_draw_nothing',
'' : '_draw_nothing',
}
_drawStyles_l = {
'default' : '_draw_lines',
'steps-mid' : '_draw_steps_mid',
'steps-pre' : '_draw_steps_pre',
'steps-post' : '_draw_steps_post',
}
_drawStyles_s = {
'steps' : '_draw_steps_pre',
}
drawStyles = {}
drawStyles.update(_drawStyles_l)
drawStyles.update(_drawStyles_s)
markers = _markers = { # hidden names deprecated
'.' : '_draw_point',
',' : '_draw_pixel',
'o' : '_draw_circle',
'v' : '_draw_triangle_down',
'^' : '_draw_triangle_up',
'<' : '_draw_triangle_left',
'>' : '_draw_triangle_right',
'1' : '_draw_tri_down',
'2' : '_draw_tri_up',
'3' : '_draw_tri_left',
'4' : '_draw_tri_right',
's' : '_draw_square',
'p' : '_draw_pentagon',
'*' : '_draw_star',
'h' : '_draw_hexagon1',
'H' : '_draw_hexagon2',
'+' : '_draw_plus',
'x' : '_draw_x',
'D' : '_draw_diamond',
'd' : '_draw_thin_diamond',
'|' : '_draw_vline',
'_' : '_draw_hline',
TICKLEFT : '_draw_tickleft',
TICKRIGHT : '_draw_tickright',
TICKUP : '_draw_tickup',
TICKDOWN : '_draw_tickdown',
CARETLEFT : '_draw_caretleft',
CARETRIGHT : '_draw_caretright',
CARETUP : '_draw_caretup',
CARETDOWN : '_draw_caretdown',
'None' : '_draw_nothing',
' ' : '_draw_nothing',
'' : '_draw_nothing',
}
filled_markers = ('o', '^', 'v', '<', '>',
's', 'd', 'D', 'h', 'H', 'p', '*')
zorder = 2
validCap = ('butt', 'round', 'projecting')
validJoin = ('miter', 'round', 'bevel')
def __str__(self):
if self._label != "":
return "Line2D(%s)"%(self._label)
elif hasattr(self, '_x') and len(self._x) > 3:
return "Line2D((%g,%g),(%g,%g),...,(%g,%g))"\
%(self._x[0],self._y[0],self._x[0],self._y[0],self._x[-1],self._y[-1])
elif hasattr(self, '_x'):
return "Line2D(%s)"\
%(",".join(["(%g,%g)"%(x,y) for x,y in zip(self._x,self._y)]))
else:
return "Line2D()"
def __init__(self, xdata, ydata,
linewidth = None, # all Nones default to rc
linestyle = None,
color = None,
marker = None,
markersize = None,
markeredgewidth = None,
markeredgecolor = None,
markerfacecolor = None,
antialiased = None,
dash_capstyle = None,
solid_capstyle = None,
dash_joinstyle = None,
solid_joinstyle = None,
pickradius = 5,
drawstyle = None,
**kwargs
):
"""
Create a :class:`~matplotlib.lines.Line2D` instance with *x*
and *y* data in sequences *xdata*, *ydata*.
The kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
        See :meth:`set_linestyle` for a description of the line styles,
:meth:`set_marker` for a description of the markers, and
:meth:`set_drawstyle` for a description of the draw styles.
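        Example (illustrative sketch; the line still has to be added to an
        Axes instance to be drawn)::

            line = Line2D([0, 1, 2], [0, 1, 0], linewidth=2, color='g',
                          marker='o')
            ax.add_line(line)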
"""
Artist.__init__(self)
#convert sequences to numpy arrays
if not iterable(xdata):
raise RuntimeError('xdata must be a sequence')
if not iterable(ydata):
raise RuntimeError('ydata must be a sequence')
if linewidth is None : linewidth=rcParams['lines.linewidth']
if linestyle is None : linestyle=rcParams['lines.linestyle']
if marker is None : marker=rcParams['lines.marker']
if color is None : color=rcParams['lines.color']
if markersize is None : markersize=rcParams['lines.markersize']
if antialiased is None : antialiased=rcParams['lines.antialiased']
if dash_capstyle is None : dash_capstyle=rcParams['lines.dash_capstyle']
if dash_joinstyle is None : dash_joinstyle=rcParams['lines.dash_joinstyle']
if solid_capstyle is None : solid_capstyle=rcParams['lines.solid_capstyle']
if solid_joinstyle is None : solid_joinstyle=rcParams['lines.solid_joinstyle']
if drawstyle is None : drawstyle='default'
self.set_dash_capstyle(dash_capstyle)
self.set_dash_joinstyle(dash_joinstyle)
self.set_solid_capstyle(solid_capstyle)
self.set_solid_joinstyle(solid_joinstyle)
self.set_linestyle(linestyle)
self.set_drawstyle(drawstyle)
self.set_linewidth(linewidth)
self.set_color(color)
self.set_marker(marker)
self.set_antialiased(antialiased)
self.set_markersize(markersize)
self._dashSeq = None
self.set_markerfacecolor(markerfacecolor)
self.set_markeredgecolor(markeredgecolor)
self.set_markeredgewidth(markeredgewidth)
self._point_size_reduction = 0.5
self.verticalOffset = None
# update kwargs before updating data to give the caller a
# chance to init axes (and hence unit support)
self.update(kwargs)
self.pickradius = pickradius
if is_numlike(self._picker):
self.pickradius = self._picker
self._xorig = np.asarray([])
self._yorig = np.asarray([])
self._invalid = True
self.set_data(xdata, ydata)
def contains(self, mouseevent):
"""
Test whether the mouse event occurred on the line. The pick
radius determines the precision of the location test (usually
within five points of the value). Use
:meth:`~matplotlib.lines.Line2D.get_pickradius` or
:meth:`~matplotlib.lines.Line2D.set_pickradius` to view or
modify it.
Returns *True* if any values are within the radius along with
``{'ind': pointlist}``, where *pointlist* is the set of points
within the radius.
TODO: sort returned indices by distance
"""
if callable(self._contains): return self._contains(self,mouseevent)
if not is_numlike(self.pickradius):
raise ValueError,"pick radius should be a distance"
# Make sure we have data to plot
if self._invalid:
self.recache()
if len(self._xy)==0: return False,{}
# Convert points to pixels
path, affine = self._transformed_path.get_transformed_path_and_affine()
path = affine.transform_path(path)
xy = path.vertices
xt = xy[:, 0]
yt = xy[:, 1]
# Convert pick radius from points to pixels
        if self.figure is None:
            warnings.warn('no figure set when checking if mouse is on line')
pixels = self.pickradius
else:
pixels = self.figure.dpi/72. * self.pickradius
# Check for collision
if self._linestyle in ['None',None]:
# If no line, return the nearby point(s)
d = (xt-mouseevent.x)**2 + (yt-mouseevent.y)**2
ind, = np.nonzero(np.less_equal(d, pixels**2))
else:
# If line, return the nearby segment(s)
ind = segment_hits(mouseevent.x,mouseevent.y,xt,yt,pixels)
# Debugging message
if False and self._label != u'':
print "Checking line",self._label,"at",mouseevent.x,mouseevent.y
print 'xt', xt
print 'yt', yt
#print 'dx,dy', (xt-mouseevent.x)**2., (yt-mouseevent.y)**2.
print 'ind',ind
# Return the point(s) within radius
return len(ind)>0,dict(ind=ind)
def get_pickradius(self):
'return the pick radius used for containment tests'
return self.pickradius
def setpickradius(self,d):
"""Sets the pick radius used for containment tests
ACCEPTS: float distance in points
"""
self.pickradius = d
def set_picker(self,p):
"""Sets the event picker details for the line.
ACCEPTS: float distance in points or callable pick function
``fn(artist, event)``
"""
if callable(p):
self._contains = p
else:
self.pickradius = p
self._picker = p
def get_window_extent(self, renderer):
bbox = Bbox.unit()
bbox.update_from_data_xy(self.get_transform().transform(self.get_xydata()),
ignore=True)
# correct for marker size, if any
if self._marker is not None:
ms = (self._markersize / 72.0 * self.figure.dpi) * 0.5
bbox = bbox.padded(ms)
return bbox
def set_axes(self, ax):
Artist.set_axes(self, ax)
if ax.xaxis is not None:
self._xcid = ax.xaxis.callbacks.connect('units', self.recache)
if ax.yaxis is not None:
self._ycid = ax.yaxis.callbacks.connect('units', self.recache)
set_axes.__doc__ = Artist.set_axes.__doc__
def set_data(self, *args):
"""
Set the x and y data
ACCEPTS: 2D array
"""
if len(args)==1:
x, y = args[0]
else:
x, y = args
not_masked = 0
if not ma.isMaskedArray(x):
x = np.asarray(x)
not_masked += 1
if not ma.isMaskedArray(y):
y = np.asarray(y)
not_masked += 1
if (not_masked < 2 or
(x is not self._xorig and
(x.shape != self._xorig.shape or np.any(x != self._xorig))) or
(y is not self._yorig and
(y.shape != self._yorig.shape or np.any(y != self._yorig)))):
self._xorig = x
self._yorig = y
self._invalid = True
def recache(self):
#if self.axes is None: print 'recache no axes'
#else: print 'recache units', self.axes.xaxis.units, self.axes.yaxis.units
if ma.isMaskedArray(self._xorig) or ma.isMaskedArray(self._yorig):
x = ma.asarray(self.convert_xunits(self._xorig), float)
y = ma.asarray(self.convert_yunits(self._yorig), float)
x = ma.ravel(x)
y = ma.ravel(y)
else:
x = np.asarray(self.convert_xunits(self._xorig), float)
y = np.asarray(self.convert_yunits(self._yorig), float)
x = np.ravel(x)
y = np.ravel(y)
if len(x)==1 and len(y)>1:
x = x * np.ones(y.shape, float)
if len(y)==1 and len(x)>1:
y = y * np.ones(x.shape, float)
if len(x) != len(y):
raise RuntimeError('xdata and ydata must be the same length')
x = x.reshape((len(x), 1))
y = y.reshape((len(y), 1))
if ma.isMaskedArray(x) or ma.isMaskedArray(y):
self._xy = ma.concatenate((x, y), 1)
else:
self._xy = np.concatenate((x, y), 1)
self._x = self._xy[:, 0] # just a view
self._y = self._xy[:, 1] # just a view
# Masked arrays are now handled by the Path class itself
self._path = Path(self._xy)
self._transformed_path = TransformedPath(self._path, self.get_transform())
self._invalid = False
def set_transform(self, t):
"""
set the Transformation instance used by this artist
ACCEPTS: a :class:`matplotlib.transforms.Transform` instance
"""
Artist.set_transform(self, t)
self._invalid = True
# self._transformed_path = TransformedPath(self._path, self.get_transform())
def _is_sorted(self, x):
"return true if x is sorted"
if len(x)<2: return 1
return np.alltrue(x[1:]-x[0:-1]>=0)
def draw(self, renderer):
if self._invalid:
self.recache()
renderer.open_group('line2d')
if not self._visible: return
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_foreground(self._color)
gc.set_antialiased(self._antialiased)
gc.set_linewidth(self._linewidth)
gc.set_alpha(self._alpha)
if self.is_dashed():
cap = self._dashcapstyle
join = self._dashjoinstyle
else:
cap = self._solidcapstyle
join = self._solidjoinstyle
gc.set_joinstyle(join)
gc.set_capstyle(cap)
gc.set_snap(self.get_snap())
funcname = self._lineStyles.get(self._linestyle, '_draw_nothing')
if funcname != '_draw_nothing':
tpath, affine = self._transformed_path.get_transformed_path_and_affine()
self._lineFunc = getattr(self, funcname)
funcname = self.drawStyles.get(self._drawstyle, '_draw_lines')
drawFunc = getattr(self, funcname)
drawFunc(renderer, gc, tpath, affine.frozen())
if self._marker is not None:
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_foreground(self.get_markeredgecolor())
gc.set_linewidth(self._markeredgewidth)
gc.set_alpha(self._alpha)
funcname = self._markers.get(self._marker, '_draw_nothing')
if funcname != '_draw_nothing':
tpath, affine = self._transformed_path.get_transformed_points_and_affine()
markerFunc = getattr(self, funcname)
markerFunc(renderer, gc, tpath, affine.frozen())
renderer.close_group('line2d')
def get_antialiased(self): return self._antialiased
def get_color(self): return self._color
def get_drawstyle(self): return self._drawstyle
def get_linestyle(self): return self._linestyle
def get_linewidth(self): return self._linewidth
def get_marker(self): return self._marker
def get_markeredgecolor(self):
if (is_string_like(self._markeredgecolor) and
self._markeredgecolor == 'auto'):
if self._marker in self.filled_markers:
return 'k'
else:
return self._color
else:
return self._markeredgecolor
def get_markeredgewidth(self): return self._markeredgewidth
def get_markerfacecolor(self):
if (self._markerfacecolor is None or
(is_string_like(self._markerfacecolor) and
self._markerfacecolor.lower()=='none') ):
return self._markerfacecolor
elif (is_string_like(self._markerfacecolor) and
self._markerfacecolor.lower() == 'auto'):
return self._color
else:
return self._markerfacecolor
def get_markersize(self): return self._markersize
def get_data(self, orig=True):
"""
Return the xdata, ydata.
If *orig* is *True*, return the original data
"""
return self.get_xdata(orig=orig), self.get_ydata(orig=orig)
def get_xdata(self, orig=True):
"""
Return the xdata.
If *orig* is *True*, return the original data, else the
processed data.
"""
if orig:
return self._xorig
if self._invalid:
self.recache()
return self._x
def get_ydata(self, orig=True):
"""
Return the ydata.
If *orig* is *True*, return the original data, else the
processed data.
"""
if orig:
return self._yorig
if self._invalid:
self.recache()
return self._y
def get_path(self):
"""
Return the :class:`~matplotlib.path.Path` object associated
with this line.
"""
if self._invalid:
self.recache()
return self._path
def get_xydata(self):
"""
Return the *xy* data as a Nx2 numpy array.
"""
if self._invalid:
self.recache()
return self._xy
def set_antialiased(self, b):
"""
True if line should be drawn with antialiased rendering
ACCEPTS: [True | False]
"""
self._antialiased = b
def set_color(self, color):
"""
Set the color of the line
ACCEPTS: any matplotlib color
"""
self._color = color
def set_drawstyle(self, drawstyle):
"""
Set the drawstyle of the plot
'default' connects the points with lines. The steps variants
produce step-plots. 'steps' is equivalent to 'steps-pre' and
is maintained for backward-compatibility.
ACCEPTS: [ 'default' | 'steps' | 'steps-pre' | 'steps-mid' | 'steps-post' ]
"""
self._drawstyle = drawstyle
def set_linewidth(self, w):
"""
Set the line width in points
ACCEPTS: float value in points
"""
self._linewidth = w
def set_linestyle(self, linestyle):
"""
Set the linestyle of the line (also accepts drawstyles)
================ =================
linestyle description
================ =================
'-' solid
'--' dashed
'-.' dash_dot
':' dotted
'None' draw nothing
' ' draw nothing
'' draw nothing
================ =================
'steps' is equivalent to 'steps-pre' and is maintained for
backward-compatibility.
.. seealso::
:meth:`set_drawstyle`
ACCEPTS: [ '-' | '--' | '-.' | ':' | 'None' | ' ' | '' ] and
any drawstyle in combination with a linestyle, e.g. 'steps--'.
"""
# handle long drawstyle names before short ones !
for ds in flatten([k.keys() for k in (self._drawStyles_l,
self._drawStyles_s)], is_string_like):
if linestyle.startswith(ds):
self.set_drawstyle(ds)
if len(linestyle) > len(ds):
linestyle = linestyle[len(ds):]
else:
linestyle = '-'
if linestyle not in self._lineStyles:
if linestyle in ls_mapper:
linestyle = ls_mapper[linestyle]
else:
verbose.report('Unrecognized line style %s, %s' %
(linestyle, type(linestyle)))
if linestyle in [' ','']:
linestyle = 'None'
self._linestyle = linestyle
def set_marker(self, marker):
"""
Set the line marker
========== ==========================
marker description
========== ==========================
'.' point
',' pixel
'o' circle
'v' triangle_down
'^' triangle_up
'<' triangle_left
'>' triangle_right
'1' tri_down
'2' tri_up
'3' tri_left
'4' tri_right
's' square
'p' pentagon
'*' star
'h' hexagon1
'H' hexagon2
'+' plus
'x' x
'D' diamond
'd' thin_diamond
'|' vline
'_' hline
TICKLEFT tickleft
TICKRIGHT tickright
TICKUP tickup
TICKDOWN tickdown
CARETLEFT caretleft
CARETRIGHT caretright
CARETUP caretup
CARETDOWN caretdown
'None' nothing
' ' nothing
'' nothing
========== ==========================
ACCEPTS: [ '+' | '*' | ',' | '.' | '1' | '2' | '3' | '4'
| '<' | '>' | 'D' | 'H' | '^' | '_' | 'd'
| 'h' | 'o' | 'p' | 's' | 'v' | 'x' | '|'
| TICKUP | TICKDOWN | TICKLEFT | TICKRIGHT
| 'None' | ' ' | '' ]
"""
if marker not in self._markers:
verbose.report('Unrecognized marker style %s, %s' %
(marker, type(marker)))
if marker in [' ','']:
marker = 'None'
self._marker = marker
self._markerFunc = self._markers[marker]
def set_markeredgecolor(self, ec):
"""
Set the marker edge color
ACCEPTS: any matplotlib color
"""
if ec is None :
ec = 'auto'
self._markeredgecolor = ec
def set_markeredgewidth(self, ew):
"""
Set the marker edge width in points
ACCEPTS: float value in points
"""
if ew is None :
ew = rcParams['lines.markeredgewidth']
self._markeredgewidth = ew
def set_markerfacecolor(self, fc):
"""
Set the marker face color
ACCEPTS: any matplotlib color
"""
if fc is None :
fc = 'auto'
self._markerfacecolor = fc
def set_markersize(self, sz):
"""
Set the marker size in points
ACCEPTS: float
"""
self._markersize = sz
def set_xdata(self, x):
"""
Set the data np.array for x
ACCEPTS: 1D array
"""
x = np.asarray(x)
self.set_data(x, self._yorig)
def set_ydata(self, y):
"""
Set the data np.array for y
ACCEPTS: 1D array
"""
y = np.asarray(y)
self.set_data(self._xorig, y)
def set_dashes(self, seq):
"""
Set the dash sequence, sequence of dashes with on off ink in
points. If seq is empty or if seq = (None, None), the
linestyle will be set to solid.
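For instance, ``set_dashes([6, 2])`` requests 6 points of ink followed
by a 2 point gap, repeated along the line.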
ACCEPTS: sequence of on/off ink in points
"""
if seq == (None, None) or len(seq)==0:
self.set_linestyle('-')
else:
self.set_linestyle('--')
self._dashSeq = seq # TODO: offset ignored for now
def _draw_lines(self, renderer, gc, path, trans):
self._lineFunc(renderer, gc, path, trans)
def _draw_steps_pre(self, renderer, gc, path, trans):
vertices = self._xy
steps = ma.zeros((2*len(vertices)-1, 2), np.float_)
steps[0::2, 0], steps[1::2, 0] = vertices[:, 0], vertices[:-1, 0]
steps[0::2, 1], steps[1:-1:2, 1] = vertices[:, 1], vertices[1:, 1]
path = Path(steps)
path = path.transformed(self.get_transform())
self._lineFunc(renderer, gc, path, IdentityTransform())
def _draw_steps_post(self, renderer, gc, path, trans):
vertices = self._xy
steps = ma.zeros((2*len(vertices)-1, 2), np.float_)
steps[::2, 0], steps[1:-1:2, 0] = vertices[:, 0], vertices[1:, 0]
steps[0::2, 1], steps[1::2, 1] = vertices[:, 1], vertices[:-1, 1]
path = Path(steps)
path = path.transformed(self.get_transform())
self._lineFunc(renderer, gc, path, IdentityTransform())
def _draw_steps_mid(self, renderer, gc, path, trans):
vertices = self._xy
steps = ma.zeros((2*len(vertices), 2), np.float_)
steps[1:-1:2, 0] = 0.5 * (vertices[:-1, 0] + vertices[1:, 0])
steps[2::2, 0] = 0.5 * (vertices[:-1, 0] + vertices[1:, 0])
steps[0, 0] = vertices[0, 0]
steps[-1, 0] = vertices[-1, 0]
steps[0::2, 1], steps[1::2, 1] = vertices[:, 1], vertices[:, 1]
path = Path(steps)
path = path.transformed(self.get_transform())
self._lineFunc(renderer, gc, path, IdentityTransform())
def _draw_nothing(self, *args, **kwargs):
pass
def _draw_solid(self, renderer, gc, path, trans):
gc.set_linestyle('solid')
renderer.draw_path(gc, path, trans)
def _draw_dashed(self, renderer, gc, path, trans):
gc.set_linestyle('dashed')
if self._dashSeq is not None:
gc.set_dashes(0, self._dashSeq)
renderer.draw_path(gc, path, trans)
def _draw_dash_dot(self, renderer, gc, path, trans):
gc.set_linestyle('dashdot')
renderer.draw_path(gc, path, trans)
def _draw_dotted(self, renderer, gc, path, trans):
gc.set_linestyle('dotted')
renderer.draw_path(gc, path, trans)
def _draw_point(self, renderer, gc, path, path_trans):
w = renderer.points_to_pixels(self._markersize) * \
self._point_size_reduction * 0.5
gc.set_snap(renderer.points_to_pixels(self._markersize) > 3.0)
rgbFace = self._get_rgb_face()
transform = Affine2D().scale(w)
renderer.draw_markers(
gc, Path.unit_circle(), transform, path, path_trans,
rgbFace)
_draw_pixel_transform = Affine2D().translate(-0.5, -0.5)
def _draw_pixel(self, renderer, gc, path, path_trans):
rgbFace = self._get_rgb_face()
gc.set_snap(False)
renderer.draw_markers(gc, Path.unit_rectangle(),
self._draw_pixel_transform,
path, path_trans, rgbFace)
def _draw_circle(self, renderer, gc, path, path_trans):
w = renderer.points_to_pixels(self._markersize) * 0.5
gc.set_snap(renderer.points_to_pixels(self._markersize) > 3.0)
rgbFace = self._get_rgb_face()
transform = Affine2D().scale(w, w)
renderer.draw_markers(
gc, Path.unit_circle(), transform, path, path_trans,
rgbFace)
_triangle_path = Path([[0.0, 1.0], [-1.0, -1.0], [1.0, -1.0], [0.0, 1.0]])
def _draw_triangle_up(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset, offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, self._triangle_path, transform,
path, path_trans, rgbFace)
def _draw_triangle_down(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset, -offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, self._triangle_path, transform,
path, path_trans, rgbFace)
def _draw_triangle_left(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset, offset).rotate_deg(90)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, self._triangle_path, transform,
path, path_trans, rgbFace)
def _draw_triangle_right(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset, offset).rotate_deg(-90)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, self._triangle_path, transform,
path, path_trans, rgbFace)
def _draw_square(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 2.0)
side = renderer.points_to_pixels(self._markersize)
transform = Affine2D().translate(-0.5, -0.5).scale(side)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_rectangle(), transform,
path, path_trans, rgbFace)
def _draw_diamond(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
side = renderer.points_to_pixels(self._markersize)
transform = Affine2D().translate(-0.5, -0.5).rotate_deg(45).scale(side)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_rectangle(), transform,
path, path_trans, rgbFace)
def _draw_thin_diamond(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = renderer.points_to_pixels(self._markersize)
transform = Affine2D().translate(-0.5, -0.5) \
.rotate_deg(45).scale(offset * 0.6, offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_rectangle(), transform,
path, path_trans, rgbFace)
def _draw_pentagon(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5 * renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_regular_polygon(5), transform,
path, path_trans, rgbFace)
def _draw_star(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5 * renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
rgbFace = self._get_rgb_face()
_starpath = Path.unit_regular_star(5, innerCircle=0.381966)
renderer.draw_markers(gc, _starpath, transform,
path, path_trans, rgbFace)
def _draw_hexagon1(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5 * renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_regular_polygon(6), transform,
path, path_trans, rgbFace)
def _draw_hexagon2(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5 * renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(30)
rgbFace = self._get_rgb_face()
renderer.draw_markers(gc, Path.unit_regular_polygon(6), transform,
path, path_trans, rgbFace)
_line_marker_path = Path([[0.0, -1.0], [0.0, 1.0]])
def _draw_vline(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._line_marker_path, transform,
path, path_trans)
def _draw_hline(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(90)
renderer.draw_markers(gc, self._line_marker_path, transform,
path, path_trans)
_tickhoriz_path = Path([[0.0, 0.0], [1.0, 0.0]])
def _draw_tickleft(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = renderer.points_to_pixels(self._markersize)
marker_transform = Affine2D().scale(-offset, 1.0)
renderer.draw_markers(gc, self._tickhoriz_path, marker_transform,
path, path_trans)
def _draw_tickright(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = renderer.points_to_pixels(self._markersize)
marker_transform = Affine2D().scale(offset, 1.0)
renderer.draw_markers(gc, self._tickhoriz_path, marker_transform,
path, path_trans)
_tickvert_path = Path([[-0.0, 0.0], [-0.0, 1.0]])
def _draw_tickup(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = renderer.points_to_pixels(self._markersize)
marker_transform = Affine2D().scale(1.0, offset)
renderer.draw_markers(gc, self._tickvert_path, marker_transform,
path, path_trans)
def _draw_tickdown(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0)
offset = renderer.points_to_pixels(self._markersize)
marker_transform = Affine2D().scale(1.0, -offset)
renderer.draw_markers(gc, self._tickvert_path, marker_transform,
path, path_trans)
_plus_path = Path([[-1.0, 0.0], [1.0, 0.0],
[0.0, -1.0], [0.0, 1.0]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _draw_plus(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._plus_path, transform,
path, path_trans)
_tri_path = Path([[0.0, 0.0], [0.0, -1.0],
[0.0, 0.0], [0.8, 0.5],
[0.0, 0.0], [-0.8, 0.5]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _draw_tri_down(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._tri_path, transform,
path, path_trans)
def _draw_tri_up(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(180)
renderer.draw_markers(gc, self._tri_path, transform,
path, path_trans)
def _draw_tri_left(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(90)
renderer.draw_markers(gc, self._tri_path, transform,
path, path_trans)
def _draw_tri_right(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(270)
renderer.draw_markers(gc, self._tri_path, transform,
path, path_trans)
_caret_path = Path([[-1.0, 1.5], [0.0, 0.0], [1.0, 1.5]])
def _draw_caretdown(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._caret_path, transform,
path, path_trans)
def _draw_caretup(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(180)
renderer.draw_markers(gc, self._caret_path, transform,
path, path_trans)
def _draw_caretleft(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(270)
renderer.draw_markers(gc, self._caret_path, transform,
path, path_trans)
def _draw_caretright(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset).rotate_deg(90)
renderer.draw_markers(gc, self._caret_path, transform,
path, path_trans)
_x_path = Path([[-1.0, -1.0], [1.0, 1.0],
[-1.0, 1.0], [1.0, -1.0]],
[Path.MOVETO, Path.LINETO,
Path.MOVETO, Path.LINETO])
def _draw_x(self, renderer, gc, path, path_trans):
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0)
offset = 0.5*renderer.points_to_pixels(self._markersize)
transform = Affine2D().scale(offset)
renderer.draw_markers(gc, self._x_path, transform,
path, path_trans)
def update_from(self, other):
'copy properties from other to self'
Artist.update_from(self, other)
self._linestyle = other._linestyle
self._linewidth = other._linewidth
self._color = other._color
self._markersize = other._markersize
self._markerfacecolor = other._markerfacecolor
self._markeredgecolor = other._markeredgecolor
self._markeredgewidth = other._markeredgewidth
self._dashSeq = other._dashSeq
self._dashcapstyle = other._dashcapstyle
self._dashjoinstyle = other._dashjoinstyle
self._solidcapstyle = other._solidcapstyle
self._solidjoinstyle = other._solidjoinstyle
self._linestyle = other._linestyle
self._marker = other._marker
self._drawstyle = other._drawstyle
def _get_rgb_face(self):
facecolor = self.get_markerfacecolor()
if is_string_like(facecolor) and facecolor.lower()=='none':
rgbFace = None
else:
rgbFace = colorConverter.to_rgb(facecolor)
return rgbFace
# some aliases....
def set_aa(self, val):
'alias for set_antialiased'
self.set_antialiased(val)
def set_c(self, val):
'alias for set_color'
self.set_color(val)
def set_ls(self, val):
'alias for set_linestyle'
self.set_linestyle(val)
def set_lw(self, val):
'alias for set_linewidth'
self.set_linewidth(val)
def set_mec(self, val):
'alias for set_markeredgecolor'
self.set_markeredgecolor(val)
def set_mew(self, val):
'alias for set_markeredgewidth'
self.set_markeredgewidth(val)
def set_mfc(self, val):
'alias for set_markerfacecolor'
self.set_markerfacecolor(val)
def set_ms(self, val):
'alias for set_markersize'
self.set_markersize(val)
def get_aa(self):
'alias for get_antialiased'
return self.get_antialiased()
def get_c(self):
'alias for get_color'
return self.get_color()
def get_ls(self):
'alias for get_linestyle'
return self.get_linestyle()
def get_lw(self):
'alias for get_linewidth'
return self.get_linewidth()
def get_mec(self):
'alias for get_markeredgecolor'
return self.get_markeredgecolor()
def get_mew(self):
'alias for get_markeredgewidth'
return self.get_markeredgewidth()
def get_mfc(self):
'alias for get_markerfacecolor'
return self.get_markerfacecolor()
def get_ms(self):
'alias for get_markersize'
return self.get_markersize()
def set_dash_joinstyle(self, s):
"""
Set the join style for dashed linestyles
ACCEPTS: ['miter' | 'round' | 'bevel']
"""
s = s.lower()
if s not in self.validJoin:
raise ValueError('set_dash_joinstyle passed "%s";\n' % (s,)
+ 'valid joinstyles are %s' % (self.validJoin,))
self._dashjoinstyle = s
def set_solid_joinstyle(self, s):
"""
Set the join style for solid linestyles
ACCEPTS: ['miter' | 'round' | 'bevel']
"""
s = s.lower()
if s not in self.validJoin:
raise ValueError('set_solid_joinstyle passed "%s";\n' % (s,)
+ 'valid joinstyles are %s' % (self.validJoin,))
self._solidjoinstyle = s
def get_dash_joinstyle(self):
"""
Get the join style for dashed linestyles
"""
return self._dashjoinstyle
def get_solid_joinstyle(self):
"""
Get the join style for solid linestyles
"""
return self._solidjoinstyle
def set_dash_capstyle(self, s):
"""
Set the cap style for dashed linestyles
ACCEPTS: ['butt' | 'round' | 'projecting']
"""
s = s.lower()
if s not in self.validCap:
raise ValueError('set_dash_capstyle passed "%s";\n' % (s,)
+ 'valid capstyles are %s' % (self.validCap,))
self._dashcapstyle = s
def set_solid_capstyle(self, s):
"""
Set the cap style for solid linestyles
ACCEPTS: ['butt' | 'round' | 'projecting']
"""
s = s.lower()
if s not in self.validCap:
raise ValueError('set_solid_capstyle passed "%s";\n' % (s,)
+ 'valid capstyles are %s' % (self.validCap,))
self._solidcapstyle = s
def get_dash_capstyle(self):
"""
Get the cap style for dashed linestyles
"""
return self._dashcapstyle
def get_solid_capstyle(self):
"""
Get the cap style for solid linestyles
"""
return self._solidcapstyle
def is_dashed(self):
'return True if line is dashstyle'
return self._linestyle in ('--', '-.', ':')
class VertexSelector:
"""
Manage the callbacks to maintain a list of selected vertices for
:class:`matplotlib.lines.Line2D`. Derived classes should override
:meth:`~matplotlib.lines.VertexSelector.process_selected` to do
something with the picks.
Here is an example which highlights the selected verts with red
circles::
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as lines
class HighlightSelected(lines.VertexSelector):
def __init__(self, line, fmt='ro', **kwargs):
lines.VertexSelector.__init__(self, line)
self.markers, = self.axes.plot([], [], fmt, **kwargs)
def process_selected(self, ind, xs, ys):
self.markers.set_data(xs, ys)
self.canvas.draw()
fig = plt.figure()
ax = fig.add_subplot(111)
x, y = np.random.rand(2, 30)
line, = ax.plot(x, y, 'bs-', picker=5)
selector = HighlightSelected(line)
plt.show()
"""
def __init__(self, line):
"""
Initialize the class with a :class:`matplotlib.lines.Line2D`
instance. The line should already be added to some
:class:`matplotlib.axes.Axes` instance and should have the
picker property set.
"""
if not hasattr(line, 'axes'):
raise RuntimeError('You must first add the line to the Axes')
if line.get_picker() is None:
raise RuntimeError('You must first set the picker property of the line')
self.axes = line.axes
self.line = line
self.canvas = self.axes.figure.canvas
self.cid = self.canvas.mpl_connect('pick_event', self.onpick)
self.ind = set()
def process_selected(self, ind, xs, ys):
"""
Default "do nothing" implementation of the
:meth:`process_selected` method.
*ind* are the indices of the selected vertices. *xs* and *ys*
are the coordinates of the selected vertices.
"""
pass
def onpick(self, event):
'When the line is picked, update the set of selected indices.'
if event.artist is not self.line: return
for i in event.ind:
if i in self.ind:
self.ind.remove(i)
else:
self.ind.add(i)
ind = list(self.ind)
ind.sort()
xdata, ydata = self.line.get_data()
self.process_selected(ind, xdata[ind], ydata[ind])
lineStyles = Line2D._lineStyles
lineMarkers = Line2D._markers
drawStyles = Line2D.drawStyles
artist.kwdocd['Line2D'] = artist.kwdoc(Line2D)
# You can not set the docstring of an instancemethod,
# but you can on the underlying function. Go figure.
Line2D.__init__.im_func.__doc__ = dedent(Line2D.__init__.__doc__) % artist.kwdocd
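if __name__ == '__main__':
    # Minimal usage sketch (assumes matplotlib.pyplot and a working
    # interactive backend are available; neither is imported by this module
    # itself): draw a dashed, circle-marked sine curve and a stepped cosine
    # curve, exercising the Line2D properties defined above.
    import numpy as np
    import matplotlib.pyplot as plt
    x = np.linspace(0.0, 10.0, 25)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # ax.plot returns a list of Line2D instances; unpack the single line.
    line, = ax.plot(x, np.sin(x), linestyle='--', marker='o', markersize=5)
    line.set_dashes([6, 2])  # 6 points of ink followed by a 2 point gap
    ax.plot(x, np.cos(x), drawstyle='steps-mid', color='0.5')
    plt.show()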
| agpl-3.0 |
yarikoptic/pystatsmodels | statsmodels/sandbox/examples/ex_random_panel.py | 3 | 5995 | # -*- coding: utf-8 -*-
"""
Created on Fri May 18 13:05:47 2012
Author: Josef Perktold
moved example from main of random_panel
"""
import numpy as np
from statsmodels.sandbox.panel.panel_short import ShortPanelGLS, ShortPanelGLS2
from statsmodels.sandbox.panel.random_panel import PanelSample
import statsmodels.sandbox.panel.correlation_structures as cs
import statsmodels.stats.sandwich_covariance as sw
#from statsmodels.stats.sandwich_covariance import (
# S_hac_groupsum, weights_bartlett, _HCCM2)
from statsmodels.stats.moment_helpers import cov2corr, se_cov
cov_nw_panel2 = sw.cov_nw_groupsum
examples = ['ex1']
if 'ex1' in examples:
nobs = 100
nobs_i = 5
n_groups = nobs // nobs_i
k_vars = 3
# dgp = PanelSample(nobs, k_vars, n_groups, corr_structure=cs.corr_equi,
# corr_args=(0.6,))
# dgp = PanelSample(nobs, k_vars, n_groups, corr_structure=cs.corr_ar,
# corr_args=([1, -0.95],))
dgp = PanelSample(nobs, k_vars, n_groups, corr_structure=cs.corr_arma,
corr_args=([1], [1., -0.9],), seed=377769)
print 'seed', dgp.seed
y = dgp.generate_panel()
noise = y - dgp.y_true
print np.corrcoef(y.reshape(-1,n_groups, order='F'))
print np.corrcoef(noise.reshape(-1,n_groups, order='F'))
mod = ShortPanelGLS2(y, dgp.exog, dgp.groups)
res = mod.fit()
print res.params
print res.bse
#Now what?
#res.resid is of transformed model
#np.corrcoef(res.resid.reshape(-1,n_groups, order='F'))
y_pred = np.dot(mod.exog, res.params)
resid = y - y_pred
print np.corrcoef(resid.reshape(-1,n_groups, order='F'))
print resid.std()
err = y_pred - dgp.y_true
print err.std()
#OLS standard errors are too small
mod.res_pooled.params
mod.res_pooled.bse
#heteroscedasticity robust doesn't help
mod.res_pooled.HC1_se
#compare with cluster robust se
print sw.se_cov(sw.cov_cluster(mod.res_pooled, dgp.groups.astype(int)))
#not bad, pretty close to panel estimator
#and with Newey-West Hac
print sw.se_cov(sw.cov_nw_panel(mod.res_pooled, 4, mod.group.groupidx))
#too small, assuming no bugs,
#see Peterson, assuming it refers to the same kind of model
print dgp.cov
mod2 = ShortPanelGLS(y, dgp.exog, dgp.groups)
res2 = mod2.fit_iterative(2)
print res2.params
print res2.bse
#both implementations produce the same results:
from numpy.testing import assert_almost_equal
assert_almost_equal(res.params, res2.params, decimal=12)
assert_almost_equal(res.bse, res2.bse, decimal=13)
mod5 = ShortPanelGLS(y, dgp.exog, dgp.groups)
res5 = mod5.fit_iterative(5)
print res5.params
print res5.bse
#fitting once is the same as OLS
#note: I need to create a new instance, otherwise it continues fitting
mod1 = ShortPanelGLS(y, dgp.exog, dgp.groups)
res1 = mod1.fit_iterative(1)
res_ols = mod1._fit_ols()
assert_almost_equal(res1.params, res_ols.params, decimal=12)
assert_almost_equal(res1.bse, res_ols.bse, decimal=13)
#cov_hac_panel with uniform_kernel is the same as cov_cluster for balanced
#panel with full length kernel
#I fixed the default correction to be equal
mod2._fit_ols()
cov_clu = sw.cov_cluster(mod2.res_pooled, dgp.groups.astype(int))
clubse = se_cov(cov_clu)
cov_uni = sw.cov_nw_panel(mod2.res_pooled, 4, mod2.group.groupidx,
weights_func=sw.weights_uniform,
use_correction='cluster')
assert_almost_equal(cov_uni, cov_clu, decimal=7)
#without correction
cov_clu2 = sw.cov_cluster(mod2.res_pooled, dgp.groups.astype(int),
use_correction=False)
cov_uni2 = sw.cov_nw_panel(mod2.res_pooled, 4, mod2.group.groupidx,
weights_func=sw.weights_uniform,
use_correction=False)
assert_almost_equal(cov_uni2, cov_clu2, decimal=8)
cov_white = sw.cov_white_simple(mod2.res_pooled)
cov_pnw0 = sw.cov_nw_panel(mod2.res_pooled, 0, mod2.group.groupidx,
use_correction='hac')
assert_almost_equal(cov_pnw0, cov_white, decimal=13)
time = np.tile(np.arange(nobs_i), n_groups)
#time = mod2.group.group_int
cov_pnw1 = sw.cov_nw_panel(mod2.res_pooled, 4, mod2.group.groupidx)
cov_pnw2 = cov_nw_panel2(mod2.res_pooled, 4, time)
#s = sw.group_sums(x, time)
c2, ct, cg = sw.cov_cluster_2groups(mod2.res_pooled, time, dgp.groups.astype(int), use_correction=False)
ct_nw0 = cov_nw_panel2(mod2.res_pooled, 0, time, weights_func=sw.weights_uniform, use_correction=False)
cg_nw0 = cov_nw_panel2(mod2.res_pooled, 0, dgp.groups.astype(int), weights_func=sw.weights_uniform, use_correction=False)
assert_almost_equal(ct_nw0, ct, decimal=13)
assert_almost_equal(cg_nw0, cg, decimal=13) #pnw2 0 lags
assert_almost_equal(cov_clu2, cg, decimal=13)
assert_almost_equal(cov_uni2, cg, decimal=8) #pnw all lags
import pandas as pa
#pandas.DataFrame doesn't do inplace append
se = pa.DataFrame(res_ols.bse[None,:], index=['OLS'])
se = se.append(pa.DataFrame(res5.bse[None,:], index=['PGLSit5']))
clbse = sw.se_cov(sw.cov_cluster(mod.res_pooled, dgp.groups.astype(int)))
se = se.append(pa.DataFrame(clbse[None,:], index=['OLSclu']))
pnwse = sw.se_cov(sw.cov_nw_panel(mod.res_pooled, 4, mod.group.groupidx))
se = se.append(pa.DataFrame(pnwse[None,:], index=['OLSpnw']))
print se
#list(se.index)
from statsmodels.iolib.table import SimpleTable
headers = [str(i) for i in se.columns]
stubs=list(se.index)
# print SimpleTable(np.round(np.asarray(se), 4),
# headers=headers,
# stubs=stubs)
print SimpleTable(np.asarray(se), headers=headers, stubs=stubs,
txt_fmt=dict(data_fmts=['%10.4f']),
title='Standard Errors')
| bsd-3-clause |
mehdidc/scikit-learn | sklearn/datasets/samples_generator.py | 10 | 55091 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import warnings
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
of sampled features, and arbitrary noise for the remaining features.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
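Examples
--------
A minimal call, checking only the output shapes since the feature values
depend on the pseudo-random draws:
>>> from sklearn.datasets.samples_generator import make_classification
>>> X, y = make_classification(n_samples=10, random_state=0)
>>> X.shape
(10, 20)
>>> y.shape
(10,)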
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator=False,
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
return_indicator : bool, optional (default=False)
If ``True``, return ``Y`` in the binary indicator format, else
return a tuple of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array or sparse CSR matrix of shape [n_samples, n_features]
The generated samples.
Y : tuple of lists or array of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
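Examples
--------
A minimal call in the indicator format; only shapes are shown, as the
sampled words and labels depend on the random state:
>>> from sklearn.datasets.samples_generator import make_multilabel_classification
>>> X, Y = make_multilabel_classification(n_samples=10, return_indicator=True, random_state=0)
>>> X.shape
(10, 20)
>>> Y.shape
(10, 5)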
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick the number of labels per document by rejection sampling
# (nonzero unless unlabeled samples are allowed, and at most n_classes)
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
if return_indicator:
lb = MultiLabelBinarizer()
Y = lb.fit([range(n_classes)]).transform(Y)
else:
warnings.warn('Support for the sequence of sequences multilabel '
'representation is being deprecated and replaced with '
'a sparse indicator matrix. '
'return_indicator will default to True from version '
'0.17.',
DeprecationWarning)
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
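Examples
--------
Only the output shapes are checked here, since the samples are Gaussian
draws:
>>> from sklearn.datasets.samples_generator import make_hastie_10_2
>>> X, y = make_hastie_10_2(n_samples=10, random_state=0)
>>> X.shape
(10, 10)
>>> y.shape
(10,)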
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
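Examples
--------
A small problem that also returns the underlying coefficients; only shapes
are shown, as the values depend on the random state:
>>> from sklearn.datasets.samples_generator import make_regression
>>> X, y, coef = make_regression(n_samples=10, n_features=5, n_informative=2, coef=True, random_state=0)
>>> X.shape
(10, 5)
>>> y.shape
(10,)
>>> coef.shape
(5,)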
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
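Examples
--------
A minimal call (shapes only; the point coordinates depend on the noise
draws):
>>> from sklearn.datasets.samples_generator import make_circles
>>> X, y = make_circles(n_samples=10, noise=0.05, random_state=0)
>>> X.shape
(10, 2)
>>> y.shape
(10,)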
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
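Examples
--------
A minimal call (shapes only; the point coordinates depend on the noise
draws):
>>> from sklearn.datasets.samples_generator import make_moons
>>> X, y = make_moons(n_samples=10, noise=0.1, random_state=0)
>>> X.shape
(10, 2)
>>> y.shape
(10,)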
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std : float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, n in enumerate(n_samples_per_center):
X.append(centers[i] + generator.normal(scale=cluster_std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
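Examples
--------
Only shapes are shown; note that ``n_features`` must be at least 5:
>>> from sklearn.datasets.samples_generator import make_friedman1
>>> X, y = make_friedman1(n_samples=10, n_features=5, random_state=0)
>>> X.shape
(10, 5)
>>> y.shape
(10,)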
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
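Examples
--------
The input always has four features; only shapes are shown:
>>> from sklearn.datasets.samples_generator import make_friedman2
>>> X, y = make_friedman2(n_samples=10, random_state=0)
>>> X.shape
(10, 4)
>>> y.shape
(10,)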
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
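# Illustrative helper (not part of the module's public API): the singular
# values of the generated matrix follow the bell-shaped-plus-fat-tail profile
# described above, starting at (1 - tail_strength) + tail_strength == 1 and
# decaying monotonically.
def _example_low_rank_matrix():
    X = make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
                             tail_strength=0.5, random_state=0)
    s = linalg.svd(X, compute_uv=False)
    assert abs(s[0] - 1.0) < 1e-6
    assert s[0] > s[20] > s[-1]
    return s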
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
    # materialise the map so that three arrays are returned on Python 3 as well
    return tuple(map(np.squeeze, (Y, D, X)))
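# Illustrative helper (not part of the module's public API): the three
# returned arrays satisfy Y = D X, the dictionary atoms have unit norm, and
# each column of X has exactly ``n_nonzero_coefs`` active entries.
def _example_sparse_coded_signal():
    Y, D, X = make_sparse_coded_signal(n_samples=15, n_components=30,
                                       n_features=20, n_nonzero_coefs=3,
                                       random_state=0)
    assert Y.shape == (20, 15) and D.shape == (20, 30) and X.shape == (30, 15)
    assert np.allclose(Y, np.dot(D, X))
    assert np.allclose(np.sqrt(np.sum(D ** 2, axis=0)), 1.0)
    assert np.all(np.sum(X != 0, axis=0) == 3)
    return Y, D, X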
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
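# Illustrative helper (not part of the module's public API): symmetry and
# positive-definiteness can be confirmed from the eigenvalues, which by
# construction lie in (1, 2).
def _example_spd_matrix():
    X = make_spd_matrix(n_dim=5, random_state=0)
    assert np.allclose(X, X.T)
    assert np.all(linalg.eigvalsh(X) > 0)
    return X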
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
    The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
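# Illustrative helper (not part of the module's public API): the generated
# precision matrix is symmetric positive-definite (prec = chol' * chol with a
# non-singular triangular factor) and mostly zero off the diagonal when alpha
# is close to 1.
def _example_sparse_spd_matrix():
    prec = make_sparse_spd_matrix(dim=10, alpha=0.95, random_state=0)
    assert np.allclose(prec, prec.T)
    assert np.all(linalg.eigvalsh(prec) > 0)
    off_diag = prec - np.diag(np.diag(prec))
    assert np.mean(off_diag == 0) > 0.5
    return prec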
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
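# Illustrative helper (not part of the module's public API): X lives in 3-D
# and t is the angle that parametrises the roll, restricted to
# [1.5 * pi, 4.5 * pi) by construction; t is typically used to colour scatter
# plots of manifold-learning embeddings.
def _example_swiss_roll():
    X, t = make_swiss_roll(n_samples=500, noise=0.05, random_state=0)
    assert X.shape == (500, 3)
    assert t.shape == (500,)
    assert t.min() >= 1.5 * np.pi and t.max() < 4.5 * np.pi
    return X, t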
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
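# Illustrative helper (not part of the module's public API): because labels
# are assigned by quantile of the distance from the mean, the classes come out
# exactly balanced when n_samples is a multiple of n_classes.
def _example_gaussian_quantiles():
    X, y = make_gaussian_quantiles(n_samples=300, n_features=2, n_classes=3,
                                   random_state=0)
    assert X.shape == (300, 2) and y.shape == (300,)
    assert np.all(np.bincount(y) == 100)
    return X, y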
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
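# Illustrative helper (not part of the module's public API): with noise=0
# every bicluster is a constant block, and the returned boolean indicators
# recover it even after shuffling.
def _example_biclusters():
    data, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3,
                                       noise=0.0, shuffle=True,
                                       random_state=0)
    assert data.shape == (30, 20)
    assert rows.shape == (3, 30) and cols.shape == (3, 20)
    # All entries of the recovered block share a single value.
    block = data[rows[0]][:, cols[0]]
    assert np.unique(block).size <= 1
    return data, rows, cols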
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])
return result, rows, cols
| bsd-3-clause |
NigelCleland/scada_wind | scada_wind/analysis.py | 1 | 1585 | import pandas as pd
import numpy as np
import datetime
import glob
import os
import matplotlib.pyplot as plt
def load_series(fName, column):
""" Load a single series from the Filename with the index set by Epoc
Seconds
"""
df = pd.read_csv(fName)
df.index = df["Epoc Seconds"]
return df[column].copy()
def load_folder(directory, farm="West Wind", column=None):
all_files = []
for fdir, subdir, files in os.walk(directory):
if farm.replace(' ', '') in fdir:
for f in files:
all_files.append(os.path.join(fdir, f))
return pd.concat([load_series(f, column) for f in all_files], axis=1)
def stream_plot(df):
fig, axes = plt.subplots(1,1, figsize=(16,9))
for q in [98, 95, 90, 75, 50, 25, 10, 5, 2]:
label = "%s Percentile" % q
        df.apply(np.percentile, q=q, axis=1).plot(ax=axes, label=label)
axes.legend()
return fig, axes
def process_and_plot(directory, column="Cumulative Deviation"):
wind_farms = ("West Wind", "Tararua", "Te Apiti", "All Tararua",
"Tararua WC", "White Hill", "Mahinerangi", "Te Uku",
"All", "North Island", "South Island")
for farm in wind_farms:
df = load_folder(directory, farm=farm, column=column)
fig, axes = stream_plot(df)
axes.set_title(farm)
axes.set_xlabel("Time Since Epoc [s]", fontsize=16)
axes.set_ylabel(column)
axes.set_xlim(0, 300)
savename = " ".join([farm, column])
fig.savefig(savename, dpi=100)
plt.close()
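if __name__ == "__main__":
    # Illustrative entry point: the directory below is a placeholder for the
    # root folder holding the per-farm CSV exports, not a path shipped with
    # this repository.
    process_and_plot("/path/to/scada/csv/root", column="Cumulative Deviation")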
| mit |
mugizico/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([x1, x2])
Z[i, j] = p[0]
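# For reference, an equivalent vectorised formulation (the original example
# uses the explicit loop above): stacking the grid into an (n_points, 2) array
# lets ``decision_function`` score every grid point in a single call.
# Z = clf.decision_function(np.c_[X1.ravel(), X2.ravel()]).reshape(X1.shape)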
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
datapythonista/pandas | pandas/tests/extension/test_categorical.py | 3 | 9574 | """
This file contains a minimal set of tests for compliance with the extension
array interface test suite, and should contain no other tests.
The test suite for the full functionality of the array is located in
`pandas/tests/arrays/`.
The tests in this file are inherited from the BaseExtensionTests, and only
minimal tweaks should be applied to get the tests passing (by overwriting a
parent method).
Additional tests should either be added to one of the BaseExtensionTests
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
import string
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
Timestamp,
)
import pandas._testing as tm
from pandas.api.types import CategoricalDtype
from pandas.tests.extension import base
def make_data():
while True:
values = np.random.choice(list(string.ascii_letters), size=100)
# ensure we meet the requirements
# 1. first two not null
# 2. first and second are different
if values[0] != values[1]:
break
return values
@pytest.fixture
def dtype():
return CategoricalDtype()
@pytest.fixture
def data():
"""Length-100 array for this type.
* data[0] and data[1] should both be non missing
    * data[0] and data[1] should not be equal
"""
return Categorical(make_data())
@pytest.fixture
def data_missing():
"""Length 2 array with [NA, Valid]"""
return Categorical([np.nan, "A"])
@pytest.fixture
def data_for_sorting():
return Categorical(["A", "B", "C"], categories=["C", "A", "B"], ordered=True)
@pytest.fixture
def data_missing_for_sorting():
return Categorical(["A", None, "B"], categories=["B", "A"], ordered=True)
@pytest.fixture
def na_value():
return np.nan
@pytest.fixture
def data_for_grouping():
return Categorical(["a", "a", None, None, "b", "b", "a", "c"])
class TestDtype(base.BaseDtypeTests):
pass
class TestInterface(base.BaseInterfaceTests):
@pytest.mark.skip(reason="Memory usage doesn't match")
def test_memory_usage(self, data):
# Is this deliberate?
super().test_memory_usage(data)
def test_contains(self, data, data_missing):
# GH-37867
# na value handling in Categorical.__contains__ is deprecated.
# See base.BaseInterFaceTests.test_contains for more details.
na_value = data.dtype.na_value
# ensure data without missing values
data = data[~data.isna()]
# first elements are non-missing
assert data[0] in data
assert data_missing[0] in data_missing
# check the presence of na_value
assert na_value in data_missing
assert na_value not in data
# Categoricals can contain other nan-likes than na_value
for na_value_obj in tm.NULL_OBJECTS:
if na_value_obj is na_value:
continue
assert na_value_obj not in data
assert na_value_obj in data_missing # this line differs from super method
class TestConstructors(base.BaseConstructorsTests):
def test_empty(self, dtype):
cls = dtype.construct_array_type()
result = cls._empty((4,), dtype=dtype)
assert isinstance(result, cls)
# the dtype we passed is not initialized, so will not match the
# dtype on our result.
assert result.dtype == CategoricalDtype([])
class TestReshaping(base.BaseReshapingTests):
pass
class TestGetitem(base.BaseGetitemTests):
@pytest.mark.skip(reason="Backwards compatibility")
def test_getitem_scalar(self, data):
# CategoricalDtype.type isn't "correct" since it should
# be a parent of the elements (object). But don't want
# to break things by changing.
super().test_getitem_scalar(data)
class TestSetitem(base.BaseSetitemTests):
pass
class TestMissing(base.BaseMissingTests):
@pytest.mark.skip(reason="Not implemented")
def test_fillna_limit_pad(self, data_missing):
super().test_fillna_limit_pad(data_missing)
@pytest.mark.skip(reason="Not implemented")
def test_fillna_limit_backfill(self, data_missing):
super().test_fillna_limit_backfill(data_missing)
class TestReduce(base.BaseNoReduceTests):
pass
class TestMethods(base.BaseMethodsTests):
@pytest.mark.skip(reason="Unobserved categories included")
def test_value_counts(self, all_data, dropna):
return super().test_value_counts(all_data, dropna)
def test_combine_add(self, data_repeated):
# GH 20825
# When adding categoricals in combine, result is a string
orig_data1, orig_data2 = data_repeated(2)
s1 = pd.Series(orig_data1)
s2 = pd.Series(orig_data2)
result = s1.combine(s2, lambda x1, x2: x1 + x2)
expected = pd.Series(
[a + b for (a, b) in zip(list(orig_data1), list(orig_data2))]
)
self.assert_series_equal(result, expected)
val = s1.iloc[0]
result = s1.combine(val, lambda x1, x2: x1 + x2)
expected = pd.Series([a + val for a in list(orig_data1)])
self.assert_series_equal(result, expected)
@pytest.mark.skip(reason="Not Applicable")
def test_fillna_length_mismatch(self, data_missing):
super().test_fillna_length_mismatch(data_missing)
class TestCasting(base.BaseCastingTests):
@pytest.mark.parametrize("cls", [Categorical, CategoricalIndex])
@pytest.mark.parametrize("values", [[1, np.nan], [Timestamp("2000"), pd.NaT]])
def test_cast_nan_to_int(self, cls, values):
# GH 28406
s = cls(values)
msg = "Cannot (cast|convert)"
with pytest.raises((ValueError, TypeError), match=msg):
s.astype(int)
@pytest.mark.parametrize(
"expected",
[
pd.Series(["2019", "2020"], dtype="datetime64[ns, UTC]"),
pd.Series([0, 0], dtype="timedelta64[ns]"),
pd.Series([pd.Period("2019"), pd.Period("2020")], dtype="period[A-DEC]"),
pd.Series([pd.Interval(0, 1), pd.Interval(1, 2)], dtype="interval"),
pd.Series([1, np.nan], dtype="Int64"),
],
)
def test_cast_category_to_extension_dtype(self, expected):
# GH 28668
result = expected.astype("category").astype(expected.dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dtype, expected",
[
(
"datetime64[ns]",
np.array(["2015-01-01T00:00:00.000000000"], dtype="datetime64[ns]"),
),
(
"datetime64[ns, MET]",
pd.DatetimeIndex(
[Timestamp("2015-01-01 00:00:00+0100", tz="MET")]
).array,
),
],
)
def test_consistent_casting(self, dtype, expected):
# GH 28448
result = Categorical(["2015-01-01"]).astype(dtype)
assert result == expected
class TestArithmeticOps(base.BaseArithmeticOpsTests):
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request):
# frame & scalar
op_name = all_arithmetic_operators
if op_name == "__rmod__":
request.node.add_marker(
pytest.mark.xfail(
reason="rmod never called when string is first argument"
)
)
super().test_arith_frame_with_scalar(data, op_name)
def test_arith_series_with_scalar(self, data, all_arithmetic_operators, request):
op_name = all_arithmetic_operators
if op_name == "__rmod__":
request.node.add_marker(
pytest.mark.xfail(
reason="rmod never called when string is first argument"
)
)
super().test_arith_series_with_scalar(data, op_name)
def test_add_series_with_extension_array(self, data):
ser = pd.Series(data)
with pytest.raises(TypeError, match="cannot perform|unsupported operand"):
ser + data
def test_divmod_series_array(self):
# GH 23287
# skipping because it is not implemented
pass
def _check_divmod_op(self, s, op, other, exc=NotImplementedError):
return super()._check_divmod_op(s, op, other, exc=TypeError)
class TestComparisonOps(base.BaseComparisonOpsTests):
def _compare_other(self, s, data, op_name, other):
op = self.get_op_from_name(op_name)
if op_name == "__eq__":
result = op(s, other)
expected = s.combine(other, lambda x, y: x == y)
assert (result == expected).all()
elif op_name == "__ne__":
result = op(s, other)
expected = s.combine(other, lambda x, y: x != y)
assert (result == expected).all()
else:
msg = "Unordered Categoricals can only compare equality or not"
with pytest.raises(TypeError, match=msg):
op(data, other)
@pytest.mark.parametrize(
"categories",
[["a", "b"], [0, 1], [Timestamp("2019"), Timestamp("2020")]],
)
def test_not_equal_with_na(self, categories):
# https://github.com/pandas-dev/pandas/issues/32276
c1 = Categorical.from_codes([-1, 0], categories=categories)
c2 = Categorical.from_codes([0, 1], categories=categories)
result = c1 != c2
assert result.all()
class TestParsing(base.BaseParsingTests):
pass
| bsd-3-clause |
mkhuthir/learnPython | Book_pythonlearn_com/25_matplotlib/logo.py | 1 | 2448 | import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
mpl.rcParams['xtick.labelsize'] = 10
mpl.rcParams['ytick.labelsize'] = 12
mpl.rcParams['axes.edgecolor'] = 'gray'
axalpha = 0.05
figcolor = 'white'
dpi = 80
fig = plt.figure(figsize=(6, 1.1), dpi=dpi)
fig.patch.set_edgecolor(figcolor)
fig.patch.set_facecolor(figcolor)
def add_math_background():
ax = fig.add_axes([0., 0., 1., 1.])
text = []
text.append(
(r"$W^{3\beta}_{\delta_1 \rho_1 \sigma_2} = "
r"U^{3\beta}_{\delta_1 \rho_1} + \frac{1}{8 \pi 2}"
r"\int^{\alpha_2}_{\alpha_2} d \alpha^\prime_2 "
r"\left[\frac{ U^{2\beta}_{\delta_1 \rho_1} - "
r"\alpha^\prime_2U^{1\beta}_{\rho_1 \sigma_2} "
r"}{U^{0\beta}_{\rho_1 \sigma_2}}\right]$", (0.7, 0.2), 20))
text.append((r"$\frac{d\rho}{d t} + \rho \vec{v}\cdot\nabla\vec{v} "
r"= -\nabla p + \mu\nabla^2 \vec{v} + \rho \vec{g}$",
(0.35, 0.9), 20))
text.append((r"$\int_{-\infty}^\infty e^{-x^2}dx=\sqrt{\pi}$",
(0.15, 0.3), 25))
text.append((r"$F_G = G\frac{m_1m_2}{r^2}$",
(0.85, 0.7), 30))
for eq, (x, y), size in text:
ax.text(x, y, eq, ha='center', va='center', color="#11557c",
alpha=0.25, transform=ax.transAxes, fontsize=size)
ax.set_axis_off()
return ax
def add_matplotlib_text(ax):
ax.text(0.95, 0.5, 'matplotlib', color='#11557c', fontsize=65,
ha='right', va='center', alpha=1.0, transform=ax.transAxes)
def add_polar_bar():
ax = fig.add_axes([0.025, 0.075, 0.2, 0.85], projection='polar')
ax.patch.set_alpha(axalpha)
ax.set_axisbelow(True)
N = 7
arc = 2. * np.pi
theta = np.arange(0.0, arc, arc/N)
radii = 10 * np.array([0.2, 0.6, 0.8, 0.7, 0.4, 0.5, 0.8])
width = np.pi / 4 * np.array([0.4, 0.4, 0.6, 0.8, 0.2, 0.5, 0.3])
bars = ax.bar(theta, radii, width=width, bottom=0.0)
for r, bar in zip(radii, bars):
bar.set_facecolor(cm.jet(r/10.))
bar.set_alpha(0.6)
ax.tick_params(labelbottom=False, labeltop=False,
labelleft=False, labelright=False)
ax.grid(lw=0.8, alpha=0.9, ls='-', color='0.5')
ax.set_yticks(np.arange(1, 9, 2))
ax.set_rmax(9)
if __name__ == '__main__':
main_axes = add_math_background()
add_polar_bar()
add_matplotlib_text(main_axes)
plt.show()
| mit |
sarvex/tensorflow | tensorflow/python/estimator/inputs/pandas_io.py | 41 | 1293 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""pandas_io python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.python.estimator.inputs import pandas_io
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
pandas_io.__all__ = [s for s in dir(pandas_io) if not s.startswith('__')]
from tensorflow_estimator.python.estimator.inputs.pandas_io import *
| apache-2.0 |
AlirezaShahabi/zipline | zipline/data/loader.py | 22 | 12751 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import os
from collections import OrderedDict
from datetime import timedelta
import logbook
import pandas as pd
from pandas.io.data import DataReader
import pytz
from six import iteritems
from . import benchmarks
from . benchmarks import get_benchmark_returns
from .paths import (
cache_root,
data_root,
)
from zipline.utils.tradingcalendar import trading_day as trading_day_nyse
from zipline.utils.tradingcalendar import trading_days as trading_days_nyse
logger = logbook.Logger('Loader')
# Mapping from index symbol to appropriate bond data
INDEX_MAPPING = {
'^GSPC':
('treasuries', 'treasury_curves.csv', 'data.treasury.gov'),
'^GSPTSE':
('treasuries_can', 'treasury_curves_can.csv', 'bankofcanada.ca'),
'^FTSE': # use US treasuries until UK bonds implemented
('treasuries', 'treasury_curves.csv', 'data.treasury.gov'),
}
def get_data_filepath(name):
"""
Returns a handle to data file.
Creates containing directory, if needed.
"""
dr = data_root()
if not os.path.exists(dr):
os.makedirs(dr)
return os.path.join(dr, name)
def get_cache_filepath(name):
cr = cache_root()
if not os.path.exists(cr):
os.makedirs(cr)
return os.path.join(cr, name)
def dump_treasury_curves(module='treasuries', filename='treasury_curves.csv'):
"""
Dumps data to be used with zipline.
    Puts source treasury data into zipline.
"""
try:
m = importlib.import_module("." + module, package='zipline.data')
except ImportError:
raise NotImplementedError(
'Treasury curve {0} module not implemented'.format(module))
tr_data = {}
for curve in m.get_treasury_data():
# Not ideal but massaging data into expected format
tr_data[curve['date']] = curve
curves = pd.DataFrame(tr_data).T
data_filepath = get_data_filepath(filename)
curves.to_csv(data_filepath)
return curves
def dump_benchmarks(symbol):
"""
Dumps data to be used with zipline.
    Puts source benchmark data into zipline.
"""
benchmark_data = []
for daily_return in get_benchmark_returns(symbol):
# Not ideal but massaging data into expected format
benchmark = (daily_return.date, daily_return.returns)
benchmark_data.append(benchmark)
data_filepath = get_data_filepath(get_benchmark_filename(symbol))
benchmark_returns = pd.Series(dict(benchmark_data))
benchmark_returns.to_csv(data_filepath)
def update_benchmarks(symbol, last_date):
"""
Updates data in the zipline message pack
last_date should be a datetime object of the most recent data
Puts source benchmark into zipline.
"""
datafile = get_data_filepath(get_benchmark_filename(symbol))
saved_benchmarks = pd.Series.from_csv(datafile)
try:
start = last_date + timedelta(days=1)
for daily_return in get_benchmark_returns(symbol, start_date=start):
# Not ideal but massaging data into expected format
benchmark = pd.Series({daily_return.date: daily_return.returns})
saved_benchmarks = saved_benchmarks.append(benchmark)
datafile = get_data_filepath(get_benchmark_filename(symbol))
saved_benchmarks.to_csv(datafile)
except benchmarks.BenchmarkDataNotFoundError as exc:
logger.warn(exc)
return saved_benchmarks
def get_benchmark_filename(symbol):
return "%s_benchmark.csv" % symbol
def load_market_data(trading_day=trading_day_nyse,
trading_days=trading_days_nyse, bm_symbol='^GSPC'):
bm_filepath = get_data_filepath(get_benchmark_filename(bm_symbol))
try:
saved_benchmarks = pd.Series.from_csv(bm_filepath)
except (OSError, IOError, ValueError):
logger.info(
"No cache found at {path}. "
"Downloading benchmark data for '{symbol}'.",
symbol=bm_symbol,
path=bm_filepath,
)
dump_benchmarks(bm_symbol)
saved_benchmarks = pd.Series.from_csv(bm_filepath)
saved_benchmarks = saved_benchmarks.tz_localize('UTC')
most_recent = pd.Timestamp('today', tz='UTC') - trading_day
most_recent_index = trading_days.searchsorted(most_recent)
days_up_to_now = trading_days[:most_recent_index + 1]
# Find the offset of the last date for which we have trading data in our
# list of valid trading days
last_bm_date = saved_benchmarks.index[-1]
last_bm_date_offset = days_up_to_now.searchsorted(
last_bm_date.strftime('%Y/%m/%d'))
    # If more than one trading day has elapsed since the last day for which
    # we have data, then we need to update
# We're doing "> 2" rather than "> 1" because we're subtracting an array
# _length_ from an array _index_, and therefore even if we had data up to
# and including the current day, the difference would still be 1.
if len(days_up_to_now) - last_bm_date_offset > 2:
benchmark_returns = update_benchmarks(bm_symbol, last_bm_date)
if benchmark_returns.index.tz is None or \
benchmark_returns.index.tz.zone != 'UTC':
benchmark_returns = benchmark_returns.tz_localize('UTC')
else:
benchmark_returns = saved_benchmarks
if benchmark_returns.index.tz is None or\
benchmark_returns.index.tz.zone != 'UTC':
benchmark_returns = benchmark_returns.tz_localize('UTC')
# Get treasury curve module, filename & source from mapping.
# Default to USA.
module, filename, source = INDEX_MAPPING.get(
bm_symbol, INDEX_MAPPING['^GSPC'])
tr_filepath = get_data_filepath(filename)
try:
saved_curves = pd.DataFrame.from_csv(tr_filepath)
except (OSError, IOError, ValueError):
logger.info(
"No cache found at {path}. "
"Downloading treasury data from {source}.",
path=tr_filepath,
source=source,
)
dump_treasury_curves(module, filename)
saved_curves = pd.DataFrame.from_csv(tr_filepath)
# Find the offset of the last date for which we have trading data in our
# list of valid trading days
last_tr_date = saved_curves.index[-1]
last_tr_date_offset = days_up_to_now.searchsorted(
last_tr_date.strftime('%Y/%m/%d'))
    # If more than one trading day has elapsed since the last day for which
    # we have data, then we need to update
# Comment above explains why this is "> 2".
if len(days_up_to_now) - last_tr_date_offset > 2:
treasury_curves = dump_treasury_curves(module, filename)
else:
treasury_curves = saved_curves.tz_localize('UTC')
return benchmark_returns, treasury_curves
def _load_raw_yahoo_data(indexes=None, stocks=None, start=None, end=None):
"""Load closing prices from yahoo finance.
:Optional:
indexes : dict (Default: {'SPX': '^GSPC'})
Financial indexes to load.
stocks : list (Default: ['AAPL', 'GE', 'IBM', 'MSFT',
'XOM', 'AA', 'JNJ', 'PEP', 'KO'])
Stock closing prices to load.
start : datetime (Default: datetime(1993, 1, 1, 0, 0, 0, 0, pytz.utc))
Retrieve prices from start date on.
end : datetime (Default: datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc))
Retrieve prices until end date.
:Note:
This is based on code presented in a talk by Wes McKinney:
http://wesmckinney.com/files/20111017/notebook_output.pdf
"""
assert indexes is not None or stocks is not None, """
must specify stocks or indexes"""
if start is None:
start = pd.datetime(1990, 1, 1, 0, 0, 0, 0, pytz.utc)
if start is not None and end is not None:
assert start < end, "start date is later than end date."
data = OrderedDict()
if stocks is not None:
for stock in stocks:
print(stock)
stock_pathsafe = stock.replace(os.path.sep, '--')
cache_filename = "{stock}-{start}-{end}.csv".format(
stock=stock_pathsafe,
start=start,
end=end).replace(':', '-')
cache_filepath = get_cache_filepath(cache_filename)
if os.path.exists(cache_filepath):
stkd = pd.DataFrame.from_csv(cache_filepath)
else:
stkd = DataReader(stock, 'yahoo', start, end).sort_index()
stkd.to_csv(cache_filepath)
data[stock] = stkd
if indexes is not None:
for name, ticker in iteritems(indexes):
print(name)
stkd = DataReader(ticker, 'yahoo', start, end).sort_index()
data[name] = stkd
return data
def load_from_yahoo(indexes=None,
stocks=None,
start=None,
end=None,
adjusted=True):
"""
Loads price data from Yahoo into a dataframe for each of the indicated
assets. By default, 'price' is taken from Yahoo's 'Adjusted Close',
which removes the impact of splits and dividends. If the argument
'adjusted' is False, then the non-adjusted 'close' field is used instead.
:param indexes: Financial indexes to load.
:type indexes: dict
:param stocks: Stock closing prices to load.
:type stocks: list
:param start: Retrieve prices from start date on.
:type start: datetime
:param end: Retrieve prices until end date.
:type end: datetime
:param adjusted: Adjust the price for splits and dividends.
:type adjusted: bool
"""
data = _load_raw_yahoo_data(indexes, stocks, start, end)
if adjusted:
close_key = 'Adj Close'
else:
close_key = 'Close'
df = pd.DataFrame({key: d[close_key] for key, d in iteritems(data)})
df.index = df.index.tz_localize(pytz.utc)
return df
def load_bars_from_yahoo(indexes=None,
stocks=None,
start=None,
end=None,
adjusted=True):
"""
Loads data from Yahoo into a panel with the following
column names for each indicated security:
- open
- high
- low
- close
- volume
- price
Note that 'price' is Yahoo's 'Adjusted Close', which removes the
impact of splits and dividends. If the argument 'adjusted' is True, then
the open, high, low, and close values are adjusted as well.
:param indexes: Financial indexes to load.
:type indexes: dict
:param stocks: Stock closing prices to load.
:type stocks: list
:param start: Retrieve prices from start date on.
:type start: datetime
:param end: Retrieve prices until end date.
:type end: datetime
:param adjusted: Adjust open/high/low/close for splits and dividends.
The 'price' field is always adjusted.
:type adjusted: bool
"""
data = _load_raw_yahoo_data(indexes, stocks, start, end)
panel = pd.Panel(data)
# Rename columns
panel.minor_axis = ['open', 'high', 'low', 'close', 'volume', 'price']
panel.major_axis = panel.major_axis.tz_localize(pytz.utc)
# Adjust data
if adjusted:
adj_cols = ['open', 'high', 'low', 'close']
for ticker in panel.items:
ratio = (panel[ticker]['price'] / panel[ticker]['close'])
ratio_filtered = ratio.fillna(0).values
for col in adj_cols:
panel[ticker][col] *= ratio_filtered
return panel
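# Illustrative helper (not part of the original loader API): a typical call
# mirroring the docstring above. The ticker list and dates are placeholders,
# and the call needs network access to Yahoo via pandas.io.data, so it is not
# exercised here.
def _example_load_bars():
    start = pd.datetime(2012, 1, 1, 0, 0, 0, 0, pytz.utc)
    end = pd.datetime(2013, 1, 1, 0, 0, 0, 0, pytz.utc)
    return load_bars_from_yahoo(stocks=['AAPL', 'MSFT'], start=start, end=end,
                                adjusted=True)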
def load_prices_from_csv(filepath, identifier_col, tz='UTC'):
data = pd.read_csv(filepath, index_col=identifier_col)
data.index = pd.DatetimeIndex(data.index, tz=tz)
data.sort_index(inplace=True)
return data
def load_prices_from_csv_folder(folderpath, identifier_col, tz='UTC'):
data = None
for file in os.listdir(folderpath):
if '.csv' not in file:
continue
raw = load_prices_from_csv(os.path.join(folderpath, file),
identifier_col, tz)
if data is None:
data = raw
else:
data = pd.concat([data, raw], axis=1)
return data
| apache-2.0 |
YeEmrick/learning | cs231/assignment/assignment2/experiments/FirstConvNet/FirstConvNet.py | 1 | 3935 | import os
import json
DIR_CS231n = '/Users/clement/Documents/MLearning/CS231/assignment2/'
import sys
from sklearn.externals import joblib
sys.path.append(DIR_CS231n)
import numpy as np
import matplotlib.pyplot as plt
from cs231n.classifiers.cnn import FirstConvNet
from cs231n.data_utils import get_CIFAR10_data
from cs231n.gradient_check import eval_numerical_gradient_array, eval_numerical_gradient
from cs231n.layers import *
from cs231n.fast_layers import *
from cs231n.solver import SolverCheckpoints
if __name__ == "__main__":
conf_file = sys.argv[1]
lr = sys.argv[2]
rg = sys.argv[3]
with open(conf_file, 'r') as f:
conf = json.load(f)
# Parameter extraction
# Hyperparameter
# lr = conf['lr']
# rg = conf['reg']
# Model instance
input_dim = tuple(conf.get('input_dim', (3, 32, 32)))
num_filters = conf.get('num_filters', 32)
filter_size = conf.get('filter_size', 7)
hidden_dims = conf.get('hidden_dim', 100)
num_classes = conf.get('num_classes', 10)
weight_scale = conf.get('weight_scale', 1e-3)
reg = conf.get('reg', float(rg))
dtype = conf.get('dtype', np.float32)
use_batchnorm = conf.get('use_batchnorm', True)
# Solver instance
update_rule = conf.get('update_rule', 'adam')
optim_config = conf.get('optim_config', {'learning_rate': float(lr)})
lr_decay = conf.get('lr_decay', 1.0)
batch_size = conf.get('batch_size', 100)
num_epochs = conf.get('num_epochs', 10)
print_every = conf.get('print_every', 10)
verbose = conf.get('verbose', True)
path = conf.get('path', '')
check_points_every = conf.get('check_points_every', 1)
if path == '':
raise ValueError('You have to set a path where \
            the model is supposed to run')
# Create a folder for a specific lr,reg
# Initialize the folder that contain this code
name_folder = 'lr' + str(lr) + '_reg' + str(reg)
folder = os.path.join(path, name_folder)
os.mkdir(folder)
os.mkdir(os.path.join(folder, 'checkpoints'))
    init_checkpoint = {'model': '',
                       'epoch': 0,
                       'best_params': '',
                       'best_val_acc': 0,
                       'loss_history': [],
                       'train_acc_history': [],
                       'val_acc_history': []}
name = 'check_0'
os.mkdir(os.path.join(folder, 'checkpoints', name))
joblib.dump(init_checkpoint, os.path.join(
folder, 'checkpoints', name, name + '.pkl'))
path = folder
# Load the (preprocessed) CIFAR10 data.
data = get_CIFAR10_data(DIR_CS231n)
for k, v in data.iteritems():
print '%s: ' % k, v.shape
print 'The parameters are: '
for key, value in conf.iteritems():
print key + ': ', value, ' \n'
# Initialize the model instance
model = FirstConvNet(input_dim=input_dim,
num_filters=num_filters,
filter_size=filter_size,
hidden_dims=hidden_dims,
num_classes=num_classes,
weight_scale=weight_scale,
reg=reg,
dtype=dtype,
use_batchnorm=use_batchnorm)
# Run the training
solver = SolverCheckpoints(model=model,
data=data,
path=path,
update_rule=update_rule,
optim_config=optim_config,
lr_decay=lr_decay,
batch_size=batch_size,
num_epochs=num_epochs,
print_every=print_every,
check_points_every=check_points_every,
verbose=verbose)
solver.train()
| apache-2.0 |
xwolf12/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 114 | 25281 | # Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
from sklearn.utils import check_array
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
# When validating this against glmnet notice that glmnet divides it
# against nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
# Actually, the parameters alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
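# Illustrative helper (not collected as a test): build_dataset returns an
# under-determined problem, with more features than samples and only the first
# n_informative_features coefficients non-zero, which is why the sparse
# estimators exercised below can still reach a high test score.
def _example_build_dataset_shapes():
    X, y, X_test, y_test = build_dataset()
    assert X.shape == (50, 200) and X_test.shape == (50, 200)
    assert y.shape == (50,) and y_test.shape == (50,)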
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that they don't fall in the grid of
# clf.alphas further than 1
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
    # to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
# Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprecation_precompute_enet():
# Test that setting precompute="auto" gives a Deprecation Warning.
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
# Test that dense and sparse input give the same output for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
def test_check_input_false():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
X = check_array(X, order='F', dtype='float64')
y = check_array(X, order='F', dtype='float64')
clf = ElasticNet(selection='cyclic', tol=1e-8)
# Check that no error is raised if data is provided in the right format
clf.fit(X, y, check_input=False)
X = check_array(X, order='F', dtype='float32')
clf.fit(X, y, check_input=True)
# Check that an error is raised if data is provided in the wrong format,
# because of check bypassing
assert_raises(ValueError, clf.fit, X, y, check_input=False)
# With no input checking, providing X in C order should result in incorrect
# computation
X = check_array(X, order='C', dtype='float64')
clf.fit(X, y, check_input=False)
coef_false = clf.coef_
clf.fit(X, y, check_input=True)
coef_true = clf.coef_
assert_raises(AssertionError, assert_array_almost_equal,
coef_true, coef_false)
def test_overridden_gram_matrix():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
Gram = X.T.dot(X)
clf = ElasticNet(selection='cyclic', tol=1e-8, precompute=Gram,
fit_intercept=True)
assert_warns_message(UserWarning,
"Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
clf.fit, X, y)
| bsd-3-clause |
kristianeschenburg/parcellearning | parcellearning/utilities/gnnio.py | 1 | 9550 | import dgl
from dgl.data import DGLDataset
from dgl import data
import torch
import os
import nibabel as nb
import numpy as np
import pandas as pd
import fragmenter
import pysurface
import argparse
import json
class GCNData(DGLDataset):
"""
Class for generating DGL dataset
Parameters
----------
subject_list: str
list of individuals to aggregate data over
data_name: str
output dataset name
url : str
URL to download the raw dataset
"""
def __init__(self,
subject_list,
data_name,
url=None,
save_subject=False,
save_subject_dir=None,
labels = {'dir': '/projects3/parcellation/data/labels/',
'extension': '.L.CorticalAreas.fixed.32k_fs_LR.label.gii'},
graphs = {'dir': '/projects3/parcellation/data/surfaces/',
'extension': '.L.midthickness.32k_fs_LR.acpc_dc.surf.gii'},
features={'regionalized': {'dir': '/projects3/parcellation/data/regionalization/Destrieux/',
'extension': '.L.aparc.a2009s.Mean.CrossCorr.csv'}}):
if save_subject and not save_subject_dir:
raise ValueError('Output directory must be supplied when saving subject-level graphs.')
self.data_name = data_name
self.feature_map = features
self.graph_map = graphs
self.label_map = labels
self.save_subject = save_subject
self.save_subject_dir = save_subject_dir
# load provided list of training subjects
with open(subject_list, 'r') as f:
subjects = f.read().split()
self.subjects = subjects
super(GCNData, self).__init__(name='GCNData',
url=url,
raw_dir=None,
save_dir=None,
force_reload=False,
verbose=False)
def download(self):
# download raw data to local disk
pass
def process(self):
features = list(self.feature_map.keys())
features.sort()
# check which files have all of the training features and surface files
qc = []
for subject in self.subjects:
existence = 0
# check which features exist
for f in features:
filename='%s%s%s' % (self.feature_map[f]['dir'], subject, self.feature_map[f]['extension'])
if os.path.exists(filename):
existence += 1
# check surface file exists
surface_file = '%s%s%s' % (self.graph_map['dir'], subject, self.graph_map['extension'])
if os.path.exists(surface_file):
existence += 1
# check label file exists
response_file = '%s%s%s' % (self.label_map['dir'], subject, self.label_map['extension'])
if os.path.exists(response_file):
existence += 1
# append passing subject
if existence == (len(features)+2):
qc.append(subject)
# reassign list of subjects that passed
if len(qc) < len(self.subjects):
print('Warning: provided list of subjects has missing data.')
print('Only %i of %i subjects were loaded.' % (len(qc), len(self.subjects)))
else:
print('Data for all subjects provided exists')
self.subjects = qc
graphs = []
# iterate over subjects
# load training data, response data, and graph structure
for subject in self.subjects:
# load node features -- independent variables
sfeats = {}
for f in features:
filename='%s%s%s' % (self.feature_map[f]['dir'], subject, self.feature_map[f]['extension'])
ext=filename.split('.')[-1]
if ext == 'csv':
df = pd.read_csv(filename, index_col=[0])
df = np.asarray(df)
elif ext == 'gii':
df = nb.load(filename)
df = df.darrays[0].data
df = torch.tensor(df)
if df.ndim == 1:
df = df.unsqueeze(-1)
sfeats[f] = df
# load surface file -- to generate graph structure
surface_file = '%s%s%s' % (self.graph_map['dir'], subject, self.graph_map['extension'])
surface = nb.load(surface_file)
vertices = surface.darrays[0].data
faces = surface.darrays[1].data
# generate graph adjacency structure
adjacency = pysurface.matrix.adjacency(F=faces)
# load label file -- dependent variable
response_file = '%s%s%s' % (self.label_map['dir'], subject, self.label_map['extension'])
label = nb.load(response_file)
label = label.darrays[0].data
label = torch.tensor(label).long()
gidx = torch.where(label >= 1)[0]
adjacency = adjacency[gidx,:][:,gidx]
graph = dgl.from_scipy(adjacency)
for feature, data in sfeats.items():
graph.ndata[feature] = data[gidx]
graph.ndata['label'] = label[gidx]
graph.ndata['idx'] = gidx
if self.save_subject:
filename='%sgraphs/%s.L.graph.bin' % (self.save_subject_dir, subject)
dgl.save_graphs(filename=filename, g_list=graph)
graphs.append(graph)
self.graph = graphs
def __getitem__(self, idx):
# get one example by index
pass
def __len__(self):
# number of data examples
pass
def save(self):
# save processed data to directory `self.save_dir`
data.save_graphs(g_list=self.graph, filename=self.data_name)
def load(self):
# load processed data from directory `self.save_path`
return data.load_graphs(filename=self.data_name)[0]
def has_cache(self):
# check if preprocessed data already exists
return os.path.exists(self.data_name)
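# A minimal construction sketch (not part of the original module): the subject
# list and output paths below are hypothetical placeholders. On first
# construction the DGLDataset machinery calls process() and save(); afterwards
# the serialized graphs can be read back with load().
#
# ds = GCNData(subject_list='/projects3/parcellation/lists/train.txt',
#              data_name='/projects3/parcellation/graphs/train.L.graph.bin')
# graphs = ds.load()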
def standardize(dataset):
"""
Standardize the columns of the feature matrix.
Parameters:
- - - - -
dataset: torch tensor
array of features to standardize
"""
dataset = dataset.detach().numpy()
mean = np.nanmean(dataset, axis=0)
std = np.nanstd(dataset, axis=0)
dataset = (dataset - mean) / std
dataset = torch.tensor(dataset).float()
return dataset
def dataset(dSet=None,
features=['regionalized', 'spectral', 'sulcal', 'myelin', 'curv'],
atlas='glasser',
norm=True,
clean=True,
return_bad_nodes=False):
"""
Load datasets that can be plugged in directly to GNN models.
Parameters:
- - - - -
dSet: set
path to previously computed dataset
features: list
list of variables to include in the model
atlas: str
parcellation to learn
norm: bool
standardize the columns of the features
clean: bool
remove single feature columns
"""
print('Loading dataset')
data_set = dgl.load_graphs(dSet)[0]
# select the atlas file to use
# controls which parcellation we are trying to learn
# i.e. if atlas == 'destrieux', we'll train a classifier to learn
# the destrieux regions
if atlas is not None:
for graph in data_set:
graph.ndata['label'] = graph.ndata[atlas].long()
# standardize features
if norm:
for i, graph in enumerate(data_set):
for feature in features:
temp = graph.ndata[feature]
temp = standardize(temp)
if temp.ndim == 1:
temp = temp[:,None]
graph.ndata[feature] = temp
# concatenate features, column-wise
for graph in data_set:
temp = torch.cat([graph.ndata[f] for f in features], dim=1)
graph.ndata['features'] = temp
# remove all individual features apart from the aggregation
if clean:
exfeats = [l for l in graph.ndata.keys() if l not in ['features', 'idx', 'label', 'mask']]
for i, graph in enumerate(data_set):
nodes = []
for exfeat in exfeats:
# identify any rows that are all zeros
temp = np.abs(graph.ndata[exfeat])
if temp.ndim == 1:
temp = temp[:,None]
eq_nan = (torch.isnan(temp).sum(1) > 0)
nodes.append(torch.where(eq_nan)[0])
graph.ndata.pop(exfeat)
if 'label' in graph.ndata and atlas is not None:
nodes.append(torch.where(torch.isnan(graph.ndata['label']))[0])
nodes = torch.cat(nodes, dim=0)
nodes = torch.unique(nodes)
graph.remove_nodes(nodes)
if '_ID' in graph.ndata.keys():
graph.ndata.pop('_ID')
if '_ID' in graph.edata.keys():
graph.edata.pop('_ID')
# add self loop connections
for graph in data_set:
graph = dgl.remove_self_loop(graph)
graph = dgl.add_self_loop(graph)
return data_set
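# A minimal usage sketch (not part of the original module): assuming a set of
# graphs was previously serialized to 'training.L.graph.bin', the loader below
# standardizes the listed features and concatenates them into a single
# 'features' tensor per graph. The path and feature names are examples only.
#
# if __name__ == '__main__':
#     train_graphs = dataset(dSet='training.L.graph.bin',
#                            features=['regionalized', 'sulcal', 'myelin', 'curv'],
#                            atlas='glasser', norm=True, clean=True)
#     print('loaded %i graphs' % len(train_graphs))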
| mit |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/examples/pylab_examples/axes_zoom_effect.py | 9 | 3291 | from matplotlib.transforms import Bbox, TransformedBbox, \
blended_transform_factory
from mpl_toolkits.axes_grid1.inset_locator import BboxPatch, BboxConnector,\
BboxConnectorPatch
def connect_bbox(bbox1, bbox2,
loc1a, loc2a, loc1b, loc2b,
prop_lines, prop_patches=None):
if prop_patches is None:
prop_patches = prop_lines.copy()
prop_patches["alpha"] = prop_patches.get("alpha", 1)*0.2
c1 = BboxConnector(bbox1, bbox2, loc1=loc1a, loc2=loc2a, **prop_lines)
c1.set_clip_on(False)
c2 = BboxConnector(bbox1, bbox2, loc1=loc1b, loc2=loc2b, **prop_lines)
c2.set_clip_on(False)
bbox_patch1 = BboxPatch(bbox1, **prop_patches)
bbox_patch2 = BboxPatch(bbox2, **prop_patches)
p = BboxConnectorPatch(bbox1, bbox2,
#loc1a=3, loc2a=2, loc1b=4, loc2b=1,
loc1a=loc1a, loc2a=loc2a, loc1b=loc1b, loc2b=loc2b,
**prop_patches)
p.set_clip_on(False)
return c1, c2, bbox_patch1, bbox_patch2, p
def zoom_effect01(ax1, ax2, xmin, xmax, **kwargs):
"""
ax1 : the main axes
ax2 : the zoomed axes
(xmin,xmax) : the limits of the colored area in both plot axes.
connect ax1 & ax2. The x-range of (xmin, xmax) in both axes will
be marked. The keyword parameters will be used to create
patches.
"""
trans1 = blended_transform_factory(ax1.transData, ax1.transAxes)
trans2 = blended_transform_factory(ax2.transData, ax2.transAxes)
bbox = Bbox.from_extents(xmin, 0, xmax, 1)
mybbox1 = TransformedBbox(bbox, trans1)
mybbox2 = TransformedBbox(bbox, trans2)
prop_patches=kwargs.copy()
prop_patches["ec"]="none"
prop_patches["alpha"]=0.2
c1, c2, bbox_patch1, bbox_patch2, p = \
connect_bbox(mybbox1, mybbox2,
loc1a=3, loc2a=2, loc1b=4, loc2b=1,
prop_lines=kwargs, prop_patches=prop_patches)
ax1.add_patch(bbox_patch1)
ax2.add_patch(bbox_patch2)
ax2.add_patch(c1)
ax2.add_patch(c2)
ax2.add_patch(p)
return c1, c2, bbox_patch1, bbox_patch2, p
def zoom_effect02(ax1, ax2, **kwargs):
"""
ax1 : the main axes
ax2 : the zoomed axes
Similar to zoom_effect01. The xmin & xmax will be taken from the
ax1.viewLim.
"""
tt = ax1.transScale + (ax1.transLimits + ax2.transAxes)
trans = blended_transform_factory(ax2.transData, tt)
mybbox1 = ax1.bbox
mybbox2 = TransformedBbox(ax1.viewLim, trans)
prop_patches=kwargs.copy()
prop_patches["ec"]="none"
prop_patches["alpha"]=0.2
c1, c2, bbox_patch1, bbox_patch2, p = \
connect_bbox(mybbox1, mybbox2,
loc1a=3, loc2a=2, loc1b=4, loc2b=1,
prop_lines=kwargs, prop_patches=prop_patches)
ax1.add_patch(bbox_patch1)
ax2.add_patch(bbox_patch2)
ax2.add_patch(c1)
ax2.add_patch(c2)
ax2.add_patch(p)
return c1, c2, bbox_patch1, bbox_patch2, p
import matplotlib.pyplot as plt
plt.figure(1, figsize=(5,5))
ax1 = plt.subplot(221)
ax2 = plt.subplot(212)
ax1.set_xlim(0, 1)
ax2.set_xlim(0, 5)
zoom_effect01(ax1, ax2, 0.2, 0.8)
ax1 = plt.subplot(222)
ax1.set_xlim(2, 3)
ax2.set_xlim(0, 5)
zoom_effect02(ax1, ax2)
plt.show()
| mit |
simon-pepin/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of the NearestCentroid classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/tests/series/test_validate.py | 7 | 1058 | import pytest
from pandas.core.series import Series
class TestSeriesValidate(object):
"""Tests for error handling related to data types of method arguments."""
s = Series([1, 2, 3, 4, 5])
def test_validate_bool_args(self):
# Tests for error handling related to boolean arguments.
invalid_values = [1, "True", [1, 2, 3], 5.0]
for value in invalid_values:
with pytest.raises(ValueError):
self.s.reset_index(inplace=value)
with pytest.raises(ValueError):
self.s._set_name(name='hello', inplace=value)
with pytest.raises(ValueError):
self.s.sort_values(inplace=value)
with pytest.raises(ValueError):
self.s.sort_index(inplace=value)
with pytest.raises(ValueError):
self.s.sort_index(inplace=value)
with pytest.raises(ValueError):
self.s.rename(inplace=value)
with pytest.raises(ValueError):
self.s.dropna(inplace=value)
| mit |
jdurbin/sandbox | python/scikitlearn/survivaltest/scripts/kaggleinspired.py | 1 | 2721 | #!/usr/bin/env python
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import train_test_split
pd.set_option('display.width', 1000)
# Read data
expression = pd.read_csv("../data/vijver2002.tab",delimiter="\t")
expression = expression.transpose()
print expression
print "Expression Shape:",expression.shape
print "Expression[0]:\n",expression.iloc[0] # This is the text heading
print "Expression[1]:\n",expression.iloc[1] # This is the first numeric row
print "Expression[295]:\n",expression.iloc[295] # This is the last row
print expression.values # This includes the first row of names
# Read metadata
metadata = pd.read_csv("../data/vijver2002.clinical.t.tab",delimiter="\t")
print metadata.head(10)
print "Metadata shape:",metadata.shape # 295 x 16
# numpy array way to combine columns, output is numpy array
#survival = np.c_[metadata['ID'],metadata['TIMEsurvival']]
survival = pd.DataFrame(metadata,columns = ['ID','TIMEsurvival'])
print survival # dataframe
print "Survival shape:",survival.shape
print "expression values: ",expression.values[1:,:] # cut out column headings
print "survival.values: ",survival.values[:,1:] # cut out row labels
# Split data into test and train datasets
exp_train,exp_test,surv_train,surv_test = train_test_split(expression.values[1:,:],
survival.values[:,1:],
train_size=0.8)
print "EXP TRAIN TYPE:",type(exp_train)
print "EXP TRAIN SHAPE:",exp_train.shape # (236,9803)
#print exp_test.shape # (59,9803)
print "EXP TRAIN: \n",exp_train
print "SURV TRAIN SHAPE: ",surv_train.shape #(236,1)
print "SURV TRAIN RAVEL SHAPE: ",surv_train.ravel().shape #(236,)
print "SURV TRAIN TYPE: ",type(surv_train) # numpy.ndarray
print "SURV TRAIN: \n",surv_train
model = RandomForestClassifier(n_estimators = 100)
model = model.fit(exp_train,surv_train.ravel())
output = model.predict(exp_test)
print "OUTPUT:\n",output
print "OUTPUT TYPE:",type(output) # numpy.ndarray
print "OUTPUT SHAPE:",output.shape
print "surv_test:\n",surv_test
# So this outputs some kind of numeric value. I don't know where it comes from in a
# RandomForest. Perhaps it treated it as a multi-value prediction... let's see if the numbers
# in the output are in the input...
# output size: 59
# intersection size: 49
print "INTERSCTION of OUTPUT and surv_train:\n",np.intersect1d(output,surv_train)
print "INTERSECTION shape:\n",np.intersect1d(output,surv_train).shape
# So, I think it's pretty clea that it's just a multi-class classifier using these real numbers
# as 59 different output classes. | mit |
cluckmaster/MissionPlanner | Lib/site-packages/numpy/lib/recfunctions.py | 58 | 34495 | """
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for matplotlib.
They have been rewritten and extended for convenience.
"""
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
_check_fill_value = np.ma.core._check_fill_value
__all__ = ['append_fields',
'drop_fields',
'find_duplicates',
'get_fieldstructure',
'join_by',
'merge_arrays',
'rec_append_fields', 'rec_drop_fields', 'rec_join',
'recursive_fill_fields', 'rename_fields',
'stack_arrays',
]
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.0), (2, 20.0), (0, 0.0)],
dtype=[('A', '<i4'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames) or None
def get_names_flat(adtype):
"""
Returns the field names of the input datatype as a tuple. Nested structures
are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names:
listnames.extend(get_names_flat(current))
return tuple(listnames) or None
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return ndtype.descr
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
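Examples
--------
A small illustration; only the field names are checked here, because the
byte-order prefixes in the format strings are platform dependent:
>>> from numpy.lib import recfunctions as rfn
>>> a = np.zeros(3, dtype=[('A', int)])
>>> b = np.zeros(3, dtype=[('B', float)])
>>> [name for (name, _) in rfn.zip_descr((a, b), flatten=True)]
['A', 'B']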
"""
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
names = current.names or ()
if len(names) > 1:
newdtype.append(('', current.descr))
else:
newdtype.extend(current.descr)
return np.dtype(newdtype).descr
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields as keys and a list of parent fields as values.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
# if (lastparent[-1] != lastname):
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents or None
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if hasattr(element, '__iter__') and not isinstance(element, basestring):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
for f in _izip_fields(element):
yield f
else:
yield element
def izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
seqarray : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
flatten : {True, False},
Whether to
"""
# OK, that's a complete ripoff from Python2.6 itertools.izip_longest
def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop):
"Yields the fill_value or raises IndexError"
yield counter()
#
fillers = itertools.repeat(fill_value)
iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays]
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
#
try:
for tup in itertools.izip(*iters):
yield tuple(zipfunc(tup))
except IndexError:
pass
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).iteritems():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def merge_arrays(seqarrays,
fill_value= -1, flatten=False, usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
mask = [(False, False) (False, False) (True, False)],
fill_value = (999999, 1e+20),
dtype = [('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
... usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('a', '<i4'), ('f1', '<f8')])
Notes
-----
* Without a mask, the missing value will be filled with something,
depending on its corresponding type:
-1 for integers
-1.0 for floating point numbers
'-' for characters
'-1' for strings
True for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence ?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
if (not flatten) or \
(zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
# Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Make sure we have named fields
if not seqdtype.names:
seqdtype = [('', seqdtype)]
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = map(np.asanyarray, seqarrays)
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = zip_descr(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in itertools.izip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
for (a, n) in itertools.izip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the fields
to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype (`asrecarray=False`)
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
>>> rfn.drop_fields(a, 'a')
array([((2.0, 3),), ((5.0, 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))],
dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)],
dtype=[('a', '<i4')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names, ]
else:
drop_names = set(drop_names)
#
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
#
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
#
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names:
newdtype.append((newname,
_recursive_rename_fields(current, namemapper)))
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def append_fields(base, names, data=None, dtypes=None,
fill_value= -1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
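Examples
--------
A minimal illustration; only field names and values are checked, because
the exact ``repr`` formatting differs between numpy versions:
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = rfn.append_fields(a, 'C', data=[100, 200], usemask=False)
>>> b.dtype.names
('A', 'B', 'C')
>>> b['C'].tolist()
[100, 200]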
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
err_msg = "The number of arrays does not match the number of names"
raise ValueError(err_msg)
elif isinstance(names, basestring):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
elif not hasattr(dtypes, '__iter__'):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, "\
"a single dtype or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(max(len(base), len(data)),
dtype=base.dtype.descr + data.dtype.descr)
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
Superposes arrays field by field
Parameters
----------
arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords if `asrecarray==True`)
or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`) or
just a flexible-type ndarray.
autoconvert : {False, True}, optional
Whether automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', float), ('C', float)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
('c', 30.0, 300.0)],
mask = [(False, False, True) (False, False, True) (False, False, False)
(False, False, False) (False, False, False)],
fill_value = ('N/A', 1e+20, 1e+20),
dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = dtype_l.descr
names = [_[0] for _ in newdescr]
for dtype_n in ndtype[1:]:
for descr in dtype_n.descr:
name = descr[0] or ''
if name not in names:
newdescr.append(descr)
names.append(name)
else:
nameidx = names.index(name)
current_descr = newdescr[nameidx]
if autoconvert:
if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
current_descr = list(current_descr)
current_descr[-1] = descr[1]
newdescr[nameidx] = tuple(current_descr)
elif descr[1] != current_descr[-1]:
raise TypeError("Incompatible type '%s' <> '%s'" % \
(dict(newdescr)[name], descr[1]))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
... # XXX: judging by the output, the ignoremask flag has no effect
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
The key should be either a string or a sequence of string corresponding
to the fields used to join the array.
An exception is raised if the `key` field cannot be found in the two input
arrays.
Neither `r1` nor `r2` should have any duplicates along `key`: the presence
of duplicates will make the output quite unreliable. Note that duplicates
are not looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
If 'outer', returns the common elements as well as the elements of r1
not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1 not
in r2.
r1postfix : string, optional
String appended to the names of the fields of r1 that are present in r2
but absent from the key.
r2postfix : string, optional
String appended to the names of the fields of r2 that are present in r1
but absent from the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords if `asrecarray==True`)
or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`) or
just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for the
two arrays and concatenating the result. This array is then sorted, and
the common entries selected. The output is constructed by filling the fields
with the selected entries. Matching is not preserved if there are some
duplicates...
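Examples
--------
A small inner join; only structural properties are shown here, since the
array ``repr`` differs between numpy versions:
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.), (3, 30.)],
...              dtype=[('key', int), ('value', float)])
>>> b = np.array([(1, 100.), (3, 300.)],
...              dtype=[('key', int), ('other', float)])
>>> joined = rfn.join_by('key', a, b, jointype='inner', usemask=False)
>>> joined.dtype.names
('key', 'value', 'other')
>>> len(joined)
2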
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError("The 'jointype' argument should be in 'inner', "\
"'outer' or 'leftouter' (got '%s' instead)" % jointype)
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
# Check the keys
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
(nb1, nb2) = (len(r1), len(r2))
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Make temporary arrays of just the keys
r1k = drop_fields(r1, [n for n in r1names if n not in key])
r2k = drop_fields(r2, [n for n in r2names if n not in key])
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array .......
# Start with the key fields
ndtype = [list(_) for _ in r1k.dtype.descr]
# Add the other fields
ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
# Find the new list of names (it may be different from r1names)
names = list(_[0] for _ in ndtype)
for desc in r2.dtype.descr:
desc = list(desc)
name = desc[0]
# Have we seen the current name already ?
if name in names:
nameidx = names.index(name)
current = ndtype[nameidx]
# The current field is part of the key: take the largest dtype
if name in key:
current[-1] = max(desc[1], current[-1])
# The current field is not part of the key: add the suffixes
else:
current[0] += r1postfix
desc[0] += r2postfix
ndtype.insert(nameidx + 1, desc)
#... we haven't: just add the description to the current list
else:
names.append(desc[0])
ndtype.append(desc)
# Revert the elements to tuples
ndtype = [tuple(_) for _ in ndtype]
# Find the largest nb of common fields : r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
if f not in names:
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names:
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs)
| gpl-3.0 |
fredhohman/a-viz-of-ice-and-fire | scripts/get_colors_toystory.py | 1 | 4497 | # https://github.com/fengsp/color-thief-py
# https://trac.ffmpeg.org/wiki/Create%20a%20thumbnail%20image%20every%20X%20seconds%20of%20the%20video
import numpy as np
from PIL import Image, ImageDraw
from operator import itemgetter
import os
from colorthief import ColorThief
import time
import json
import matplotlib.pyplot as plt
import cv2
import math
import colorsys
# File paths
# dir_path = '/Users/fredhohman/Github/cs-7450/data/screenshots/output/'
def make_episode_list():
episode_list = []
for season_num in [1, 2, 3, 4, 5, 6]:
for episode_num in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
episode = 's' + str(season_num) + 'e' + str(episode_num)
episode_list.append(episode)
return episode_list
# def get_colors(infile, outfile, numcolors=50, swatchsize=100, resize=150):
# # Get color palette
# image = Image.open(infile)
# image = image.resize((resize, resize))
# result = image.convert('P', palette=Image.ADAPTIVE, colors=numcolors)
# result.putalpha(0)
# colors = result.getcolors(resize*resize)
# # Manual check
# # for color in colors:
# # print color
# # Sort color palette by count
# colors = sorted(colors, key=itemgetter(0), reverse=True)
# # Manual check
# # print('\n')
# # for color in colors:
# # print color
# # print(type(colors))
# # Save colors to file
# pal = Image.new('RGB', (swatchsize*numcolors, swatchsize))
# draw = ImageDraw.Draw(pal)
# posx = 0
# for count, col in colors:
# draw.rectangle([posx, 0, posx+swatchsize, swatchsize], fill=col)
# posx = posx + swatchsize
# del draw
# pal.save(outfile, "PNG")
# step(): sort key used to order palette colours -- groups by hue band, then
# by luminosity and value, with alternate hue bands reversed.
def step(r, g, b, repetitions=1):
lum = math.sqrt(.241 * r + .691 * g + .068 * b)
h, s, v = colorsys.rgb_to_hsv(r,g,b)
h2 = int(h * repetitions)
lum2 = int(lum * repetitions)
v2 = int(v * repetitions)
if h2 % 2 == 1:
v2 = repetitions - v2
lum = repetitions - lum
return (h2, lum, v2)
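# Illustrative sketch -- not part of the original script. The palette below is
# made up; it only shows how step() is used as a sort key on 0-1 RGB floats,
# mirroring the normalise -> sort -> rescale sequence in the main block.
def _demo_step_sort():
    palette = [(200, 30, 30), (30, 200, 30), (30, 30, 200), (240, 240, 240)]
    normed = [tuple(c / 255.0 for c in rgb) for rgb in palette]
    normed.sort(key=lambda rgb: step(rgb[0], rgb[1], rgb[2], 8))
    # scale back to 0-255 integer tuples, as done for the saved palettes
    return [tuple(int(round(c * 255.0)) for c in rgb) for rgb in normed]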
if __name__ == '__main__':
# for raw images
episode_list = make_episode_list()
# episode_list = episode_list[50:]
# print(episode_list)
# for created images
# episode_list = ['/Users/fredhohman/Github/cs-7450']
# episode = episode + '/'
# dir_path = '/Volumes/SG-1TB/toystory/3-screenshots/' #for raw images
dir_path = '/Users/fredhohman/Github/a-viz-of-ice-and-fire/data/toystory/3-color-palettes-chunk-temp/' # for chunked pngs
# dir_path = episode
# images = [img for img in os.listdir(dir_path) if img.startswith('out')] # for raw images
images = [img for img in os.listdir(dir_path) if img.endswith('.png')] # for chunk png
# images = images[0:10]
print(images)
print(str(len(images)) + ' images found')
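    # Each palette is expected to come back with 10 swatches; the check inside
    # the loop below guards against get_palette() returning all 11.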
color_count = 11
swatchsize = 10
posx = 0
posy = 0
palettes = []
for img in images:
print(str(img))
start = time.time()
color_thief = ColorThief(dir_path+img)
palette = color_thief.get_palette(color_count=color_count, quality=5)
end = time.time() - start
print(end)
# print(palette)
# print(len(palette))
if len(palette) == color_count:
print('COLOR PALETTE FOR IMAGE HAD 11 COLORS NOT 10')
break
# sort colors
print(palette) #checking format
temp = palette
temp_np = np.array(temp)
temp_np = temp_np / 255.0
temp_list = np.ndarray.tolist(temp_np)
temp_list.sort(key=lambda rgb: step(rgb[0],rgb[1],rgb[2],8))
sorted_np = np.round(np.array(temp_list)*255.0).astype(int)
sorted_list = np.ndarray.tolist(sorted_np)
palette = sorted_list
palette = [tuple(x) for x in palette] #convert to list of tuples
print(palette) #checking format
palettes.append(palette)
pal = Image.new('RGB', (swatchsize*len(palettes[0]), swatchsize*len(images)))
draw = ImageDraw.Draw(pal)
for cpal in palettes:
for col in cpal:
draw.rectangle([posx, posy, posx+swatchsize, posy+swatchsize], fill=col)
posx = posx + swatchsize
posy = posy + swatchsize
posx = 0
del draw
pal.save('../data/toystory/toystory-3-chunked-inline-sorted.png', "PNG")
with open('../data/toystory/toystory-3-chunked-inline-sorted.json', 'w') as outfile:
json.dump({'palettes': palettes}, outfile)
| mit |
tracierenea/gnuradio | gr-fec/python/fec/polar/channel_construction_awgn.py | 24 | 8560 | #!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
'''
Based on 2 papers:
[1] Ido Tal, Alexander Vardy: 'How To Construct Polar Codes', 2013
for an in-depth description of a widely used algorithm for channel construction.
[2] Harish Vangala, Emanuele Viterbo, Yi Hong: 'A Comparative Study of Polar Code Constructions for the AWGN Channel', 2015
for an overview of different approaches
'''
from scipy.optimize import fsolve
from scipy.special import erfc
from helper_functions import *
from channel_construction_bec import bhattacharyya_bounds
def solver_equation(val, s):
cw_lambda = codeword_lambda_callable(s)
ic_lambda = instantanious_capacity_callable()
return lambda y: ic_lambda(cw_lambda(y)) - val
def solve_capacity(a, s):
eq = solver_equation(a, s)
res = fsolve(eq, 1)
return np.abs(res[0]) # only positive values needed.
def codeword_lambda_callable(s):
return lambda y: np.exp(-2 * y * np.sqrt(2 * s))
def codeword_lambda(y, s):
return codeword_lambda_callable(s)(y)
def instantanious_capacity_callable():
return lambda x : 1 - np.log2(1 + x) + (x * np.log2(x) / (1 + x))
def instantanious_capacity(x):
return instantanious_capacity_callable()(x)
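# Illustrative sketch, not part of the original GNU Radio module: for a few
# capacity targets a, recover the output value y with solve_capacity() and
# return C(lambda(y)) alongside a -- the two should agree up to fsolve tolerance.
def _example_solve_capacity(design_snr=0.0):
    s = 10 ** (design_snr / 10)
    checks = []
    for a in (0.25, 0.5, 0.75):
        y = solve_capacity(a, s)
        checks.append((a, y, instantanious_capacity(codeword_lambda(y, s))))
    return checks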
def q_function(x):
# Q(x) = (1 / sqrt(2 * pi) ) * integral (x to inf) exp(- x ^ 2 / 2) dx
return .5 * erfc(x / np.sqrt(2))
def discretize_awgn(mu, design_snr):
'''
needed for Binary-AWGN channels.
in [1] described in Section VI
in [2] described as a function of the same name.
    in both cases the infinite output alphabet of a given channel is reduced to a finite output alphabet.
idea:
1. instantaneous capacity C(x) in interval [0, 1]
2. split into mu intervals.
    3. find the corresponding output alphabet values y at which the likelihood ratio lambda(y), inserted into C(x), hits each interval boundary
    4. Calculate the probability of each value given that a '0' or a '1' was transmitted.
'''
s = 10 ** (design_snr / 10)
a = np.zeros(mu + 1, dtype=float)
a[-1] = np.inf
for i in range(1, mu):
a[i] = solve_capacity(1. * i / mu, s)
factor = np.sqrt(2 * s)
tpm = np.zeros((2, mu))
for j in range(mu):
tpm[0][j] = q_function(factor + a[j]) - q_function(factor + a[j + 1])
tpm[1][j] = q_function(-1. * factor + a[j]) - q_function(-1. * factor + a[j + 1])
tpm = tpm[::-1]
tpm[0] = tpm[0][::-1]
tpm[1] = tpm[1][::-1]
return tpm
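# Illustrative sanity sketch, not part of the original GNU Radio module: the
# discretized channel is a 2 x mu matrix of bin probabilities whose entries,
# taken over both rows, sum to (essentially) one.
def _example_discretize_awgn(mu=16, design_snr=0.0):
    tpm = discretize_awgn(mu, design_snr)
    assert tpm.shape == (2, mu)
    assert abs(np.sum(tpm) - 1.0) < 1e-6
    return tpm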
def instant_capacity_delta_callable():
return lambda a, b: -1. * (a + b) * np.log2((a + b) / 2) + a * np.log2(a) + b * np.log2(b)
def capacity_delta_callable():
c = instant_capacity_delta_callable()
return lambda a, b, at, bt: c(a, b) + c(at, bt) - c(a + at, b + bt)
def quantize_to_size(tpm, mu):
# This is a degrading merge, compare [1]
calculate_delta_I = capacity_delta_callable()
L = np.shape(tpm)[1]
if not mu < L:
print('WARNING: This channel gets too small!')
# lambda works on vectors just fine. Use Numpy vector awesomeness.
delta_i_vec = calculate_delta_I(tpm[0, 0:-1], tpm[1, 0:-1], tpm[0, 1:], tpm[1, 1:])
for i in range(L - mu):
d = np.argmin(delta_i_vec)
ap = tpm[0, d] + tpm[0, d + 1]
bp = tpm[1, d] + tpm[1, d + 1]
if d > 0:
delta_i_vec[d - 1] = calculate_delta_I(tpm[0, d - 1], tpm[1, d - 1], ap, bp)
if d < delta_i_vec.size - 1:
delta_i_vec[d + 1] = calculate_delta_I(ap, bp, tpm[0, d + 1], tpm[1, d + 1])
delta_i_vec = np.delete(delta_i_vec, d)
tpm = np.delete(tpm, d, axis=1)
tpm[0, d] = ap
tpm[1, d] = bp
return tpm
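# Illustrative sketch, not part of the original GNU Radio module: the degrading
# merge collapses an L-bin channel down to mu bins by repeatedly merging the
# adjacent pair that costs the least capacity, so the column count shrinks to
# mu while the total probability mass stays the same.
def _example_quantize_to_size(mu=8, L=32, seed=0):
    rng = np.random.RandomState(seed)
    tpm = rng.rand(2, L) + 1e-3  # strictly positive keeps the logs finite
    tpm /= np.sum(tpm)
    reduced = quantize_to_size(tpm, mu)
    assert reduced.shape == (2, mu)
    assert abs(np.sum(reduced) - np.sum(tpm)) < 1e-9
    return reduced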
def upper_bound_z_params(z, block_size, design_snr):
upper_bound = bhattacharyya_bounds(design_snr, block_size)
z = np.minimum(z, upper_bound)
return z
def tal_vardy_tpm_algorithm(block_size, design_snr, mu):
mu = mu // 2 # make sure algorithm uses only as many bins as specified.
block_power = power_of_2_int(block_size)
channels = np.zeros((block_size, 2, mu))
channels[0] = discretize_awgn(mu, design_snr) * 2
print('Constructing polar code with Tal-Vardy algorithm')
    print('(block_size = {0}, design SNR = {1}, mu = {2})'.format(block_size, design_snr, 2 * mu))
show_progress_bar(0, block_size)
for j in range(0, block_power):
u = 2 ** j
for t in range(u):
show_progress_bar(u + t, block_size)
# print("(u={0}, t={1}) = {2}".format(u, t, u + t))
ch1 = upper_convolve(channels[t], mu)
ch2 = lower_convolve(channels[t], mu)
channels[t] = quantize_to_size(ch1, mu)
channels[u + t] = quantize_to_size(ch2, mu)
z = np.zeros(block_size)
for i in range(block_size):
z[i] = bhattacharyya_parameter(channels[i])
z = z[bit_reverse_vector(np.arange(block_size), block_power)]
z = upper_bound_z_params(z, block_size, design_snr)
show_progress_bar(block_size, block_size)
print('')
print('channel construction DONE')
return z
def merge_lr_based(q, mu):
lrs = q[0] / q[1]
vals, indices, inv_indices = np.unique(lrs, return_index=True, return_inverse=True)
# compare [1] (20). Ordering of representatives according to LRs.
temp = np.zeros((2, len(indices)), dtype=float)
if vals.size < mu:
return q
for i in range(len(indices)):
merge_pos = np.where(inv_indices == i)[0]
sum_items = q[:, merge_pos]
if merge_pos.size > 1:
sum_items = np.sum(q[:, merge_pos], axis=1)
temp[0, i] = sum_items[0]
temp[1, i] = sum_items[1]
return temp
def upper_convolve(tpm, mu):
q = np.zeros((2, mu ** 2))
idx = -1
for i in range(mu):
idx += 1
q[0, idx] = (tpm[0, i] ** 2 + tpm[1, i] ** 2) / 2
q[1, idx] = tpm[0, i] * tpm[1, i]
for j in range(i + 1, mu):
idx += 1
q[0, idx] = tpm[0, i] * tpm[0, j] + tpm[1, i] * tpm[1, j]
q[1, idx] = tpm[0, i] * tpm[1, j] + tpm[1, i] * tpm[0, j]
if q[0, idx] < q[1, idx]:
q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
idx += 1
q = np.delete(q, np.arange(idx, np.shape(q)[1]), axis=1)
q = merge_lr_based(q, mu)
q = normalize_q(q, tpm)
return q
def lower_convolve(tpm, mu):
q = np.zeros((2, mu * (mu + 1)))
idx = -1
for i in range(0, mu):
idx += 1
q[0, idx] = (tpm[0, i] ** 2) / 2
q[1, idx] = (tpm[1, i] ** 2) / 2
if q[0, idx] < q[1, idx]:
q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
idx += 1
q[0, idx] = tpm[0, i] * tpm[1, i]
q[1, idx] = q[0, idx]
for j in range(i + 1, mu):
idx += 1
q[0, idx] = tpm[0, i] * tpm[0, j]
q[1, idx] = tpm[1, i] * tpm[1, j]
if q[0, idx] < q[1, idx]:
q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
idx += 1
q[0, idx] = tpm[0, i] * tpm[1, j]
q[1, idx] = tpm[1, i] * tpm[0, j]
if q[0, idx] < q[1, idx]:
q[0, idx], q[1, idx] = swap_values(q[0, idx], q[1, idx])
idx += 1
q = np.delete(q, np.arange(idx, np.shape(q)[1]), axis=1)
q = merge_lr_based(q, mu)
q = normalize_q(q, tpm)
return q
def swap_values(first, second):
return second, first
def normalize_q(q, tpm):
original_factor = np.sum(tpm)
next_factor = np.sum(q)
factor = original_factor / next_factor
return q * factor
def main():
    print('channel construction AWGN main')
n = 8
m = 2 ** n
design_snr = 0.0
mu = 16
z_params = tal_vardy_tpm_algorithm(m, design_snr, mu)
print(z_params)
if 0:
import matplotlib.pyplot as plt
plt.plot(z_params)
plt.show()
if __name__ == '__main__':
main()
| gpl-3.0 |
hdmetor/scikit-learn | sklearn/utils/tests/test_utils.py | 215 | 8100 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.mocking import MockDataFrame
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
    # Check that shuffle does not try to convert to numpy arrays with float
    # dtypes and lets any indexable datastructure pass through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
| bsd-3-clause |
bloyl/mne-python | mne/tests/test_source_space.py | 4 | 43303 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
from shutil import copytree
import pytest
import scipy
import numpy as np
from numpy.testing import (assert_array_equal, assert_allclose, assert_equal,
assert_array_less)
from mne.datasets import testing
import mne
from mne import (read_source_spaces, vertex_to_mni, write_source_spaces,
setup_source_space, setup_volume_source_space,
add_source_space_distances, read_bem_surfaces,
morph_source_spaces, SourceEstimate, make_sphere_model,
head_to_mni, compute_source_morph, pick_types,
read_bem_solution, read_freesurfer_lut, read_talxfm,
read_trans)
from mne.fixes import _get_img_fdata
from mne.utils import (requires_nibabel, run_subprocess,
modified_env, requires_mne, check_version)
from mne.surface import _accumulate_normals, _triangle_neighbors
from mne.source_space import _get_mgz_header
from mne.source_estimate import _get_src_type
from mne.transforms import apply_trans, _get_trans
from mne.source_space import (get_volume_labels_from_aseg,
get_volume_labels_from_src,
_compare_source_spaces,
compute_distance_to_sensors)
from mne.io.pick import _picks_to_idx
from mne.io.constants import FIFF
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
fname_mri = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
aseg_fname = op.join(data_path, 'subjects', 'sample', 'mri', 'aseg.mgz')
fname = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
fname_vol = op.join(subjects_dir, 'sample', 'bem',
'sample-volume-7mm-src.fif')
fname_bem = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-1280-bem.fif')
fname_bem_sol = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-1280-bem-sol.fif')
fname_bem_3 = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-1280-1280-1280-bem.fif')
fname_bem_3_sol = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-1280-1280-1280-bem-sol.fif')
fname_fs = op.join(subjects_dir, 'fsaverage', 'bem', 'fsaverage-ico-5-src.fif')
fname_morph = op.join(subjects_dir, 'sample', 'bem',
'sample-fsaverage-ico-5-src.fif')
fname_src = op.join(
data_path, 'subjects', 'sample', 'bem', 'sample-oct-4-src.fif')
fname_fwd = op.join(
data_path, 'MEG', 'sample', 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
trans_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
fname_small = op.join(base_dir, 'small-src.fif.gz')
fname_ave = op.join(base_dir, 'test-ave.fif')
rng = np.random.RandomState(0)
@testing.requires_testing_data
@pytest.mark.parametrize('picks, limits', [
('meg', (0.02, 0.250)),
(None, (0.01, 0.250)), # should be same as EEG
('eeg', (0.01, 0.250)),
])
def test_compute_distance_to_sensors(picks, limits):
"""Test computation of distances between vertices and sensors."""
src = read_source_spaces(fname_src)
fwd = mne.read_forward_solution(fname_fwd)
info = fwd['info']
trans = read_trans(trans_fname)
# trans = fwd['info']['mri_head_t']
if isinstance(picks, str):
kwargs = dict()
kwargs[picks] = True
if picks == 'eeg':
info['dev_head_t'] = None # should not break anything
use_picks = pick_types(info, **kwargs, exclude=())
else:
use_picks = picks
n_picks = len(_picks_to_idx(info, use_picks, 'data', exclude=()))
# Make sure same vertices are used in src and fwd
src[0]['inuse'] = fwd['src'][0]['inuse']
src[1]['inuse'] = fwd['src'][1]['inuse']
src[0]['nuse'] = fwd['src'][0]['nuse']
src[1]['nuse'] = fwd['src'][1]['nuse']
n_verts = src[0]['nuse'] + src[1]['nuse']
# minimum distances between vertices and sensors
depths = compute_distance_to_sensors(src, info=info, picks=use_picks,
trans=trans)
assert depths.shape == (n_verts, n_picks)
assert limits[0] * 5 > depths.min() # meaningful choice of limits
assert_array_less(limits[0], depths)
assert_array_less(depths, limits[1])
# If source space from Forward Solution and trans=None (i.e. identity) then
# depths2 should be the same as depth.
depths2 = compute_distance_to_sensors(src=fwd['src'], info=info,
picks=use_picks, trans=None)
assert_allclose(depths, depths2, rtol=1e-5)
if picks != 'eeg':
# this should break things
info['dev_head_t'] = None
with pytest.raises(ValueError,
match='Transform between meg<->head'):
compute_distance_to_sensors(src, info, use_picks, trans)
@testing.requires_testing_data
@requires_nibabel()
def test_mgz_header():
"""Test MGZ header reading."""
import nibabel
header = _get_mgz_header(fname_mri)
mri_hdr = nibabel.load(fname_mri).header
assert_allclose(mri_hdr.get_data_shape(), header['dims'])
assert_allclose(mri_hdr.get_vox2ras_tkr(), header['vox2ras_tkr'])
assert_allclose(mri_hdr.get_ras2vox(), np.linalg.inv(header['vox2ras']))
def _read_small_src(remove=True):
src = read_source_spaces(fname_small)
if remove:
for s in src:
s['nearest'] = None
s['nearest_dist'] = None
s['pinfo'] = None
return src
def test_add_patch_info(monkeypatch):
"""Test adding patch info to source space."""
# let's setup a small source space
src = _read_small_src(remove=False)
src_new = _read_small_src()
# test that no patch info is added for small dist_limit
add_source_space_distances(src_new, dist_limit=0.00001)
assert all(s['nearest'] is None for s in src_new)
assert all(s['nearest_dist'] is None for s in src_new)
assert all(s['pinfo'] is None for s in src_new)
# now let's use one that works (and test our warning-throwing)
with monkeypatch.context() as m:
m.setattr(mne.source_space, '_DIST_WARN_LIMIT', 1)
with pytest.warns(RuntimeWarning, match='Computing distances for 258'):
add_source_space_distances(src_new)
_compare_source_spaces(src, src_new, 'approx')
# Old SciPy can't do patch info only
src_new = _read_small_src()
with monkeypatch.context() as m:
m.setattr(scipy, '__version__', '1.0')
with pytest.raises(RuntimeError, match='required to calculate patch '):
add_source_space_distances(src_new, dist_limit=0)
# New SciPy can
if check_version('scipy', '1.3'):
src_nodist = src.copy()
for s in src_nodist:
for key in ('dist', 'dist_limit'):
s[key] = None
add_source_space_distances(src_new, dist_limit=0)
_compare_source_spaces(src, src_new, 'approx')
@testing.requires_testing_data
def test_add_source_space_distances_limited(tmpdir):
"""Test adding distances to source space with a dist_limit."""
src = read_source_spaces(fname)
src_new = read_source_spaces(fname)
del src_new[0]['dist']
del src_new[1]['dist']
n_do = 200 # limit this for speed
src_new[0]['vertno'] = src_new[0]['vertno'][:n_do].copy()
src_new[1]['vertno'] = src_new[1]['vertno'][:n_do].copy()
out_name = tmpdir.join('temp-src.fif')
add_source_space_distances(src_new, dist_limit=0.007)
write_source_spaces(out_name, src_new)
src_new = read_source_spaces(out_name)
for so, sn in zip(src, src_new):
assert_array_equal(so['dist_limit'], np.array([-0.007], np.float32))
assert_array_equal(sn['dist_limit'], np.array([0.007], np.float32))
do = so['dist']
dn = sn['dist']
# clean out distances > 0.007 in C code
do.data[do.data > 0.007] = 0
do.eliminate_zeros()
# make sure we have some comparable distances
assert np.sum(do.data < 0.007) > 400
# do comparison over the region computed
d = (do - dn)[:sn['vertno'][n_do - 1]][:, :sn['vertno'][n_do - 1]]
assert_allclose(np.zeros_like(d.data), d.data, rtol=0, atol=1e-6)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_add_source_space_distances(tmpdir):
"""Test adding distances to source space."""
src = read_source_spaces(fname)
src_new = read_source_spaces(fname)
del src_new[0]['dist']
del src_new[1]['dist']
n_do = 19 # limit this for speed
src_new[0]['vertno'] = src_new[0]['vertno'][:n_do].copy()
src_new[1]['vertno'] = src_new[1]['vertno'][:n_do].copy()
out_name = tmpdir.join('temp-src.fif')
n_jobs = 2
assert n_do % n_jobs != 0
with pytest.raises(ValueError, match='non-negative'):
add_source_space_distances(src_new, dist_limit=-1)
add_source_space_distances(src_new, n_jobs=n_jobs)
write_source_spaces(out_name, src_new)
src_new = read_source_spaces(out_name)
# iterate over both hemispheres
for so, sn in zip(src, src_new):
v = so['vertno'][:n_do]
assert_array_equal(so['dist_limit'], np.array([-0.007], np.float32))
assert_array_equal(sn['dist_limit'], np.array([np.inf], np.float32))
do = so['dist']
dn = sn['dist']
# clean out distances > 0.007 in C code (some residual), and Python
ds = list()
for d in [do, dn]:
d.data[d.data > 0.007] = 0
d = d[v][:, v]
d.eliminate_zeros()
ds.append(d)
# make sure we actually calculated some comparable distances
assert np.sum(ds[0].data < 0.007) > 10
# do comparison
d = ds[0] - ds[1]
assert_allclose(np.zeros_like(d.data), d.data, rtol=0, atol=1e-9)
@testing.requires_testing_data
@requires_mne
def test_discrete_source_space(tmpdir):
"""Test setting up (and reading/writing) discrete source spaces."""
src = read_source_spaces(fname)
v = src[0]['vertno']
# let's make a discrete version with the C code, and with ours
temp_name = tmpdir.join('temp-src.fif')
# save
temp_pos = tmpdir.join('temp-pos.txt')
np.savetxt(str(temp_pos), np.c_[src[0]['rr'][v], src[0]['nn'][v]])
# let's try the spherical one (no bem or surf supplied)
run_subprocess(['mne_volume_source_space', '--meters',
'--pos', temp_pos, '--src', temp_name])
src_c = read_source_spaces(temp_name)
pos_dict = dict(rr=src[0]['rr'][v], nn=src[0]['nn'][v])
src_new = setup_volume_source_space(pos=pos_dict)
assert src_new.kind == 'discrete'
_compare_source_spaces(src_c, src_new, mode='approx')
assert_allclose(src[0]['rr'][v], src_new[0]['rr'],
rtol=1e-3, atol=1e-6)
assert_allclose(src[0]['nn'][v], src_new[0]['nn'],
rtol=1e-3, atol=1e-6)
# now do writing
write_source_spaces(temp_name, src_c, overwrite=True)
src_c2 = read_source_spaces(temp_name)
_compare_source_spaces(src_c, src_c2)
# now do MRI
pytest.raises(ValueError, setup_volume_source_space, 'sample',
pos=pos_dict, mri=fname_mri)
assert repr(src_new).split('~')[0] == repr(src_c).split('~')[0]
assert ' kB' in repr(src_new)
assert src_new.kind == 'discrete'
assert _get_src_type(src_new, None) == 'discrete'
@requires_nibabel()
@pytest.mark.slowtest
@testing.requires_testing_data
def test_volume_source_space(tmpdir):
"""Test setting up volume source spaces."""
src = read_source_spaces(fname_vol)
temp_name = tmpdir.join('temp-src.fif')
surf = read_bem_surfaces(fname_bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN)
surf['rr'] *= 1e3 # convert to mm
bem_sol = read_bem_solution(fname_bem_3_sol)
bem = read_bem_solution(fname_bem_sol)
# The one in the testing dataset (uses bem as bounds)
for this_bem, this_surf in zip(
(bem, fname_bem, fname_bem_3, bem_sol, fname_bem_3_sol, None),
(None, None, None, None, None, surf)):
src_new = setup_volume_source_space(
'sample', pos=7.0, bem=this_bem, surface=this_surf,
subjects_dir=subjects_dir)
write_source_spaces(temp_name, src_new, overwrite=True)
src[0]['subject_his_id'] = 'sample' # XXX: to make comparison pass
_compare_source_spaces(src, src_new, mode='approx')
del src_new
src_new = read_source_spaces(temp_name)
_compare_source_spaces(src, src_new, mode='approx')
with pytest.raises(IOError, match='surface file.*not found'):
setup_volume_source_space(
'sample', surface='foo', mri=fname_mri, subjects_dir=subjects_dir)
bem['surfs'][-1]['coord_frame'] = FIFF.FIFFV_COORD_HEAD
with pytest.raises(ValueError, match='BEM is not in MRI coord.* got head'):
setup_volume_source_space(
'sample', bem=bem, mri=fname_mri, subjects_dir=subjects_dir)
bem['surfs'] = bem['surfs'][:-1] # no inner skull surf
with pytest.raises(ValueError, match='Could not get inner skul.*from BEM'):
setup_volume_source_space(
'sample', bem=bem, mri=fname_mri, subjects_dir=subjects_dir)
del bem
assert repr(src) == repr(src_new)
assert ' MB' in repr(src)
assert src.kind == 'volume'
# Spheres
sphere = make_sphere_model(r0=(0., 0., 0.), head_radius=0.1,
relative_radii=(0.9, 1.0), sigmas=(0.33, 1.0))
src = setup_volume_source_space(pos=10, sphere=(0., 0., 0., 0.09))
src_new = setup_volume_source_space(pos=10, sphere=sphere)
_compare_source_spaces(src, src_new, mode='exact')
with pytest.raises(ValueError, match='sphere, if str'):
setup_volume_source_space(sphere='foo')
# Need a radius
sphere = make_sphere_model(head_radius=None)
with pytest.raises(ValueError, match='be spherical with multiple layers'):
setup_volume_source_space(sphere=sphere)
@testing.requires_testing_data
@requires_mne
def test_other_volume_source_spaces(tmpdir):
"""Test setting up other volume source spaces."""
# these are split off because they require the MNE tools, and
# Travis doesn't seem to like them
# let's try the spherical one (no bem or surf supplied)
temp_name = tmpdir.join('temp-src.fif')
run_subprocess(['mne_volume_source_space',
'--grid', '7.0',
'--src', temp_name,
'--mri', fname_mri])
src = read_source_spaces(temp_name)
sphere = (0., 0., 0., 0.09)
src_new = setup_volume_source_space(None, pos=7.0, mri=fname_mri,
subjects_dir=subjects_dir,
sphere=sphere)
    # we use a more accurate elimination criterion, so let's fix the MNE-C
# source space
assert len(src_new[0]['vertno']) == 7497
assert len(src) == 1
assert len(src_new) == 1
good_mask = np.in1d(src[0]['vertno'], src_new[0]['vertno'])
src[0]['inuse'][src[0]['vertno'][~good_mask]] = 0
assert src[0]['inuse'].sum() == 7497
src[0]['vertno'] = src[0]['vertno'][good_mask]
assert len(src[0]['vertno']) == 7497
src[0]['nuse'] = len(src[0]['vertno'])
assert src[0]['nuse'] == 7497
_compare_source_spaces(src_new, src, mode='approx')
assert 'volume, shape' in repr(src)
del src
del src_new
pytest.raises(ValueError, setup_volume_source_space, 'sample', pos=7.0,
sphere=[1., 1.], mri=fname_mri, # bad sphere
subjects_dir=subjects_dir)
# now without MRI argument, it should give an error when we try
# to read it
run_subprocess(['mne_volume_source_space',
'--grid', '7.0',
'--src', temp_name])
pytest.raises(ValueError, read_source_spaces, temp_name)
@pytest.mark.timeout(60) # can be slow on OSX Travis
@pytest.mark.slowtest
@testing.requires_testing_data
def test_triangle_neighbors():
"""Test efficient vertex neighboring triangles for surfaces."""
this = read_source_spaces(fname)[0]
this['neighbor_tri'] = [list() for _ in range(this['np'])]
for p in range(this['ntri']):
verts = this['tris'][p]
this['neighbor_tri'][verts[0]].append(p)
this['neighbor_tri'][verts[1]].append(p)
this['neighbor_tri'][verts[2]].append(p)
this['neighbor_tri'] = [np.array(nb, int) for nb in this['neighbor_tri']]
neighbor_tri = _triangle_neighbors(this['tris'], this['np'])
assert all(np.array_equal(nt1, nt2)
for nt1, nt2 in zip(neighbor_tri, this['neighbor_tri']))
def test_accumulate_normals():
"""Test efficient normal accumulation for surfaces."""
# set up comparison
n_pts = int(1.6e5) # approx number in sample source space
n_tris = int(3.2e5)
# use all positive to make a worst-case for cumulative summation
# (real "nn" vectors will have both positive and negative values)
tris = (rng.rand(n_tris, 1) * (n_pts - 2)).astype(int)
tris = np.c_[tris, tris + 1, tris + 2]
tri_nn = rng.rand(n_tris, 3)
this = dict(tris=tris, np=n_pts, ntri=n_tris, tri_nn=tri_nn)
# cut-and-paste from original code in surface.py:
# Find neighboring triangles and accumulate vertex normals
this['nn'] = np.zeros((this['np'], 3))
for p in range(this['ntri']):
# vertex normals
verts = this['tris'][p]
this['nn'][verts, :] += this['tri_nn'][p, :]
nn = _accumulate_normals(this['tris'], this['tri_nn'], this['np'])
# the moment of truth (or reckoning)
assert_allclose(nn, this['nn'], rtol=1e-7, atol=1e-7)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_setup_source_space(tmpdir):
"""Test setting up ico, oct, and all source spaces."""
fname_ico = op.join(data_path, 'subjects', 'fsaverage', 'bem',
'fsaverage-ico-5-src.fif')
# first lets test some input params
for spacing in ('oct', 'oct6e'):
with pytest.raises(ValueError, match='subdivision must be an integer'):
setup_source_space('sample', spacing=spacing,
add_dist=False, subjects_dir=subjects_dir)
for spacing in ('oct0', 'oct-4'):
with pytest.raises(ValueError, match='oct subdivision must be >= 1'):
setup_source_space('sample', spacing=spacing,
add_dist=False, subjects_dir=subjects_dir)
with pytest.raises(ValueError, match='ico subdivision must be >= 0'):
setup_source_space('sample', spacing='ico-4',
add_dist=False, subjects_dir=subjects_dir)
with pytest.raises(ValueError, match='must be a string with values'):
setup_source_space('sample', spacing='7emm',
add_dist=False, subjects_dir=subjects_dir)
with pytest.raises(ValueError, match='must be a string with values'):
setup_source_space('sample', spacing='alls',
add_dist=False, subjects_dir=subjects_dir)
# ico 5 (fsaverage) - write to temp file
src = read_source_spaces(fname_ico)
with pytest.warns(None): # sklearn equiv neighbors
src_new = setup_source_space('fsaverage', spacing='ico5',
subjects_dir=subjects_dir, add_dist=False)
_compare_source_spaces(src, src_new, mode='approx')
assert repr(src).split('~')[0] == repr(src_new).split('~')[0]
assert repr(src).count('surface (') == 2
assert_array_equal(src[0]['vertno'], np.arange(10242))
assert_array_equal(src[1]['vertno'], np.arange(10242))
# oct-6 (sample) - auto filename + IO
src = read_source_spaces(fname)
temp_name = tmpdir.join('temp-src.fif')
with pytest.warns(None): # sklearn equiv neighbors
src_new = setup_source_space('sample', spacing='oct6',
subjects_dir=subjects_dir, add_dist=False)
write_source_spaces(temp_name, src_new, overwrite=True)
assert_equal(src_new[0]['nuse'], 4098)
_compare_source_spaces(src, src_new, mode='approx', nearest=False)
src_new = read_source_spaces(temp_name)
_compare_source_spaces(src, src_new, mode='approx', nearest=False)
# all source points - no file writing
src_new = setup_source_space('sample', spacing='all',
subjects_dir=subjects_dir, add_dist=False)
assert src_new[0]['nuse'] == len(src_new[0]['rr'])
assert src_new[1]['nuse'] == len(src_new[1]['rr'])
# dense source space to hit surf['inuse'] lines of _create_surf_spacing
pytest.raises(RuntimeError, setup_source_space, 'sample',
spacing='ico6', subjects_dir=subjects_dir, add_dist=False)
@testing.requires_testing_data
@requires_mne
@pytest.mark.slowtest
@pytest.mark.timeout(60)
@pytest.mark.parametrize('spacing', [2, 7])
def test_setup_source_space_spacing(tmpdir, spacing):
"""Test setting up surface source spaces using a given spacing."""
copytree(op.join(subjects_dir, 'sample'), str(tmpdir.join('sample')))
args = [] if spacing == 7 else ['--spacing', str(spacing)]
with modified_env(SUBJECTS_DIR=str(tmpdir), SUBJECT='sample'):
run_subprocess(['mne_setup_source_space'] + args)
src = read_source_spaces(tmpdir.join('sample', 'bem',
'sample-%d-src.fif' % spacing))
src_new = setup_source_space('sample', spacing=spacing, add_dist=False,
subjects_dir=subjects_dir)
_compare_source_spaces(src, src_new, mode='approx', nearest=True)
# Degenerate conditions
with pytest.raises(TypeError, match='spacing must be.*got.*float.*'):
setup_source_space('sample', 7., subjects_dir=subjects_dir)
with pytest.raises(ValueError, match='spacing must be >= 2, got 1'):
setup_source_space('sample', 1, subjects_dir=subjects_dir)
@testing.requires_testing_data
def test_read_source_spaces():
"""Test reading of source space meshes."""
src = read_source_spaces(fname, patch_stats=True)
# 3D source space
lh_points = src[0]['rr']
lh_faces = src[0]['tris']
lh_use_faces = src[0]['use_tris']
rh_points = src[1]['rr']
rh_faces = src[1]['tris']
rh_use_faces = src[1]['use_tris']
assert lh_faces.min() == 0
assert lh_faces.max() == lh_points.shape[0] - 1
assert lh_use_faces.min() >= 0
assert lh_use_faces.max() <= lh_points.shape[0] - 1
assert rh_faces.min() == 0
assert rh_faces.max() == rh_points.shape[0] - 1
assert rh_use_faces.min() >= 0
assert rh_use_faces.max() <= rh_points.shape[0] - 1
@pytest.mark.slowtest
@testing.requires_testing_data
def test_write_source_space(tmpdir):
"""Test reading and writing of source spaces."""
src0 = read_source_spaces(fname, patch_stats=False)
temp_fname = tmpdir.join('tmp-src.fif')
write_source_spaces(temp_fname, src0)
src1 = read_source_spaces(temp_fname, patch_stats=False)
_compare_source_spaces(src0, src1)
# test warnings on bad filenames
src_badname = tmpdir.join('test-bad-name.fif.gz')
with pytest.warns(RuntimeWarning, match='-src.fif'):
write_source_spaces(src_badname, src0)
with pytest.warns(RuntimeWarning, match='-src.fif'):
read_source_spaces(src_badname)
@testing.requires_testing_data
def test_vertex_to_mni():
"""Test conversion of vertices to MNI coordinates."""
# obtained using "tksurfer (sample) (l/r)h white"
vertices = [100960, 7620, 150549, 96761]
coords = np.array([[-60.86, -11.18, -3.19], [-36.46, -93.18, -2.36],
[-38.00, 50.08, -10.61], [47.14, 8.01, 46.93]])
hemis = [0, 0, 0, 1]
coords_2 = vertex_to_mni(vertices, hemis, 'sample', subjects_dir)
# less than 1mm error
assert_allclose(coords, coords_2, atol=1.0)
@testing.requires_testing_data
def test_head_to_mni():
"""Test conversion of aseg vertices to MNI coordinates."""
# obtained using freeview
coords = np.array([[22.52, 11.24, 17.72], [22.52, 5.46, 21.58],
[16.10, 5.46, 22.23], [21.24, 8.36, 22.23]]) / 1000.
xfm = read_talxfm('sample', subjects_dir)
coords_MNI = apply_trans(xfm['trans'], coords) * 1000.
mri_head_t, _ = _get_trans(trans_fname, 'mri', 'head', allow_none=False)
# obtained from sample_audvis-meg-oct-6-mixed-fwd.fif
coo_right_amygdala = np.array([[0.01745682, 0.02665809, 0.03281873],
[0.01014125, 0.02496262, 0.04233755],
[0.01713642, 0.02505193, 0.04258181],
[0.01720631, 0.03073877, 0.03850075]])
coords_MNI_2 = head_to_mni(coo_right_amygdala, 'sample', mri_head_t,
subjects_dir)
# less than 1mm error
assert_allclose(coords_MNI, coords_MNI_2, atol=10.0)
@requires_nibabel()
@testing.requires_testing_data
def test_vertex_to_mni_fs_nibabel(monkeypatch):
"""Test equivalence of vert_to_mni for nibabel and freesurfer."""
n_check = 1000
subject = 'sample'
vertices = rng.randint(0, 100000, n_check)
hemis = rng.randint(0, 1, n_check)
coords = vertex_to_mni(vertices, hemis, subject, subjects_dir)
read_mri = mne.source_space._read_mri_info
monkeypatch.setattr(
mne.source_space, '_read_mri_info',
lambda *args, **kwargs: read_mri(*args, use_nibabel=True, **kwargs))
coords_2 = vertex_to_mni(vertices, hemis, subject, subjects_dir)
# less than 0.1 mm error
assert_allclose(coords, coords_2, atol=0.1)
@testing.requires_testing_data
@requires_nibabel()
@pytest.mark.parametrize('fname', [
None,
op.join(op.dirname(mne.__file__), 'data', 'FreeSurferColorLUT.txt'),
])
def test_read_freesurfer_lut(fname, tmpdir):
"""Test reading volume label names."""
atlas_ids, colors = read_freesurfer_lut(fname)
assert list(atlas_ids).count('Brain-Stem') == 1
assert len(colors) == len(atlas_ids) == 1266
label_names, label_colors = get_volume_labels_from_aseg(
aseg_fname, return_colors=True)
assert isinstance(label_names, list)
assert isinstance(label_colors, list)
assert label_names.count('Brain-Stem') == 1
for c in label_colors:
assert isinstance(c, np.ndarray)
assert c.shape == (4,)
assert len(label_names) == len(label_colors) == 46
with pytest.raises(ValueError, match='must be False'):
get_volume_labels_from_aseg(
aseg_fname, return_colors=True, atlas_ids=atlas_ids)
label_names_2 = get_volume_labels_from_aseg(
aseg_fname, atlas_ids=atlas_ids)
assert label_names == label_names_2
# long name (only test on one run)
if fname is not None:
return
fname = str(tmpdir.join('long.txt'))
names = ['Anterior_Cingulate_and_Medial_Prefrontal_Cortex-' + hemi
for hemi in ('lh', 'rh')]
ids = np.arange(1, len(names) + 1)
colors = [(id_,) * 4 for id_ in ids]
with open(fname, 'w') as fid:
for name, id_, color in zip(names, ids, colors):
out_color = ' '.join('%3d' % x for x in color)
line = '%d %s %s\n' % (id_, name, out_color)
fid.write(line)
lut, got_colors = read_freesurfer_lut(fname)
assert len(lut) == len(got_colors) == len(names) == len(ids)
for name, id_, color in zip(names, ids, colors):
assert name in lut
assert name in got_colors
assert_array_equal(got_colors[name][:3], color[:3])
assert lut[name] == id_
with open(fname, 'w') as fid:
for name, id_, color in zip(names, ids, colors):
out_color = ' '.join('%3d' % x for x in color[:3]) # wrong length!
line = '%d %s %s\n' % (id_, name, out_color)
fid.write(line)
with pytest.raises(RuntimeError, match='formatted'):
read_freesurfer_lut(fname)
@testing.requires_testing_data
@requires_nibabel()
@pytest.mark.parametrize('pass_ids', (True, False))
def test_source_space_from_label(tmpdir, pass_ids):
"""Test generating a source space from volume label."""
aseg_short = 'aseg.mgz'
atlas_ids, _ = read_freesurfer_lut()
volume_label = 'Left-Cerebellum-Cortex'
# Test pos as dict
pos = dict()
with pytest.raises(ValueError, match='mri must be None if pos is a dict'):
setup_volume_source_space(
'sample', pos=pos, volume_label=volume_label, mri=aseg_short,
subjects_dir=subjects_dir)
# Test T1.mgz provided
with pytest.raises(RuntimeError, match=r'Must use a \*aseg.mgz file'):
setup_volume_source_space(
'sample', mri='T1.mgz', volume_label=volume_label,
subjects_dir=subjects_dir)
# Test invalid volume label
mri = aseg_short
with pytest.raises(ValueError, match="'Left-Cerebral' not found.*Did you"):
setup_volume_source_space(
'sample', volume_label='Left-Cerebral', mri=mri,
subjects_dir=subjects_dir)
# These should be equivalent
if pass_ids:
use_volume_label = {volume_label: atlas_ids[volume_label]}
else:
use_volume_label = volume_label
# ensure it works even when not provided (detect that it should be aseg)
src = setup_volume_source_space(
'sample', volume_label=use_volume_label, add_interpolator=False,
subjects_dir=subjects_dir)
assert_equal(volume_label, src[0]['seg_name'])
assert src[0]['nuse'] == 404 # for our given pos and label
# test reading and writing
out_name = tmpdir.join('temp-src.fif')
write_source_spaces(out_name, src)
src_from_file = read_source_spaces(out_name)
_compare_source_spaces(src, src_from_file, mode='approx')
@testing.requires_testing_data
@requires_nibabel()
def test_source_space_exclusive_complete(src_volume_labels):
"""Test that we produce exclusive and complete labels."""
# these two are neighbors and are quite large, so let's use them to
# ensure no overlaps
src, volume_labels, _ = src_volume_labels
ii = volume_labels.index('Left-Cerebral-White-Matter')
jj = volume_labels.index('Left-Cerebral-Cortex')
assert src[ii]['nuse'] == 755 # 2034 with pos=5, was 2832
assert src[jj]['nuse'] == 616 # 1520 with pos=5, was 2623
src_full = read_source_spaces(fname_vol)
# This implicitly checks for overlap because np.sort would preserve
# duplicates, and it checks for completeness because the sets should match
assert_array_equal(src_full[0]['vertno'],
np.sort(np.concatenate([s['vertno'] for s in src])))
for si, s in enumerate(src):
assert_allclose(src_full[0]['rr'], s['rr'], atol=1e-6)
# also check single_volume=True -- should be the same result
src_single = setup_volume_source_space(
src[0]['subject_his_id'], 7., 'aseg.mgz', bem=fname_bem,
volume_label=volume_labels, single_volume=True, add_interpolator=False,
subjects_dir=subjects_dir)
assert len(src_single) == 1
assert 'Unknown+Left-Cerebral-White-Matter+Left-' in repr(src_single)
assert_array_equal(src_full[0]['vertno'], src_single[0]['vertno'])
@pytest.mark.timeout(60) # ~24 sec on Travis
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_nibabel()
def test_read_volume_from_src():
"""Test reading volumes from a mixed source space."""
labels_vol = ['Left-Amygdala',
'Brain-Stem',
'Right-Amygdala']
src = read_source_spaces(fname)
# Setup a volume source space
vol_src = setup_volume_source_space('sample', mri=aseg_fname,
pos=5.0,
bem=fname_bem,
volume_label=labels_vol,
subjects_dir=subjects_dir)
# Generate the mixed source space, testing some list methods
assert src.kind == 'surface'
assert vol_src.kind == 'volume'
src += vol_src
assert src.kind == 'mixed'
assert vol_src.kind == 'volume'
assert src[:2].kind == 'surface'
assert src[2:].kind == 'volume'
assert src[:].kind == 'mixed'
with pytest.raises(RuntimeError, match='Invalid source space'):
src[::2]
volume_src = get_volume_labels_from_src(src, 'sample', subjects_dir)
volume_label = volume_src[0].name
volume_label = 'Left-' + volume_label.replace('-lh', '')
# Test
assert_equal(volume_label, src[2]['seg_name'])
assert_equal(src[2]['type'], 'vol')
@testing.requires_testing_data
@requires_nibabel()
def test_combine_source_spaces(tmpdir):
"""Test combining source spaces."""
import nibabel as nib
rng = np.random.RandomState(2)
volume_labels = ['Brain-Stem', 'Right-Hippocampus'] # two fairly large
# create a sparse surface source space to ensure all get mapped
# when mri_resolution=False
srf = setup_source_space('sample', 'oct3', add_dist=False,
subjects_dir=subjects_dir)
# setup 2 volume source spaces
vol = setup_volume_source_space('sample', subjects_dir=subjects_dir,
volume_label=volume_labels[0],
mri=aseg_fname, add_interpolator=False)
# setup a discrete source space
rr = rng.randint(0, 11, (20, 3)) * 5e-3
nn = np.zeros(rr.shape)
nn[:, -1] = 1
pos = {'rr': rr, 'nn': nn}
disc = setup_volume_source_space('sample', subjects_dir=subjects_dir,
pos=pos, verbose='error')
# combine source spaces
assert srf.kind == 'surface'
assert vol.kind == 'volume'
assert disc.kind == 'discrete'
src = srf + vol + disc
assert src.kind == 'mixed'
assert srf.kind == 'surface'
assert vol.kind == 'volume'
assert disc.kind == 'discrete'
# test addition of source spaces
assert len(src) == 4
# test reading and writing
src_out_name = tmpdir.join('temp-src.fif')
src.save(src_out_name)
src_from_file = read_source_spaces(src_out_name)
_compare_source_spaces(src, src_from_file, mode='approx')
assert repr(src).split('~')[0] == repr(src_from_file).split('~')[0]
assert_equal(src.kind, 'mixed')
# test that all source spaces are in MRI coordinates
coord_frames = np.array([s['coord_frame'] for s in src])
assert (coord_frames == FIFF.FIFFV_COORD_MRI).all()
# test errors for export_volume
image_fname = tmpdir.join('temp-image.mgz')
# source spaces with no volume
with pytest.raises(ValueError, match='at least one volume'):
srf.export_volume(image_fname, verbose='error')
# unrecognized source type
disc2 = disc.copy()
disc2[0]['type'] = 'kitty'
with pytest.raises(ValueError, match='Invalid value'):
src + disc2
del disc2
# unrecognized file type
bad_image_fname = tmpdir.join('temp-image.png')
# vertices outside vol space warning
pytest.raises(ValueError, src.export_volume, bad_image_fname,
verbose='error')
# mixed coordinate frames
disc3 = disc.copy()
disc3[0]['coord_frame'] = 10
src_mixed_coord = src + disc3
with pytest.raises(ValueError, match='must be in head coordinates'):
src_mixed_coord.export_volume(image_fname, verbose='error')
# now actually write it
fname_img = tmpdir.join('img.nii')
for mri_resolution in (False, 'sparse', True):
for src, up in ((vol, 705),
(srf + vol, 27272),
(disc + vol, 705)):
src.export_volume(
fname_img, use_lut=False,
mri_resolution=mri_resolution, overwrite=True)
img_data = _get_img_fdata(nib.load(str(fname_img)))
n_src = img_data.astype(bool).sum()
n_want = sum(s['nuse'] for s in src)
if mri_resolution is True:
n_want += up
assert n_src == n_want, src
# gh-8004
temp_aseg = tmpdir.join('aseg.mgz')
aseg_img = nib.load(aseg_fname)
aseg_affine = aseg_img.affine
aseg_affine[:3, :3] *= 0.7
new_aseg = nib.MGHImage(aseg_img.dataobj, aseg_affine)
nib.save(new_aseg, str(temp_aseg))
lh_cereb = mne.setup_volume_source_space(
"sample", mri=temp_aseg, volume_label="Left-Cerebellum-Cortex",
add_interpolator=False, subjects_dir=subjects_dir)
src = srf + lh_cereb
with pytest.warns(RuntimeWarning, match='2 surf vertices lay outside'):
src.export_volume(image_fname, mri_resolution="sparse", overwrite=True)
@testing.requires_testing_data
def test_morph_source_spaces():
"""Test morphing of source spaces."""
src = read_source_spaces(fname_fs)
src_morph = read_source_spaces(fname_morph)
src_morph_py = morph_source_spaces(src, 'sample',
subjects_dir=subjects_dir)
_compare_source_spaces(src_morph, src_morph_py, mode='approx')
@pytest.mark.timeout(60) # can be slow on OSX Travis
@pytest.mark.slowtest
@testing.requires_testing_data
def test_morphed_source_space_return():
"""Test returning a morphed source space to the original subject."""
# let's create some random data on fsaverage
data = rng.randn(20484, 1)
tmin, tstep = 0, 1.
src_fs = read_source_spaces(fname_fs)
stc_fs = SourceEstimate(data, [s['vertno'] for s in src_fs],
tmin, tstep, 'fsaverage')
n_verts_fs = sum(len(s['vertno']) for s in src_fs)
# Create our morph source space
src_morph = morph_source_spaces(src_fs, 'sample',
subjects_dir=subjects_dir)
n_verts_sample = sum(len(s['vertno']) for s in src_morph)
assert n_verts_fs == n_verts_sample
# Morph the data over using standard methods
stc_morph = compute_source_morph(
src_fs, 'fsaverage', 'sample',
spacing=[s['vertno'] for s in src_morph], smooth=1,
subjects_dir=subjects_dir, warn=False).apply(stc_fs)
assert stc_morph.data.shape[0] == n_verts_sample
# We can now pretend like this was real data we got e.g. from an inverse.
# To be complete, let's remove some vertices
keeps = [np.sort(rng.permutation(np.arange(len(v)))[:len(v) - 10])
for v in stc_morph.vertices]
stc_morph = SourceEstimate(
np.concatenate([stc_morph.lh_data[keeps[0]],
stc_morph.rh_data[keeps[1]]]),
[v[k] for v, k in zip(stc_morph.vertices, keeps)], tmin, tstep,
'sample')
# Return it to the original subject
stc_morph_return = stc_morph.to_original_src(
src_fs, subjects_dir=subjects_dir)
# This should fail (has too many verts in SourceMorph)
with pytest.warns(RuntimeWarning, match='vertices not included'):
morph = compute_source_morph(
src_morph, subject_from='sample',
spacing=stc_morph_return.vertices, smooth=1,
subjects_dir=subjects_dir)
with pytest.raises(ValueError, match='vertices do not match'):
morph.apply(stc_morph)
# Compare to the original data
with pytest.warns(RuntimeWarning, match='vertices not included'):
stc_morph_morph = compute_source_morph(
src=stc_morph, subject_from='sample',
spacing=stc_morph_return.vertices, smooth=1,
subjects_dir=subjects_dir).apply(stc_morph)
assert_equal(stc_morph_return.subject, stc_morph_morph.subject)
for ii in range(2):
assert_array_equal(stc_morph_return.vertices[ii],
stc_morph_morph.vertices[ii])
# These will not match perfectly because morphing pushes data around
corr = np.corrcoef(stc_morph_return.data[:, 0],
stc_morph_morph.data[:, 0])[0, 1]
assert corr > 0.99, corr
# Explicitly test having two vertices map to the same target vertex. We
# simulate this by having two vertices be at the same position.
src_fs2 = src_fs.copy()
vert1, vert2 = src_fs2[0]['vertno'][:2]
src_fs2[0]['rr'][vert1] = src_fs2[0]['rr'][vert2]
stc_morph_return = stc_morph.to_original_src(
src_fs2, subjects_dir=subjects_dir)
# test to_original_src method result equality
for ii in range(2):
assert_array_equal(stc_morph_return.vertices[ii],
stc_morph_morph.vertices[ii])
# These will not match perfectly because morphing pushes data around
corr = np.corrcoef(stc_morph_return.data[:, 0],
stc_morph_morph.data[:, 0])[0, 1]
assert corr > 0.99, corr
# Degenerate cases
stc_morph.subject = None # no .subject provided
pytest.raises(ValueError, stc_morph.to_original_src,
src_fs, subject_orig='fsaverage', subjects_dir=subjects_dir)
stc_morph.subject = 'sample'
del src_fs[0]['subject_his_id'] # no name in src_fsaverage
pytest.raises(ValueError, stc_morph.to_original_src,
src_fs, subjects_dir=subjects_dir)
src_fs[0]['subject_his_id'] = 'fsaverage' # name mismatch
pytest.raises(ValueError, stc_morph.to_original_src,
src_fs, subject_orig='foo', subjects_dir=subjects_dir)
src_fs[0]['subject_his_id'] = 'sample'
src = read_source_spaces(fname) # wrong source space
pytest.raises(RuntimeError, stc_morph.to_original_src,
src, subjects_dir=subjects_dir)
# The following code was used to generate small-src.fif.gz.
# Unfortunately the C code bombs when trying to add source space distances,
# possibly due to incomplete "faking" of a smaller surface on our part here.
"""
# -*- coding: utf-8 -*-
import os
import numpy as np
import mne
data_path = mne.datasets.sample.data_path()
src = mne.setup_source_space('sample', fname=None, spacing='oct5')
hemis = ['lh', 'rh']
fnames = [data_path + '/subjects/sample/surf/%s.decimated' % h for h in hemis]
vs = list()
for s, fname in zip(src, fnames):
coords = s['rr'][s['vertno']]
vs.append(s['vertno'])
idx = -1 * np.ones(len(s['rr']))
idx[s['vertno']] = np.arange(s['nuse'])
faces = s['use_tris']
faces = idx[faces]
mne.write_surface(fname, coords, faces)
# we need to move sphere surfaces
spheres = [data_path + '/subjects/sample/surf/%s.sphere' % h for h in hemis]
for s in spheres:
os.rename(s, s + '.bak')
try:
for s, v in zip(spheres, vs):
coords, faces = mne.read_surface(s + '.bak')
coords = coords[v]
mne.write_surface(s, coords, faces)
src = mne.setup_source_space('sample', fname=None, spacing='oct4',
surface='decimated')
finally:
for s in spheres:
os.rename(s + '.bak', s)
fname = 'small-src.fif'
fname_gz = fname + '.gz'
mne.write_source_spaces(fname, src)
mne.utils.run_subprocess(['mne_add_patch_info', '--src', fname,
'--srcp', fname])
mne.write_source_spaces(fname_gz, mne.read_source_spaces(fname))
"""
| bsd-3-clause |
michigraber/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
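# Hedged illustration (not part of the original example): nudge_dataset should
# return five times as many rows, the originals plus four one-pixel shifts.
# The shapes assumed below correspond to 8x8 digit images flattened to 64
# features.
_X_demo, _Y_demo = nudge_dataset(np.zeros((3, 64), dtype='float32'), np.arange(3))
assert _X_demo.shape == (15, 64) and _Y_demo.shape == (15,)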
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
idlead/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 232 | 4761 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
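    # With a single neighbour the only admissible reconstruction weight is 1,
    # hence the 0/1 pattern checked above; with two neighbours (below) the
    # barycenter weights are an affine least-squares combination and therefore
    # each row sums to one.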
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
| bsd-3-clause |
zengxiao1028/MobileDeepPill | model_converter/gen_reference_database.py | 1 | 1225 | import numpy as np
import tensorflow as tf
from sklearn.externals import joblib
import json
from pill_feature_generator import FeatureGenerator
def main():
output_graph_path = 'model/frozen_model.pb'
fg = FeatureGenerator(output_graph_path,input_size=227)
pills = joblib.load('data/pill_imgs_227_color_fb.pkl')
pill_imgs = []
pill_names = []
for pill_img, file_name in zip(pills[0],pills[1]):
#skip consumer images
if file_name.find('S')<0:
continue
pill_imgs.append(pill_img)
pill_names.append(file_name)
pill_imgs = np.array(pill_imgs)
color_fea,gray_fea = fg.gen_feature(pill_imgs)
joblib.dump((pill_names,color_fea,gray_fea),'data/ref_db.pkl',compress=3)
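    # ref_db.pkl holds a 3-tuple (file names, colour features, grey features)
    # whose entries are row-aligned, one row per reference pill image.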
print(color_fea.shape,gray_fea.shape)
def convert_to_json():
pill_names, color_fea, gray_fea = joblib.load('data/ref_db.pkl')
json_dict = dict()
json_dict['ref_pills']=[]
for name, color, gray in zip(pill_names,color_fea,gray_fea):
json_dict['ref_pills'].append( [ name ,color.tolist(), gray.tolist()] )
with open('data/ref_db.json', 'w') as f:
json.dump(json_dict, f)
if __name__ == '__main__':
#main()
convert_to_json()
| mit |
Myasuka/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vector. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional one to the other. The cosine
distance is invariant to a scaling of the data, as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. "cityblock" distance) is much smaller than its
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
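# Hedged sanity check (not part of the original example): the cosine distance
# is invariant to positive rescaling, which is why the two proportional
# waveform classes cannot be told apart with it, whereas the euclidean
# distance grows with the scale factor.
_w = X[:1]
assert abs(pairwise_distances(_w, 3 * _w, metric="cosine")[0, 0]) < 1e-8
assert pairwise_distances(_w, 3 * _w, metric="euclidean")[0, 0] > 0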
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
neurodata/ndgrutedb | MR-OCP/MROCPdjango/computation/plotting/distPlot.py | 2 | 5303 | #!/usr/bin/env python
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Disa Mhembere, Johns Hopkins University
# Separated: 10/2/2012
# Plot all .np arrays in a common dir on the same axis & save
# 1 indexed
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import pylab as pl
import numpy as np
import os
import sys
from glob import glob
import argparse
import scipy
from scipy import interpolate
# Issues: Done nothing with MAD
def plotInvDist(invDir, pngName, numBins =100):
# ClustCoeff Degree Eigen MAD numEdges.npy ScanStat Triangle
MADdir = "MAD"
ccDir = "ClustCoeff"
DegDir = "Degree"
EigDir = "Eigen"
SS1dir = "ScanStat"
triDir = "Triangle"
invDirs = [triDir, ccDir, SS1dir, DegDir ]
if not os.path.exists(invDir):
print "%s does not exist" % invDir
sys.exit(1)
pl.figure(2)
fig_gl, axes = pl.subplots(nrows=3, ncols=2)
# fig_gl.tight_layout()
for idx, drcty in enumerate (invDirs):
for arrfn in glob(os.path.join(invDir, drcty,'*.npy')):
try:
arr = np.load(arrfn)
arr = np.log(arr[arr.nonzero()])
print "Processing %s..." % arrfn
except:
print "Ivariant file not found %s" % arrfn
pl.figure(1)
n, bins, patches = pl.hist(arr, bins=numBins , range=None, normed=False, weights=None, cumulative=False, \
bottom=None, histtype='stepfilled', align='mid', orientation='vertical', \
rwidth=None, log=False, color=None, label=None, hold=None)
n = np.append(n,0)
n = n/float(sum(n))
fig = pl.figure(2)
fig.subplots_adjust(hspace=.5)
ax = pl.subplot(3,2,idx+1)
if idx == 0:
plt.axis([0, 35, 0, 0.04])
ax.set_yticks(scipy.arange(0,0.04,0.01))
if idx == 1 or idx == 2:
ax.set_yticks(scipy.arange(0,0.03,0.01))
if idx == 3:
ax.set_yticks(scipy.arange(0,0.04,0.01))
# Interpolation
f = interpolate.interp1d(bins, n, kind='cubic')
x = np.arange(bins[0],bins[-1],0.03) # vary linspc
interp = f(x)
ltz = interp < 0
interp[ltz] = 0
pl.plot(x, interp,color ='grey' ,linewidth=1)
if idx == 0:
pl.ylabel('Probability')
pl.xlabel('log number of local triangles')
if idx == 1:
#pl.ylabel('Probability') #**
pl.xlabel('log local clustering coefficient')
if idx == 2:
pl.ylabel('Probability')
pl.xlabel('log scan1 statistic')
if idx == 3:
#pl.ylabel('Probability') #**
pl.xlabel('log local degree')
''' Eigenvalues '''
ax = pl.subplot(3,2,5)
ax.set_yticks(scipy.arange(0,16,4))
for eigValInstance in glob(os.path.join(invDir, EigDir,"*.npy")):
try:
eigv = np.load(eigValInstance)
except:
print "Eigenvalue array"
n = len(eigv)
sa = (np.sort(eigv)[::-1])
pl.plot(range(1,n+1), sa/10000, color='grey')
pl.ylabel('Magnitude ($X 10^4$) ')
pl.xlabel('eigenvalue rank')
''' Edges '''
arrfn = os.path.join(invDir, 'Globals', 'numEdges.npy')
try:
arr = np.load(arrfn)
arr = np.log(arr[arr.nonzero()])
print "Processing %s..." % arrfn
except:
print "Ivariant file not found %s" % arrfn
pl.figure(1)
n, bins, patches = pl.hist(arr, bins=10 , range=None, normed=False, weights=None, cumulative=False, \
bottom=None, histtype='stepfilled', align='mid', orientation='vertical', \
rwidth=None, log=False, color=None, label=None, hold=None)
n = np.append(n,0)
fig = pl.figure(2)
ax = pl.subplot(3,2,6)
ax.set_xticks(scipy.arange(17.2,18.1,0.2))
f = interpolate.interp1d(bins, n, kind='cubic')
x = np.arange(bins[0],bins[-1],0.01) # vary linspc
interp = f(x)
ltz = interp < 0
interp[ltz] = 0
pl.plot(x, interp,color ='grey' ,linewidth=1)
pl.ylabel('Frequency')
pl.xlabel('log global edge number')
import scipy.io as sio
sio.savemat("Edgedata", {"data": interp})
sio.savemat("Edgedatax", {"data": x})
#pl.savefig(pngName+'.pdf')
pl.savefig(pngName+'.png')
def main():
parser = argparse.ArgumentParser(description='Plot distribution of invariant arrays of several graphs')
parser.add_argument('invDir', action='store',help='The full path of directory containing .npy invariant arrays')
parser.add_argument('pngName', action='store', help='Full path of directory of resulting png file')
parser.add_argument('numBins', type = int, action='store', help='Number of bins')
result = parser.parse_args()
plotInvDist(result.invDir, result.pngName, result.numBins)
if __name__ == '__main__':
main()
| apache-2.0 |
gsprint23/sensor_data_preprocessing | src/utils.py | 1 | 17399 | '''
Copyright (C) 2015 Gina L. Sprint
Email: Gina Sprint <gsprint@eecs.wsu.edu>
This file is part of sensor_data_preprocessing.
sensor_data_preprocessing is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
sensor_data_preprocessing is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with sensor_data_preprocessing. If not, see <http://www.gnu.org/licenses/>.
OrientationFiltering --- utils.py
Created on Apr 2, 2015
This code is specific to processing Shimmer3 sensor data for our ambulatory circuit
wearable sensor study. Functions of interest that could be used for other research
domains include:
1. apply_filter()
2. orient_shank()
3. orient_COM()
Dependencies:
pandas
@author: Gina Sprint and Vladimir Borisov
'''
import numpy as np
import pandas as pd
import scipy.signal as signal
import matplotlib.pyplot as plt
TS_OFFSET = 500
accel_labels = ["Wide Range Accelerometer X", \
"Wide Range Accelerometer Y", \
"Wide Range Accelerometer Z"]
gyro_labels = ["Gyroscope X", \
"Gyroscope Y", \
"Gyroscope Z"]
def closest_timestamp(ind_list, ts):
'''
Find the nearest value in an index given an estimated timestamp.
Keyword arguments:
'''
closest = ind_list[0]
for val in ind_list:
delta = abs(val-ts)
if delta < abs(closest - ts):
closest = val
return closest
def compute_vector_norm(vec):
'''
Compute vector norm.
Keyword arguments:
'''
return np.linalg.norm(vec)
def compute_avg_accel_norm(XYZ_df):
'''
Compute vector norm.
Keyword arguments:
'''
norms = []
for i in range(len(XYZ_df)):
norms.append(compute_vector_norm(np.array(XYZ_df.iloc[i][accel_labels])))
return np.mean(norms)
def apply_filter(df, sensor_loc):
'''
    Apply Butterworth filters to the accelerometer and gyroscope signals.
    Cutoff frequencies are passed to scipy as normalized values in [0, 1],
    where 1.0 corresponds to the Nyquist frequency (sampling rate / 2).
    The Shimmer default sampling rate is 51.2 Hz, so Nyquist == 25.6 Hz and,
    for example, 3 Hz -> 3/25.6 = 0.1171875, 0.1 Hz -> 0.00390625,
    10 Hz -> 0.390625 (reference for the 10 Hz cutoff still to be added).
    Keyword arguments:
    df -- DataFrame of raw accelerometer and gyroscope signals
    sensor_loc -- sensor location label (e.g. "HIP", "WALKER", "LA", "RA", "CANE")
'''
print "apply_filter(): sensor_loc " + sensor_loc
if sensor_loc == "HIP" or sensor_loc == "WALKER":
B,A = signal.butter(4, 0.00388, 'highpass') # 0.1 Hz
D,C = signal.butter(4, 0.1171875, 'lowpass') # 3 HZ
elif sensor_loc == "LA" or sensor_loc == "RA" or sensor_loc == "CANE":
B,A = signal.butter(4, 0.00388, 'highpass') # 0.1 Hz
D,C = signal.butter(4, 0.390625, 'lowpass') # 10 Hz
# Gyro filter lowpass cutoff at 4Hz (Tong and Granat 1999)
E, F = signal.butter(4, 0.15625, 'lowpass') # 4Hz => .15625
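    # Note: signal.filtfilt runs each Butterworth filter forward and backward,
    # giving zero-phase filtering (no time shift), so the filtered
    # accelerometer and gyroscope signals stay aligned with the raw timestamps.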
for label in accel_labels:
ser = df[label]
high = signal.filtfilt(B, A, ser)
band = signal.filtfilt(D, C, high)
df[label] = band
for label in gyro_labels:
ser = df[label]
low = signal.filtfilt(E, F, ser)
df[label] = low
return df
def orient_shank(horiz_df, vert_df, df, sensor_loc):
'''
Orient the shank sensors.
Keyword arguments:
'''
print "**Orienting sensor location: " + sensor_loc + "**\n"
horiz_avg_norm = compute_avg_accel_norm(horiz_df)
print "Average horizontal norm: %.2lf" %(horiz_avg_norm)
vert_avg_norm = compute_avg_accel_norm(vert_df)
print "Average vertical norm: %.2lf\n" %(vert_avg_norm)
# Orientation from local to body coordinate system
# Chen (thesis) 2011 2.3.3 Mounting Calibration in
# Gait feature extraction from inertial body sensor networks for medical applications
x_horiz = np.mean(horiz_df["Wide Range Accelerometer X"] * -1)
y_horiz = np.mean(horiz_df["Wide Range Accelerometer Y"])
z_horiz = np.mean(horiz_df["Wide Range Accelerometer Z"])
g_prime = np.array([x_horiz, y_horiz, z_horiz]) / horiz_avg_norm
x_vert = np.mean(vert_df["Wide Range Accelerometer X"])
y_vert = np.mean(vert_df["Wide Range Accelerometer Y"])
z_vert = np.mean(vert_df["Wide Range Accelerometer Z"])
Y_B = np.array([x_vert,y_vert,z_vert]) / vert_avg_norm
Z_B = np.cross(Y_B, g_prime)
X_B = np.cross(Y_B, Z_B)
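    # Body-frame construction (mounting calibration, cf. the Chen 2011
    # reference above): Y_B is the unit gravity direction measured with the
    # shank vertical (the segment's long axis in sensor coordinates), g_prime
    # is the unit gravity direction measured with the shank horizontal, Z_B is
    # made perpendicular to both via the cross product, and X_B completes the
    # orthogonal triad used to rotate the raw signals below.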
print "G' vector: ",
print g_prime
print "X body vector: ",
print X_B
print "Y body vector: ",
print Y_B
print "Z body vector: ",
print Z_B
rotation_mat = np.array([X_B, Y_B, Z_B]).transpose()
print "\nRotation matrix:"
print rotation_mat
print "\n"
accel = np.array([df["Wide Range Accelerometer X"], \
df["Wide Range Accelerometer Y"], \
df["Wide Range Accelerometer Z"]])
gyro = np.array([df["Gyroscope X"], \
df["Gyroscope Y"], \
df["Gyroscope Z"]])
oriented_accel = np.dot(rotation_mat, accel).transpose()
oriented_gyro = np.dot(rotation_mat, gyro).transpose()
oriented_df = df.copy()
oriented_df["Gyroscope X"] = oriented_gyro[:, 0]
oriented_df["Gyroscope Y"] = oriented_gyro[:, 1]
oriented_df["Gyroscope Z"] = oriented_gyro[:, 2]
oriented_df["Wide Range Accelerometer X"] = oriented_accel[:, 0]
oriented_df["Wide Range Accelerometer Y"] = oriented_accel[:, 1]
oriented_df["Wide Range Accelerometer Z"] = oriented_accel[:, 2]
return oriented_df
def orient_COM(df):
'''
Orient the COM sensor.
Keyword arguments:
'''
print "**Orienting sensor location: COM**\n"
# GS: swapping X and Z to align with the international society of biomechanics
# where X is in the direction of travel (mounted backwards on COM, *-1)
# Y is vertical
oriented_df = df.copy()
oriented_df["Gyroscope X"] = df["Gyroscope Z"] * -1
oriented_df["Gyroscope Y"] = df["Gyroscope Y"]
oriented_df["Gyroscope Z"] = df["Gyroscope X"]
oriented_df["Wide Range Accelerometer X"] = df["Wide Range Accelerometer Z"] * -1
oriented_df["Wide Range Accelerometer Y"] = df["Wide Range Accelerometer Y"]
oriented_df["Wide Range Accelerometer Z"] = df["Wide Range Accelerometer X"]
return oriented_df
def orient_assistive_device(df, axes_df):
'''
Orient the assistive device sensor.
Keyword arguments:
'''
print "**Orienting sensor location: DEV**\n"
# GS: swapping X and Z to align with the international society of biomechanics
# where X is in the direction of travel
# Y is vertical
oriented_df = df.copy()
oriented_df["Gyroscope X"] = df["Gyroscope " + axes_df.ix["X"]["orig"]] * axes_df.ix["X"]["modifier"]
oriented_df["Gyroscope Y"] = df["Gyroscope " + axes_df.ix["Y"]["orig"]] * axes_df.ix["Y"]["modifier"]
oriented_df["Gyroscope Z"] = df["Gyroscope " + axes_df.ix["Z"]["orig"]] * axes_df.ix["Y"]["modifier"]
oriented_df["Wide Range Accelerometer X"] = \
df["Wide Range Accelerometer " + axes_df.ix["X"]["orig"]] * axes_df.ix["X"]["modifier"]
oriented_df["Wide Range Accelerometer Y"] = \
df["Wide Range Accelerometer " + axes_df.ix["Y"]["orig"]] * axes_df.ix["Y"]["modifier"]
oriented_df["Wide Range Accelerometer Z"] = \
df["Wide Range Accelerometer " + axes_df.ix["Z"]["orig"]] * axes_df.ix["Z"]["modifier"]
return oriented_df
def choose_subsection(ind_list):
'''
Get user input specifying the start and stop times.
Keyword arguments:
'''
horiz_start = float(raw_input("Enter the leg horizontal start time: "))
horiz_end = float(raw_input("Enter the leg horizontal end time: "))
horiz_start = closest_timestamp(ind_list, horiz_start)
horiz_end = closest_timestamp(ind_list, horiz_end)
print "Leg horizontal start: %lf" %(horiz_start)
print "Leg horizontal end: %lf" %(horiz_end)
vert_start = float(raw_input("Enter the leg vertical start time: "))
vert_end = float(raw_input("Enter the leg vertical end time: "))
vert_start = closest_timestamp(ind_list, vert_start)
vert_end = closest_timestamp(ind_list, vert_end)
print "Leg vertical start: %lf" %(vert_start)
print "Leg end: %lf" %(vert_end)
return (horiz_start, horiz_end, vert_start, vert_end)
def get_user_defined_sections(fname, notes_fname, section_plot_fname, \
horiz_df_fname, vert_df_fname, df, sensor_loc):
'''
Orient and filter the shank sensors.
Keyword arguments:
'''
labels = ["Horiz", "Vert"]
response = 'n'
while response != ('y' or 'yes' or 'Y'):
plot_acceleration_data(df, sensor_loc)
section_times = choose_subsection(df.index.tolist())
plot_acceleration_data(df, sensor_loc, section_plot_fname, section_times, labels)
response = raw_input("Are these sections correct?: Y/N\n")
horiz_df = df[section_times[0]:section_times[1]]
vert_df = df[section_times[2]:section_times[3]]
write_notes(notes_fname, section_times, labels)
write_data(fname, horiz_df_fname, horiz_df)
write_data(fname, vert_df_fname, vert_df)
return horiz_df, vert_df
def choose_trial_subsection(ind_list):
'''
Get user input specifying the start and stop times.
Keyword arguments:
'''
start = float(raw_input("Enter the first trial start time: "))
end = float(raw_input("Enter the first trial end time: "))
    # COM start time is taken to be the literal start time, i.e. time 0
start = closest_timestamp(ind_list, start)
    # add an offset to the end to accommodate META file times
end = closest_timestamp(ind_list, end + TS_OFFSET)
print "First trial start: %lf" %(start)
print "First trial end: %lf" %(end)
start2 = float(raw_input("Enter the second trial start time: "))
end2 = float(raw_input("Enter the second trial end time: "))
start2 = closest_timestamp(ind_list, start2)
end2 = closest_timestamp(ind_list, end2 + TS_OFFSET)
print "Second trial start: %lf" %(start2)
print "Second trial end: %lf" %(end2)
return (start, end, start2, end2)
def get_user_defined_trial_times(fname, notes_fname, chopped_plot_fname, \
chopped_df_fname, chopped_df_fname2):
'''
Chop the files into the trials.
Keyword arguments:
'''
labels = ["T1", "T2"]
df = pd.read_csv(fname, skiprows=[0, 2, 3], header=0, index_col=0)
response = 'n'
while response != ('y' or 'yes' or 'Y'):
plot_acceleration_data(df, "HIP")
section_times = choose_trial_subsection(df.index.tolist())
plot_acceleration_data(df, "HIP", chopped_plot_fname, section_times, labels)
response = raw_input("Are these sections correct?: Y/N\n")
chopped_df = df[section_times[0]:section_times[1]]
chopped_df2 = df[section_times[2]:section_times[3]]
write_notes(notes_fname, section_times, labels)
write_data(fname, chopped_df_fname, chopped_df)
write_data(fname, chopped_df_fname2, chopped_df2)
return section_times
def chop_dependent_data(loc_fname, chopped_df_fname, chopped_df_fname2, trial_times):
'''
Chop the LA, RA, DEV, etc data files to trim them down based on start/end timestamps for COM.
Keyword arguments:
'''
df = pd.read_csv(loc_fname, skiprows=[0, 2, 3], header=0, index_col=0)
ind_list = df.index
# add an offset before COM start in order to account for nearest timestamps coming before start
start = closest_timestamp(ind_list, trial_times[0] - TS_OFFSET)
end = closest_timestamp(ind_list, trial_times[1])
start2 = closest_timestamp(ind_list, trial_times[2] - TS_OFFSET)
end2 = closest_timestamp(ind_list, trial_times[3])
first_trial_df = df[start:end]
second_trial_df = df[start2:end2]
write_data(loc_fname, chopped_df_fname, first_trial_df)
write_data(loc_fname, chopped_df_fname2, second_trial_df)
def plot_acceleration_data(df, sensor_loc, fname=None, section_times=None, labels=None):
'''
Plot the acceleration so the user can find the horiz and vert sections.
Keyword arguments:
'''
plt.figure()
plt.plot(df.index, df['Wide Range Accelerometer X'], label = 'X-axis')
plt.plot(df.index, df['Wide Range Accelerometer Y'], label = 'Y-axis')
plt.plot(df.index, df['Wide Range Accelerometer Z'], label = 'Z-axis')
if section_times is not None and labels is not None:
plt.axvspan(section_times[0], section_times[1], facecolor='b', alpha=0.5)
plt.text(section_times[0], -11, labels[0], style='italic', bbox={'facecolor':'b', 'alpha':0.8, 'pad':10})
plt.axvspan(section_times[2], section_times[3], facecolor='g', alpha=0.5)
plt.text(section_times[2], -11, labels[1], style='italic', bbox={'facecolor':'g', 'alpha':0.8, 'pad':10})
plt.xlabel('Timestamp')
plt.ylabel('Acceleration [m/s^2]')
plt.title('Sensor location: %s' %(sensor_loc))
plt.legend()
if fname is not None:
plt.savefig(fname)
plt.show()
def plot_oriented_filtered_data(df, oriented_df, oriented_filtered_df, sensor_loc):
'''
Plot the final oriented and filtered data.
Keyword arguments:
'''
plt.figure()
plt.subplot(311)
plt.plot(df.index, df["Wide Range Accelerometer X"], label='X original')
plt.plot(df.index, oriented_df["Wide Range Accelerometer X"], label='X rotated')
plt.plot(df.index, oriented_filtered_df["Wide Range Accelerometer X"], label='X filtered')
plt.legend()
plt.subplot(312)
plt.plot(df.index, df["Wide Range Accelerometer Y"], label='Y original')
plt.plot(df.index, oriented_df["Wide Range Accelerometer Y"], label='Y rotated')
plt.plot(df.index, oriented_filtered_df["Wide Range Accelerometer Y"], label='Y filtered')
plt.legend()
plt.subplot(313)
plt.plot(df.index, df["Wide Range Accelerometer Z"], label='Z original')
plt.plot(df.index, oriented_df["Wide Range Accelerometer Z"], label='Z rotated')
plt.plot(df.index, oriented_filtered_df["Wide Range Accelerometer Z"], label='Z filtered')
plt.xlabel('Timestamp')
plt.ylabel('Acceleration [m/s^2]')
plt.legend()
plt.figure()
plt.subplot(311)
plt.plot(df.index, df["Gyroscope X"], label='X original')
plt.plot(df.index, oriented_df["Gyroscope X"], label='X rotated')
plt.plot(df.index, oriented_filtered_df["Gyroscope X"], label='X filtered')
plt.legend()
plt.subplot(312)
plt.plot(df.index, df["Gyroscope Y"], label='Y original')
plt.plot(df.index, oriented_df["Gyroscope Y"], label='Y rotated')
plt.plot(df.index, oriented_filtered_df["Gyroscope Y"], label='Y filtered')
plt.legend()
plt.subplot(313)
plt.plot(df.index, df["Gyroscope Z"], label='Z original')
plt.plot(df.index, oriented_df["Gyroscope Z"], label='Z rotated')
plt.plot(df.index, oriented_filtered_df["Gyroscope Z"], label='Z filtered')
plt.xlabel('Timestamp')
plt.ylabel('Angular velocity [deg/s]')
plt.legend()
plt.show()
def write_notes(fname, section_times, labels):
'''
Write the horiz and vert sections timestamps for record.
Keyword arguments:
'''
print "Saving section times..."
print labels[0] + " [%lf:%lf]" %(section_times[0], section_times[1])
print labels[1] + "Vertical [%lf:%lf]" %(section_times[2], section_times[3])
fout = open(fname, "w")
fout.write(labels[0] + " [%lf:%lf]\n" %(section_times[0], section_times[1]))
fout.write(labels[1] + "Vertical [%lf:%lf]" %(section_times[2], section_times[3]))
fout.close()
def write_data(orig_fname, section_fname, df):
'''
Write the horiz and vert sections for record.
Keyword arguments:
'''
# read in the original header
fin = open(orig_fname, "r")
fout = open(section_fname, "w")
# write out the original header
fout.write(fin.readline())
fout.write(fin.readline())
fout.write(fin.readline())
fout.write(fin.readline())
fin.close()
# write out the orientation sections
index = df.index.tolist()
for i in range(len(index)):
fout.write(str(index[i]))
fout.write(", ")
row = df.iloc[i].tolist()
for j in range(len(row) - 1):
fout.write(str(row[j]))
fout.write(", ")
fout.write(str(row[len(row) - 1]))
fout.write("\n")
fout.close()
| gpl-3.0 |
reetawwsum/Supervised-Learning | titanic.py | 1 | 1610 | '''
=================
Titanic - Kaggle
=================
'''
import csv
from collections import Counter
import numpy as np
from sklearn import preprocessing
from sklearn import tree
from sklearn import cross_validation
from sklearn import metrics
from common.fn import *
file_path = 'datasets/titanic/'
file_name = 'train.csv'
def load_titanic(file_path, file_name):
titanic = {}
with open(file_path+file_name, 'rb') as csv_file:
reader = csv.reader(csv_file, delimiter=',', quotechar='"')
first_row = reader.next()
X, y = [], []
for row in reader:
X.append(row)
y.append(row[1])
titanic['data'] = np.array(X)
titanic['target'] = np.array(y)
titanic['feature_names'] = np.array(first_row)
return titanic
titanic = load_titanic(file_path, file_name)
X = titanic['data'][:, [2, 4, 5]]
y = titanic['target']
feature_names = titanic['feature_names'][[2, 4, 5]]
# Filling missing age
ages = X[:, 2]
mean_age = np.mean(X[ages != '', 2].astype(np.float))
X[ages == '', 2] = mean_age
# Converting sex into real values
le = preprocessing.LabelEncoder()
le.fit(X[:, 1])
X[:, 1] = le.transform(X[:, 1])
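# LabelEncoder assigns integer labels alphabetically, so (assuming the usual
# 'female'/'male' values in the Kaggle data) 'female' -> 0 and 'male' -> 1;
# the hand-tuned rules applied to y_predict below rely on this encoding.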
X = X.astype(float)
y = y.astype(float)
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.10, random_state=42)
clf = tree.DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_predict = clf.predict(X_test)
for i in xrange(len(y_predict)):
if y_predict[i] == 0:
if X_test[i][0] == 1 and X_test[i][1] == 0:
y_predict[i] = 1
if X_test[i][1] == 0 and X_test[i][2] < 20:
y_predict[i] = 1
print metrics.accuracy_score(y_test, y_predict)
| mit |
abimannans/scikit-learn | sklearn/mixture/gmm.py | 128 | 31069 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Bertrand Thirion <bertrand.thirion@inria.fr>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
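# Hedged usage sketch (not part of the original scikit-learn source): for a
# 2-D Gaussian with a full covariance matrix,
#     sample_gaussian(np.zeros(2), np.eye(2), covariance_type='full',
#                     n_samples=5, random_state=0)
# returns an array of shape (2, 5), i.e. one column per drawn sample.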
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
        Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
if thresh is not None:
warnings.warn("'thresh' has been replaced by 'tol' in 0.16 "
" and will be removed in 0.18.",
DeprecationWarning)
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
        The shape depends on ``covariance_type``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: due to the final maximization step in the EM algorithm,
with low iterations the prediction may not be 100% accurate
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
# this line should be removed when 'thresh' is removed in v0.18
tol = (self.tol if self.thresh is None
else self.thresh / float(X.shape[0]))
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
# (should compare to self.tol when deprecated 'thresh' is
# removed in v0.18)
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weights
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
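# Hedged sanity sketch (the tiny stand-in object is hypothetical, and the
# weighted_X_sum/norm arguments are passed as None because the full variant
# above does not use them): with one component and uniform responsibilities the
# M step reduces to the ordinary biased sample covariance, up to min_covar.
def _example_covar_mstep_full_single_component():
    class _TinyGMM(object):
        n_components = 1
    rng = np.random.RandomState(0)
    X = rng.randn(50, 2)
    gmm = _TinyGMM()
    gmm.means_ = X.mean(axis=0)[np.newaxis]
    responsibilities = np.ones((50, 1))
    cv = _covar_mstep_full(gmm, X, responsibilities, None, None, 0.0)
    return np.allclose(cv[0], np.cov(X.T, bias=1))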
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
| bsd-3-clause |
fyffyt/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 71 | 25104 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
    # Test the pairwise_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
    # Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
# even if shape[1] agrees (although thus second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
    # pairwise_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels():
# Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
"additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.1
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
    # Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
    # Test that a value error is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
# The modified tests are not 1D. In the old test, the array was internally
# converted to 2D anyways
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
evanbiederstedt/CMBintheLikeHoodz | source_code/Feb25_2016_Multinorm_draw.py | 1 | 8572 |
# coding: utf-8
import math
import matplotlib.pyplot as plt
import numpy as np
import healpy as hp
import astropy as ap
import scipy
import scipy.io
from scipy.special import eval_legendre ##special scipy function
import os
os.getcwd()
os.chdir("/Users/evanbiederstedt/downloads")
# In[11]:
# For lmax = 1100, we must create an array of ell values, i.e. [0 1 2 3 ... 1099 1100]
ell = np.arange(1101)
#print(ell)
#
# Subtract the monopole and dipole, l=0, l=1
ellval = ell[2:]
#print(ellval)
# In[12]:
#
# Vary Baryon, patch 3
#
PlM_50 = "cl_varyBaryonlmax1100patch3PlMat50.npy"
PlM_100 = "cl_varyBaryonlmax1100patch3PlMat100.npy"
PlM_150 = "cl_varyBaryonlmax1100patch3PlMat150.npy"
data1 = np.load(PlM_50)
data2 = np.load(PlM_100)
data3 = np.load(PlM_150)
print(data1.shape)
print(data2.shape)
print(data3.shape)
# see script CAMB_vary_OmegaB_lmax1100_Feb2016.py
ff = "CAMB_cl_varyBaryon_lmax1100varyFeb2016.npy"
cell_array_loaded = np.load(ff)
cell_array = cell_array_loaded*(1e10)
print(cell_array.shape)
# In[13]:
PlMat_total = np.concatenate((data1, data2, data3)) # this is P_2(M), P_3(M), ..., P_lmax (M)
PlMat_total.shape
PlMat = PlMat_total
# Step 3: (2*l + 1)/4pi from l=2 to l=lmax
# [5/4pi 7/4pi 9/4pi 11/4pi .... 2201/4pi ]
norm = ((2*ellval + 1))/(4*math.pi)
print(len(ellval))
print("******")
print(norm.shape)
print("*****")
print(PlMat.shape)
# In[15]:
# Multiply elementwise to get, per ell:
# [5/4pi*P_2(M), 7/4pi*P_3(M), ...., 2201/4pi*P_1100(M)]
#
# multiply PlMat by (2*l+1)/4pi, i.e. norm
norm_matrix = norm[:, None, None] * PlMat
# [5/4pi * P_2(M)   7/4pi * P_3(M) ....  2201/4pi * P_1100(M)]
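#
# For reference (stated here as an assumption about the intended model, in the
# standard harmonic-space form), the signal covariance assembled from these
# pieces further down is
#
#   S_ij = sum_{ell=2}^{lmax} (2*ell + 1)/(4*pi) * C_ell * P_ell(M_ij)
#
# where M_ij = n_i . n_j is the cosine of the angle between pixels i and j, so
# norm_matrix holds the (2*ell + 1)/(4*pi) * P_ell(M) factors before the C_ell
# weighting and the sum over ell carried out inside the LogLF functions below.
#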
"""
# In[17]:
# define pixel-value arrays
mT = np.matrix(patch) # mT.shape = (1, 768)
m = np.matrix(patch).T # m.shape = (768, 1)
Npix2pi = (len(patch))*2*math.pi # LF constant
print(mT.shape)
print(m.shape)
print(Npix2pi)
"""
# In[18]:
#
# vary Omega_Baryon
#
# Planck found \Omega_B = 0.02234
# GAVO simulated map set at \Omega_B = 0.04
# CAMB default below at ombh2=0.022
#
twohundred_samples = np.linspace(0.005, 0.05, num=200)
print(twohundred_samples)
# In[19]:
#
# Step A: Set up one covariance matrix C=S+N at the "standard" set of cosmological parameters
# pars.set_cosmology(H0=67.5, ombh2=0.022, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
#
# In[21]:
#
# Multiply by 1e10 to avoid underflow
#
"""
tempp = patch*(1e10)
noise = noisepatch*(1e10)
cell = cls0*(1e10)
"""
fname_draw = "Feb12_2016_Multinormdraw.npy"
fname_noise = "Feb12_2016_noise.npy"
draws = np.load(fname_draw)
noise = np.load(fname_noise)
# In[28]:
id_matrix = np.identity(len(noise))
"""
id_matrix = np.identity(len(noise))
Nij = noise**2 * id_matrix
Cij = Sij + Nij
print(Cij.shape)
"""
Npix2pi = (len(noise))*2*math.pi # LF constant
# In[35]:
#
# If we generate data from our model, then the likelihood MUST, on average, peak at the correct parameters.
#
# So generate (from the C=S+N matrix) 1000 patches at one set of cosmological parameters,
# and compute the logLF as a function of one of those parameters,
# and show that we have a peak at the right parameters.
#
#
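#
# A minimal sketch of that check (the names and array shapes are assumptions:
# 'draw_points' is the stacked LogLF output over all draws and 'grid' is the
# parameter grid, e.g. twohundred_samples).  LogLF_twoargs below returns, up to
# a constant, -2 ln L, so the likelihood peak sits at the minimum of the
# averaged curve.
def peak_of_average_loglf(draw_points, grid):
    curves = np.squeeze(np.asarray(draw_points))  # -> (n_draws, n_grid)
    mean_curve = curves.mean(axis=0)              # average over the draws
    return grid[np.argmin(mean_curve)], mean_curve
#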
# In[36]:
#
# LogLF function of cell, i.e. theoretical Boltzmann values C_ell and tempp, i.e. pixel values of the patch
#
def LogLF_twoargs(cell, tempp):
# norm_matrix is (2*l+1)/4pi * P_ell(Mat)
CellPellM = cell[:, None, None] * norm_matrix # elementwise (2*l+1)/4pi * C^th_ell * P_ell(Mat)
Sij = np.sum(CellPellM, axis=0) # now one matrix
id_matrix = np.identity(len(tempp))
Nij = noise * id_matrix
Cij = Sij + Nij
model_fit_terms = np.array([np.dot(tempp.T , (np.linalg.solve(Cij, tempp)) )])
logdetC = np.linalg.slogdet(Cij)
return model_fit_terms + logdetC[1] + Npix2pi
# In[37]:
def modelfit_twoargs(cell, tempp):
# norm_matrix is (2*l+1)/4pi * P_ell(Mat)
CellPellM = cell[:, None, None] * norm_matrix # elementwise (2*l+1)/4pi * C^th_ell * P_ell(Mat)
Sij = np.sum(CellPellM, axis=0) # now one matrix
id_matrix = np.identity(len(tempp))
Nij = noise * id_matrix
Cij = Sij + Nij
model_fit_terms = np.array([np.dot(tempp.T , (np.linalg.solve(Cij, tempp)) )])
#logdetC = np.linalg.slogdet(Cij)
return model_fit_terms
# In[38]:
def logdet_twoargs(cell, tempp):
# norm_matrix is (2*l+1)/4pi * P_ell(Mat)
CellPellM = cell[:, None, None] * norm_matrix # elementwise (2*l+1)/4pi * C^th_ell * P_ell(Mat)
Sij = np.sum(CellPellM, axis=0) # now one matrix
id_matrix = np.identity(len(tempp))
Nij = noise * id_matrix
Cij = Sij + Nij
model_fit_terms = np.array([np.dot(tempp.T , (np.linalg.solve(Cij, tempp)) )])
logdetC = np.linalg.slogdet(Cij)
return logdetC[1]
# In[39]:
#
# set 'cell' with 'cell_array', i.e. Boltzmann code values, i.e. values from "CAMB_cl_varyBaryonlmax1100vary.npy",
# from Python3.4 script CAMB_vary_OmegaBaryon_Dec_9_lmax1100.py
#
# These are Boltzmann code results of varying Omega_baryon from 0.005 to 0.05
# forty_samples = np.linspace(0.005, 0.05, num=40)
# i.e.
# pars.set_cosmology(H0=67.5, ombh2=0.005, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
# 0.005
# pars.set_cosmology(H0=67.5, ombh2=0.00615385, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
# 0.00615385
# pars.set_cosmology(H0=67.5, ombh2=0.00730769, omch2=0.122, mnu=0.06, omk=0, tau=0.06)
# 0.00730769
# etc. etc.
#
# In[40]:
#
# 'tempp' is 'draws' i.e.
# draws = np.random.multivariate_normal(zero_mean, Cij, 1000)
# where draws.shape = (1000, 768), i.e. each 'draw' is vector (768,)
#
# In[41]:
# draw_points = np.asarray([[LogLF_twoargs(cell_array[i], draws[j]) for i in range(40)] for j in range(1000)])
# In[42]:
LogLF_draw0 = np.asarray([LogLF_twoargs(cell_array[i], draws[0]) for i in range(201)])
# In[43]:
LogLF_draw012 = np.asarray([[LogLF_twoargs(cell_array[i], draws[j]) for i in range(201)] for j in range(3)])
# In[44]:
#print(draw_points.shape)
print("*******")
print(LogLF_draw0.shape)
print("*******")
print(LogLF_draw012.shape)
# In[45]:
cell_array.shape
# In[46]:
cell_array[0].shape
# In[ ]:
# In[47]:
draws.shape
# In[48]:
draws[0].shape
# In[49]:
LogLF_draw01234 = np.asarray([[LogLF_twoargs(cell_array[i], draws[j]) for i in range(201)] for j in range(5)])
# In[50]:
LogLF_draw01234.shape
# In[ ]:
# In[51]:
LogLF_draw01234[0].shape
# In[52]:
LogLF_draw01234[0][0].shape
# In[53]:
draw_points_100 = np.asarray([[LogLF_twoargs(cell_array[i], draws[j]) for i in range(201)] for j in np.arange(0,100)])
# In[54]:
draw_points_100.shape
# In[ ]:
f1 = "draw_points_100_twohundred.npy"
np.save(f1, draw_points_100)
# In[ ]:
draw_points_200 = np.asarray([[LogLF_twoargs(cell_array[i], draws[j]) for i in range(201)] for j in np.arange(100,200)])
# In[ ]:
f2 = "draw_points_200_twohundred.npy"
np.save(f2, draw_points_200)
# In[ ]:
draw_points_300 = np.asarray([[LogLF_twoargs(cell_array[i], draws[j]) for i in range(201)] for j in np.arange(200,300)])
# In[ ]:
f3 = "draw_points_300_twohundred.npy"
np.save(f3, draw_points_300)
# In[ ]:
draw_points_400 = np.asarray([[LogLF_twoargs(cell_array[i], draws[j]) for i in range(201)] for j in np.arange(300,400)])
# In[ ]:
f4 = "draw_points_400_twohundred.npy"
np.save(f4, draw_points_400)
# In[ ]:
draw_points_500 = np.asarray([[LogLF_twoargs(cell_array[i], draws[j]) for i in range(201)] for j in np.arange(400,500)])
# In[ ]:
f5 = "draw_points_500_twohundred.npy"
np.save(f5, draw_points_500)
# In[ ]:
draw_points_600 = np.asarray([[LogLF_twoargs(cell_array[i], draws[j]) for i in range(201)] for j in np.arange(500,600)])
# In[ ]:
f6 = "draw_points_600_twohundred.npy"
np.save(f6, draw_points_600)
# In[ ]:
draw_points_700 = np.asarray([[LogLF_twoargs(cell_array[i], draws[j]) for i in range(201)] for j in np.arange(600,700)])
# In[ ]:
f7 = "draw_points_700_twohundred.npy"
np.save(f7, draw_points_700)
# In[ ]:
draw_points_800 = np.asarray([[LogLF_twoargs(cell_array[i], draws[j]) for i in range(201)] for j in np.arange(700,800)])
# In[ ]:
f8 = "draw_points_800_twohundred.npy"
np.save(f8, draw_points_800)
# In[ ]:
draw_points_900 = np.asarray([[LogLF_twoargs(cell_array[i], draws[j]) for i in range(201)] for j in np.arange(800,900)])
# In[ ]:
f9 = "draw_points_900_twohundred.npy"
np.save(f9, draw_points_900)
# In[ ]:
draw_points_1000 = np.asarray([[LogLF_twoargs(cell_array[i], draws[j]) for i in range(201)] for j in np.arange(900,1000)])
# In[ ]:
f10 = "draw_points_1000_twohundred.npy"
np.save(f10, draw_points_1000)
| mit |
idealabasu/code_pynamics | python/pynamics_examples/cart_pendulum.py | 1 | 2892 | # -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes
Email: danaukes<at>gmail.com
Please see LICENSE for full license.
"""
import pynamics
from pynamics.frame import Frame
from pynamics.variable_types import Differentiable,Constant
from pynamics.system import System
from pynamics.body import Body
from pynamics.dyadic import Dyadic
from pynamics.output import Output,PointsOutput
from pynamics.particle import Particle
import pynamics.integration
#import sympy
import numpy
import matplotlib.pyplot as plt
plt.ion()
from math import pi
system = System()
pynamics.set_system(__name__,system)
tol=1e-7
l = Constant(.5,'l',system)
xO = Constant(0, 'xO',system)
M = Constant(10,'M',system)
m = Constant(10,'m',system)
I_xx = Constant(9,'I_xx',system)
I_yy = Constant(9,'I_yy',system)
I_zz = Constant(9,'I_zz',system)
g = Constant(9.81,'g',system)
b = Constant(5e1,'b',system)
k = Constant(1e3,'k',system)
tinitial = 0
tfinal = 10
tstep = 1/30
t = numpy.r_[tinitial:tfinal:tstep]
x,x_d,x_dd = Differentiable('x',system)
q,q_d,q_dd = Differentiable('q',system)
initialvalues = {}
initialvalues[x]=.5
initialvalues[x_d]=0
initialvalues[q]=30*pi/180
initialvalues[q_d]=0*pi/180
statevariables = system.get_state_variables()
ini = [initialvalues[item] for item in statevariables]
N = Frame('N')
A = Frame('A')
system.set_newtonian(N)
A.rotate_fixed_axis_directed(N,[0,0,1],q,system)
p1 = x*N.x
p2 = p1 - l*A.y
v1 = p1.time_derivative(N,system)
v2 = p2.time_derivative(N, system)
I = Dyadic.build(A,I_xx,I_yy,I_zz)
BodyA = Body('BodyA',A,p2,m,I,system)
ParticleO = Particle(p2,M,'ParticleO',system)
stretch = x-xO
system.add_spring_force1(k,(stretch)*N.x,v1)
system.addforce(-b*v1,v1)
system.addforcegravity(-g*N.y)
eq = []
eq_d= [system.derivative(item) for item in eq]
eq_dd= [system.derivative(item) for item in eq_d]
f,ma = system.getdynamics()
func1 = system.state_space_post_invert(f,ma,eq_dd,constants = system.constant_values)
states=pynamics.integration.integrate_odeint(func1,ini,t,rtol=tol,atol=tol,args=({'constants':{},'alpha':1e2,'beta':1e1},))
# =============================================================================
KE = system.get_KE()
PE = system.getPEGravity(0*N.x) - system.getPESprings()
energy = Output([KE-PE])
energy.calc(states)
energy.plot_time()
# =============================================================================
points_list = [p1,p2]
#points_list = [item2 for item in points_list for item2 in [item.dot(N.x),item.dot(N.y)]]
#points = Output(points_list)
#y = points.calc(states)
#y = y.reshape((-1,2,2))
#plt.figure()
#plt.plot(y[:,1,0],y[:,1,1])
#plt.axis('equal')
states2= Output([x,q])
states2.calc(states)
plt.figure()
plt.plot(states[:,0])
plt.figure()
plt.plot(states[:,1])
points2 = PointsOutput(points_list)
points2.calc(states)
#points2.plot_time()
points2.animate(fps = 30, movie_name='cart_pendulum.mp4',lw=2)
| mit |
rahuldhote/scikit-learn | examples/cluster/plot_lena_compress.py | 271 | 2229 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Vector Quantization Example
=========================================================
The classic image processing example, Lena, an 8-bit grayscale,
512 x 512 pixel image, is used here to illustrate
how `k`-means is used for vector quantization.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn import cluster
n_clusters = 5
np.random.seed(0)
try:
lena = sp.lena()
except AttributeError:
# Newer versions of scipy have lena in misc
from scipy import misc
lena = misc.lena()
X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
# create an array from labels and values
lena_compressed = np.choose(labels, values)
lena_compressed.shape = lena.shape
vmin = lena.min()
vmax = lena.max()
# original lena
plt.figure(1, figsize=(3, 2.2))
plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256)
# compressed lena
plt.figure(2, figsize=(3, 2.2))
plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# equal bins lena
regular_values = np.linspace(0, 256, n_clusters + 1)
regular_labels = np.searchsorted(regular_values, lena) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean
regular_lena = np.choose(regular_labels.ravel(), regular_values)
regular_lena.shape = lena.shape
plt.figure(3, figsize=(3, 2.2))
plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# histogram
plt.figure(4, figsize=(3, 2.2))
plt.clf()
plt.axes([.01, .01, .98, .98])
plt.hist(X, bins=256, color='.5', edgecolor='.5')
plt.yticks(())
plt.xticks(regular_values)
values = np.sort(values)
for center_1, center_2 in zip(values[:-1], values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b')
for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')
plt.show()
| bsd-3-clause |
esc/dask | dask/dataframe/core.py | 2 | 26266 | from itertools import count
from math import ceil, sqrt
from functools import wraps
import bisect
import os
from toolz import (merge, partial, accumulate, unique, first, dissoc, valmap,
                   partition)
from operator import getitem, setitem
import pandas as pd
import numpy as np
import operator
import gzip
import bz2
from pframe import pframe
import bcolz
try:
from chest import Chest as Cache
except ImportError:
Cache = dict
from .. import array as da
from .. import core
from ..array.core import partial_by_order
from ..async import get_sync
from ..threaded import get as get_threaded
from ..compatibility import unicode, apply
from ..utils import repr_long_list, IndexCallable
def _concat(args):
""" Generic concat operation """
if not args:
return args
if isinstance(first(core.flatten(args)), np.ndarray):
return da.core.rec_concatenate(args)
if len(args) == 1:
return args[0]
if isinstance(args[0], (pd.DataFrame, pd.Series)):
args2 = [arg for arg in args if len(arg)]
if not args2:
return args[0]
return pd.concat(args2)
if isinstance(args[0], (pd.Index)):
args = [arg for arg in args if len(arg)]
result = pd.concat(map(pd.Series, args))
result = type(args[0])(result.values)
result.name = args[0].name
return result
return args
def compute(*args, **kwargs):
""" Compute multiple frames at once """
if len(args) == 1 and isinstance(args[0], (tuple, list)):
args = args[0]
dsk = merge(*[arg.dask for arg in args])
keys = [arg._keys() for arg in args]
results = get(dsk, keys, **kwargs)
return list(map(_concat, results))
names = ('f-%d' % i for i in count(1))
class Scalar(object):
""" A Dask-thing to represent a scalar
TODO: Clean up this abstraction
"""
def __init__(self, dsk, _name):
self.dask = dsk
self._name = _name
self.divisions = []
@property
def _args(self):
return (self.dask, self._name)
def _keys(self):
return [(self._name, 0)]
def compute(self, **kwargs):
return compute(self, **kwargs)[0]
class _Frame(object):
""" Superclass for DataFrame and Series """
@property
def npartitions(self):
return len(self.divisions) + 1
def compute(self, **kwargs):
return compute(self, **kwargs)[0]
def _keys(self):
return [(self._name, i) for i in range(self.npartitions)]
def _visualize(self, optimize_graph=False):
        from dask.dot import dot_graph
        from .optimize import optimize
        if optimize_graph:
            dot_graph(optimize(self.dask, self._keys()))
else:
dot_graph(self.dask)
@property
def index(self):
name = self._name + '-index'
dsk = dict(((name, i), (getattr, key, 'index'))
for i, key in enumerate(self._keys()))
return Index(merge(dsk, self.dask), name, None, self.divisions)
@property
def known_divisions(self):
return len(self.divisions) > 0 and self.divisions[0] is not None
def cache(self, cache=Cache):
""" Evaluate Dataframe and store in local cache
Uses chest by default to store data on disk
"""
if callable(cache):
cache = cache()
# Evaluate and store in cache
name = next(names)
dsk = dict(((name, i), (setitem, cache, (tuple, list(key)), key))
for i, key in enumerate(self._keys()))
get(merge(dsk, self.dask), list(dsk.keys()))
# Create new Frame pointing to that cache
dsk2 = dict((key, (getitem, cache, (tuple, list(key))))
for key in self._keys())
return type(self)(dsk2, name, self.column_info, self.divisions)
def drop_duplicates(self):
chunk = lambda s: s.drop_duplicates()
return aca(self, chunk=chunk, aggregate=chunk, columns=self.columns)
def __len__(self):
return reduction(self, len, np.sum).compute()
def map_blocks(self, func, columns=None):
""" Apply Python function on each DataFrame block
Provide columns of the output if they are not the same as the input.
"""
if columns is None:
columns = self.column_info
name = next(names)
dsk = dict(((name, i), (func, (self._name, i)))
for i in range(self.npartitions))
return type(self)(merge(dsk, self.dask), name,
columns, self.divisions)
def head(self, n=10, compute=True):
""" First n rows of the dataset
        Caveat: this only checks the first n rows of the first partition.
"""
name = next(names)
dsk = {(name, 0): (head, (self._name, 0), n)}
result = type(self)(merge(self.dask, dsk), name,
self.column_info, [])
if compute:
result = result.compute()
return result
def _partition_of_index_value(self, val):
""" In which partition does this value lie? """
return bisect.bisect_right(self.divisions, val)
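    # Worked note on the divisions convention (the values are hypothetical):
    # with divisions == (10, 20, 50) there are four partitions, and
    # bisect_right sends an index value of 15 to partition 1; a value equal to
    # a division (e.g. 10) is also sent to the partition on its right, so
    # lookups treat each division value as the start of the following
    # partition.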
def _loc(self, ind):
""" Helper function for the .loc accessor """
if not self.known_divisions:
raise ValueError(
"Can not use loc on DataFrame without known divisions")
name = next(names)
if not isinstance(ind, slice):
part = self._partition_of_index_value(ind)
dsk = {(name, 0): (lambda df: df.loc[ind], (self._name, part))}
return type(self)(merge(self.dask, dsk), name,
self.column_info, [])
else:
assert ind.step in (None, 1)
if ind.start:
start = self._partition_of_index_value(ind.start)
else:
start = 0
if ind.stop is not None:
stop = self._partition_of_index_value(ind.stop)
else:
stop = self.npartitions - 1
if stop == start:
dsk = {(name, 0): (_loc, (self._name, start), ind.start, ind.stop)}
else:
dsk = merge(
{(name, 0): (_loc, (self._name, start), ind.start, None)},
dict(((name, i), (self._name, start + i))
for i in range(1, stop - start)),
{(name, stop - start): (_loc, (self._name, stop), None, ind.stop)})
return type(self)(merge(self.dask, dsk), name, self.column_info,
self.divisions[start:stop])
@property
def loc(self):
return IndexCallable(self._loc)
@property
def iloc(self):
raise AttributeError("Dask Dataframe does not support iloc")
def __getstate__(self):
return self.__dict__
def __setstate__(self, dict):
self.__dict__ = dict
class Series(_Frame):
""" Out-of-core Series object
Mimics ``pandas.Series``.
See Also
--------
dask.dataframe.DataFrame
"""
_partition_type = pd.Series
def __init__(self, dsk, _name, name, divisions):
self.dask = dsk
self._name = _name
self.name = name
self.divisions = divisions
@property
def _args(self):
return (self.dask, self._name, self.name, self.divisions)
@property
def dtype(self):
return self.head().dtype
@property
def column_info(self):
return self.name
@property
def columns(self):
return (self.name,)
def __repr__(self):
return ("dd.Series<%s, divisions=%s>" %
(self._name, repr_long_list(self.divisions)))
def quantiles(self, q):
""" Approximate quantiles of column
q : list/array of floats
Iterable of numbers ranging from 0 to 100 for the desired quantiles
"""
return quantiles(self, q)
def __getitem__(self, key):
name = next(names)
if isinstance(key, Series) and self.divisions == key.divisions:
dsk = dict(((name, i), (operator.getitem, (self._name, i),
(key._name, i)))
for i in range(self.npartitions))
return Series(merge(self.dask, key.dask, dsk), name,
self.name, self.divisions)
raise NotImplementedError()
def __abs__(self):
return elemwise(operator.abs, self)
def __add__(self, other):
return elemwise(operator.add, self, other)
def __radd__(self, other):
return elemwise(operator.add, other, self)
def __and__(self, other):
return elemwise(operator.and_, self, other)
def __rand__(self, other):
return elemwise(operator.and_, other, self)
def __div__(self, other):
return elemwise(operator.div, self, other)
def __rdiv__(self, other):
return elemwise(operator.div, other, self)
def __eq__(self, other):
return elemwise(operator.eq, self, other)
def __gt__(self, other):
return elemwise(operator.gt, self, other)
def __ge__(self, other):
return elemwise(operator.ge, self, other)
def __invert__(self):
return elemwise(operator.inv, self)
def __lt__(self, other):
return elemwise(operator.lt, self, other)
def __le__(self, other):
return elemwise(operator.le, self, other)
def __mod__(self, other):
return elemwise(operator.mod, self, other)
def __rmod__(self, other):
return elemwise(operator.mod, other, self)
def __mul__(self, other):
return elemwise(operator.mul, self, other)
def __rmul__(self, other):
return elemwise(operator.mul, other, self)
def __ne__(self, other):
return elemwise(operator.ne, self, other)
def __neg__(self):
return elemwise(operator.neg, self)
def __or__(self, other):
return elemwise(operator.or_, self, other)
def __ror__(self, other):
return elemwise(operator.or_, other, self)
def __pow__(self, other):
return elemwise(operator.pow, self, other)
def __rpow__(self, other):
return elemwise(operator.pow, other, self)
def __sub__(self, other):
return elemwise(operator.sub, self, other)
def __rsub__(self, other):
return elemwise(operator.sub, other, self)
def __truediv__(self, other):
return elemwise(operator.truediv, self, other)
def __rtruediv__(self, other):
return elemwise(operator.truediv, other, self)
def __floordiv__(self, other):
return elemwise(operator.floordiv, self, other)
def __rfloordiv__(self, other):
return elemwise(operator.floordiv, other, self)
def __xor__(self, other):
return elemwise(operator.xor, self, other)
def __rxor__(self, other):
return elemwise(operator.xor, other, self)
def sum(self):
return reduction(self, pd.Series.sum, np.sum)
def max(self):
return reduction(self, pd.Series.max, np.max)
def min(self):
return reduction(self, pd.Series.min, np.min)
def count(self):
return reduction(self, pd.Series.count, np.sum)
def mean(self):
def chunk(ser):
return (ser.sum(), ser.count())
def agg(seq):
sums, counts = list(zip(*seq))
return 1.0 * sum(sums) / sum(counts)
return reduction(self, chunk, agg)
def var(self, ddof=1):
def chunk(ser):
return (ser.sum(), (ser**2).sum(), ser.count())
def agg(seq):
x, x2, n = list(zip(*seq))
x = float(sum(x))
x2 = float(sum(x2))
n = sum(n)
result = (x2 / n) - (x / n)**2
if ddof:
result = result * n / (n - ddof)
return result
return reduction(self, chunk, agg)
def std(self, ddof=1):
name = next(names)
f = self.var(ddof=ddof)
dsk = {(name, 0): (sqrt, (f._name, 0))}
return Scalar(merge(f.dask, dsk), name)
def value_counts(self):
chunk = lambda s: s.value_counts()
agg = lambda s: s.groupby(level=0).sum()
return aca(self, chunk=chunk, aggregate=agg, columns=self.columns)
def isin(self, other):
return elemwise(pd.Series.isin, self, other)
@wraps(pd.Series.map)
def map(self, arg, na_action=None):
return elemwise(pd.Series.map, self, arg, na_action, name=self.name)
class Index(Series):
pass
class DataFrame(_Frame):
"""
Implements out-of-core DataFrame as a sequence of pandas DataFrames
This is a work in progress. It is buggy and far from complete.
Please do not use it yet.
Parameters
----------
dask: dict
The dask graph to compute this Dataframe
name: str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame
columns: list of strings
Column names. This metadata aids usability
divisions: tuple of index values
Values along which we partition our blocks on the index
"""
_partition_type = pd.DataFrame
def __init__(self, dask, name, columns, divisions):
self.dask = dask
self._name = name
self.columns = tuple(columns)
self.divisions = tuple(divisions)
@property
def _args(self):
return (self.dask, self._name, self.columns, self.divisions)
def __getitem__(self, key):
if isinstance(key, (str, unicode)):
name = self._name + '.' + key
if key in self.columns:
dsk = dict(((name, i), (operator.getitem, (self._name, i), key))
for i in range(self.npartitions))
return Series(merge(self.dask, dsk), name,
key, self.divisions)
if isinstance(key, list):
name = '%s[%s]' % (self._name, str(key))
if all(k in self.columns for k in key):
dsk = dict(((name, i), (operator.getitem,
(self._name, i),
(list, key)))
for i in range(self.npartitions))
return DataFrame(merge(self.dask, dsk), name,
key, self.divisions)
if isinstance(key, Series) and self.divisions == key.divisions:
name = next(names)
dsk = dict(((name, i), (operator.getitem, (self._name, i),
(key._name, i)))
for i in range(self.npartitions))
return DataFrame(merge(self.dask, key.dask, dsk), name,
self.columns, self.divisions)
raise NotImplementedError()
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
try:
return self[key]
except NotImplementedError:
raise AttributeError()
def __dir__(self):
return sorted(set(list(dir(type(self))) + list(self.columns)))
def __repr__(self):
return ("dd.DataFrame<%s, divisions=%s>" %
(self._name, repr_long_list(self.divisions)))
@property
def dtypes(self):
return get(self.dask, self._keys()[0]).dtypes
def set_index(self, other, **kwargs):
return set_index(self, other, **kwargs)
def set_partition(self, column, divisions, **kwargs):
""" Set explicit divisions for new column index
>>> df2 = df.set_partition('new-index-column', divisions=[10, 20, 50]) # doctest: +SKIP
See also:
set_index
"""
return set_partition(self, column, divisions, **kwargs)
@property
def column_info(self):
return self.columns
def groupby(self, key, **kwargs):
return GroupBy(self, key, **kwargs)
def categorize(self, columns=None, **kwargs):
return categorize(self, columns, **kwargs)
@wraps(pd.DataFrame.assign)
def assign(self, **kwargs):
pairs = list(sum(kwargs.items(), ()))
# Figure out columns of the output
df = pd.DataFrame(columns=self.columns)
df2 = df.assign(**dict((k, []) for k in kwargs))
return elemwise(_assign, self, *pairs, columns=list(df2.columns))
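# A hand-built illustration of the (dask graph, name, columns, divisions)
# layout described in the DataFrame docstring above.  The key name 'demo' and
# the tiny pandas partitions are hypothetical and exist only to show the
# structure; with two partitions there is a single division value.
def _example_build_dataframe_by_hand():
    parts = {('demo', 0): pd.DataFrame({'a': [1, 2]}, index=[0, 1]),
             ('demo', 1): pd.DataFrame({'a': [3, 4]}, index=[2, 3])}
    # index values below 2 live in partition 0, values from 2 upwards in
    # partition 1
    return DataFrame(parts, 'demo', ['a'], [2])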
def _assign(df, *pairs):
kwargs = dict(partition(2, pairs))
return df.assign(**kwargs)
def _loc(df, start, stop):
return df.loc[slice(start, stop)]
def head(x, n):
""" First n elements of dask.Dataframe or dask.Series """
return x.head(n)
def consistent_name(names):
""" New name for series in elementwise operation
If all truthy names are the same, choose that one, otherwise, choose None
"""
allnames = set()
for name in names:
if name is None:
continue
if isinstance(name, (tuple, list)):
allnames.update(name)
else:
allnames.add(name)
if len(allnames) == 1:
return first(allnames)
else:
return None
def elemwise(op, *args, **kwargs):
""" Elementwise operation for dask.Dataframes """
columns = kwargs.get('columns', None)
name = kwargs.get('name', None)
_name = next(names)
frames = [arg for arg in args if isinstance(arg, _Frame)]
other = [(i, arg) for i, arg in enumerate(args)
if not isinstance(arg, _Frame)]
if other:
op2 = partial_by_order(op, other)
else:
op2 = op
assert all(f.divisions == frames[0].divisions for f in frames)
assert all(f.npartitions == frames[0].npartitions for f in frames)
dsk = dict(((_name, i), (op2,) + frs)
for i, frs in enumerate(zip(*[f._keys() for f in frames])))
if columns is not None:
return DataFrame(merge(dsk, *[f.dask for f in frames]),
_name, columns, frames[0].divisions)
else:
column_name = name or consistent_name(n for f in frames
for n in f.columns)
return Series(merge(dsk, *[f.dask for f in frames]),
_name, column_name, frames[0].divisions)
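# Hedged illustration (the helper name is made up): non-frame arguments are
# captured by partial_by_order together with their argument position, so each
# per-partition task only references the frame's key.  Adding a plain scalar to
# every element of a Series therefore needs no extra graph nodes:
def _example_elemwise_add_scalar(series):
    return elemwise(operator.add, series, 1)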
def reduction(x, chunk, aggregate):
""" General version of reductions
>>> reduction(my_frame, np.sum, np.sum) # doctest: +SKIP
"""
a = next(names)
dsk = dict(((a, i), (chunk, (x._name, i)))
for i in range(x.npartitions))
b = next(names)
dsk2 = {(b, 0): (aggregate, (tuple, [(a,i) for i in range(x.npartitions)]))}
return Scalar(merge(x.dask, dsk, dsk2), b)
def concat(dfs):
""" Concatenate dataframes along rows
Currently only supports unknown divisions
"""
if any(df.known_divisions for df in dfs):
# For this to work we need to add a final division for "maximum element"
raise NotImplementedError("Concat can't currently handle dataframes"
" with known divisions")
name = next(names)
dsk = dict()
i = 0
for df in dfs:
for key in df._keys():
dsk[(name, i)] = key
i += 1
divisions = [None] * (i - 1)
return DataFrame(merge(dsk, *[df.dask for df in dfs]), name,
dfs[0].columns, divisions)
class GroupBy(object):
def __init__(self, frame, index=None, **kwargs):
self.frame = frame
self.index = index
self.kwargs = kwargs
if isinstance(index, list):
assert all(i in frame.columns for i in index)
elif isinstance(index, Series):
assert index.divisions == frame.divisions
else:
assert index in frame.columns
def apply(self, func, columns=None):
if isinstance(self.index, Series) and self.index._name == self.frame.index._name:
f = self.frame
else:
f = set_index(self.frame, self.index, **self.kwargs)
return f.map_blocks(lambda df: df.groupby(level=0).apply(func),
columns=columns)
def __getitem__(self, key):
if key in self.frame.columns:
return SeriesGroupBy(self.frame, self.index, key)
else:
raise KeyError()
def __dir__(self):
return sorted(set(list(dir(type(self))) + list(self.frame.columns)))
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
try:
return self[key]
except KeyError:
raise AttributeError()
class SeriesGroupBy(object):
def __init__(self, frame, index, key, **kwargs):
self.frame = frame
self.index = index
self.key = key
self.kwargs = kwargs
    def apply(self, func, columns=None):
f = set_index(self.frame, self.index, **self.kwargs)
return f.map_blocks(lambda df:df.groupby(level=0)[self.key].apply(func),
columns=columns)
def sum(self):
chunk = lambda df, index: df.groupby(index)[self.key].sum()
agg = lambda df: df.groupby(level=0).sum()
return aca([self.frame, self.index],
chunk=chunk, aggregate=agg, columns=[self.key])
def min(self):
chunk = lambda df, index: df.groupby(index)[self.key].min()
agg = lambda df: df.groupby(level=0).min()
return aca([self.frame, self.index],
chunk=chunk, aggregate=agg, columns=[self.key])
def max(self):
chunk = lambda df, index: df.groupby(index)[self.key].max()
agg = lambda df: df.groupby(level=0).max()
return aca([self.frame, self.index],
chunk=chunk, aggregate=agg, columns=[self.key])
def count(self):
chunk = lambda df, index: df.groupby(index)[self.key].count()
agg = lambda df: df.groupby(level=0).sum()
return aca([self.frame, self.index],
chunk=chunk, aggregate=agg, columns=[self.key])
def mean(self):
def chunk(df, index):
g = df.groupby(index)
return g.agg({self.key: ['sum', 'count']})
def agg(df):
g = df.groupby(level=0)
x = g.agg({(self.key, 'sum'): 'sum',
(self.key, 'count'): 'sum'})
return 1.0 * x[self.key]['sum'] / x[self.key]['count']
return aca([self.frame, self.index],
chunk=chunk, aggregate=agg, columns=[])
def apply_concat_apply(args, chunk=None, aggregate=None, columns=None):
""" Apply a function to blocks, the concat, then apply again
Parameters
----------
args: dask.DataFrames
All Dataframes should be partitioned and indexed equivalently
chunk: function [block-per-arg] -> block
Function to operate on each block of data
aggregate: function concatenated-block -> block
Function to operate on the concatenated result of chunk
>>> def chunk(a_block, b_block):
... pass
>>> def agg(df):
... pass
>>> apply_concat_apply([a, b], chunk=chunk, aggregate=agg) # doctest: +SKIP
"""
if not isinstance(args, (tuple, list)):
args = [args]
assert all(arg.npartitions == args[0].npartitions
for arg in args
if isinstance(arg, _Frame))
a = next(names)
dsk = dict(((a, i), (apply, chunk, (list, [(x._name, i)
if isinstance(x, _Frame)
else x for x in args])))
for i in range(args[0].npartitions))
b = next(names)
dsk2 = {(b, 0): (aggregate,
(pd.concat,
(list, [(a, i) for i in range(args[0].npartitions)])))}
return type(args[0])(
merge(dsk, dsk2, *[a.dask for a in args
if isinstance(a, _Frame)]),
b, columns, [])
aca = apply_concat_apply
def categorize_block(df, categories):
""" Categorize a dataframe with given categories
df: DataFrame
categories: dict mapping column name to iterable of categories
"""
df = df.copy()
for col, vals in categories.items():
df[col] = pd.Categorical(df[col], categories=vals,
ordered=False, name=col)
return df
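# Small sketch of categorize_block on a single in-memory partition (the column
# names and category list are hypothetical): the object column 'city' becomes a
# pandas Categorical drawn from the supplied categories.
def _example_categorize_block():
    block = pd.DataFrame({'city': ['NYC', 'LA', 'NYC'], 'x': [1, 2, 3]})
    return categorize_block(block, {'city': ['LA', 'NYC', 'SF']})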
def categorize(f, columns=None, **kwargs):
"""
Convert columns of dask.frame to category dtype
This greatly aids performance, both in-memory and in spilling to disk
"""
if columns is None:
dtypes = f.dtypes
columns = [name for name, dt in zip(dtypes.index, dtypes.values)
if dt == 'O']
if not isinstance(columns, (list, tuple)):
columns = [columns]
distincts = [f[col].drop_duplicates() for col in columns]
values = compute(distincts, **kwargs)
func = partial(categorize_block, categories=dict(zip(columns, values)))
return f.map_blocks(func, columns=f.columns)
def quantiles(f, q, **kwargs):
""" Approximate quantiles of column
q : list/array of floats
Iterable of numbers ranging from 0 to 100 for the desired quantiles
"""
assert len(f.columns) == 1
from dask.array.percentile import _percentile, merge_percentiles
name = next(names)
val_dsk = dict(((name, i), (_percentile, (getattr, key, 'values'), q))
for i, key in enumerate(f._keys()))
name2 = next(names)
len_dsk = dict(((name2, i), (len, key)) for i, key in enumerate(f._keys()))
name3 = next(names)
merge_dsk = {(name3, 0): (merge_percentiles, q, [q] * f.npartitions,
sorted(val_dsk),
sorted(len_dsk))}
dsk = merge(f.dask, val_dsk, len_dsk, merge_dsk)
return da.Array(dsk, name3, chunks=((len(q),),))
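# Usage sketch (assumes `f` is a dask frame with exactly one column, as
# asserted above):
#
#     q = quantiles(f, [25, 50, 75])   # lazy dask.array of the three quantiles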
def get(dsk, keys, get=get_sync, **kwargs):
""" Get function with optimizations specialized to dask.Dataframe """
from .optimize import optimize
dsk2 = optimize(dsk, keys, **kwargs)
return get(dsk2, keys, **kwargs) # use synchronous scheduler for now
from .shuffle import set_index, set_partition
| bsd-3-clause |
jreback/pandas | pandas/tests/series/methods/test_between.py | 4 | 1197 | import numpy as np
from pandas import Series, bdate_range, date_range, period_range
import pandas._testing as tm
class TestBetween:
# TODO: redundant with test_between_datetime_values?
def test_between(self):
series = Series(date_range("1/1/2000", periods=10))
left, right = series[[2, 7]]
result = series.between(left, right)
expected = (series >= left) & (series <= right)
tm.assert_series_equal(result, expected)
def test_between_datetime_values(self):
ser = Series(bdate_range("1/1/2000", periods=20).astype(object))
ser[::2] = np.nan
result = ser[ser.between(ser[3], ser[17])]
expected = ser[3:18].dropna()
tm.assert_series_equal(result, expected)
result = ser[ser.between(ser[3], ser[17], inclusive=False)]
expected = ser[5:16].dropna()
tm.assert_series_equal(result, expected)
def test_between_period_values(self):
ser = Series(period_range("2000-01-01", periods=10, freq="D"))
left, right = ser[[2, 7]]
result = ser.between(left, right)
expected = (ser >= left) & (ser <= right)
tm.assert_series_equal(result, expected)
| bsd-3-clause |
smartscheduling/scikit-learn-categorical-tree | sklearn/neural_network/tests/test_rbm.py | 142 | 6276 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
    # this many iterations are needed
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples(np.arange(1000) * 100)
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
| bsd-3-clause |
fhorinek/SkyDrop | skydrop/utils/airspace/convert.py | 2 | 18075 | #!/usr/bin/env python3
# -*- mode: python-mode; python-indent-offset: 4 -*-
#*****************************************************************************
# dnf install python3-shapely python3-gdal python3-matplotlib
#
# This program is used to read an "Open-Airspace-file" containing a number of
# airspaces and then compute a raster of positions around these airspaces.
# Each raster point has an angle and distance to the nearest airspace.
#
# This can be used to generate AIR files used by SkyDrop variometer to help
# the pilot avoid flying into forbidden airspaces.
#
# Various checkpoints for use with "-c":
# * Möhringen: 48.723957,9.153292
# * Nabern: 48.614241,9.475000
# * Grabenstetten: 48.536363,9.437542
# * Bad Urach: 48.490105,9.394959
# * Degerloch: 48.745936,9.169557
# * Kornwestheim: 48.864396,9.223579
# * Markgroenningen: 48.908029,9.085469
# * Boenigheim: 49.039651,9.095502
#
# 2018-12-23, tilmann@bubecks.de
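#
# Example invocations (illustrative only; the .aip file name is an
# assumption, the flags correspond to usage()/main() below):
#
#   ./convert.py -r 200 openaip_airspace_germany.aip
#   ./convert.py -c 48.723957,9.153292 openaip_airspace_germany.aip
#
# The first call rasterizes 200x200 points per 1x1 degree tile, the second
# only checks a single point against the loaded airspaces.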
import sys
import re
import os
from Airspace import Airspace
from AirspaceVector import AirspaceVector
from pprint import pprint
import openaip
import shapely
import shapely.ops
import shapely.geometry
import matplotlib.pyplot as plt
import numpy
import time
import multiprocessing
import getopt
import struct
import cProfile
from const import *
from gps_calc import *
bVerbose = 0
wantedResolution = 200
latOnly = lonOnly = None
force = False
checkPoint = None
mk_list = None
DataSource = None
inspect = False
airspaces = []
def getBoundingBox(airspaces):
bb = [+100, +200, -100, -200]
for airspace in airspaces:
bb2 = airspace.getBoundingBox()
bb[0] = min(bb[0], bb2[0])
bb[1] = min(bb[1], bb2[1])
bb[2] = max(bb[2], bb2[2])
bb[3] = max(bb[3], bb2[3])
return bb
def dumpPoint(output, offset, p, airspaces, check=False):
def av_score(av):
score = 0
if av.isInside():
dist_score = 1
else:
dist_score = 1 - min(av.getDistance() / (33.0), 1)
score += dist_score * 10000
alt_score = 1 - min(av.airspace.getMin()[0] / 10000, 1)
score += alt_score * 1000
class_score = CLASS_SCORE[av.airspace.getClass()]
score += class_score * 100
av.scores = {}
av.scores["D"] = dist_score
av.scores["A"] = alt_score
av.scores["C"] = class_score
av.scores["T"] = score
return score
global bVerbose
global mk_list
avs = []
#get all airspaces in proximity
for airspace in airspaces:
if airspace.isNear(p):
av = airspace.getAirspaceVector(p)
avs.append(av)
if mk_list:
return True
#sort by importance
avs.sort(key = av_score, reverse = True)
if check and bVerbose > 1:
print ("Airspaces here:")
i = 0
for av in avs:
air = av.airspace
if i == DATA_LEVELS:
print("-" * 120)
i += 1
str_score = ""
for key in av.scores:
str_score += "%s %0.3f " % (key, av.scores[key])
str_score = str_score[:-1]
print("%50s %5u - %-5u %2s %5.2fkm %10s %s" %
(air.getName(), air.getMin()[0], air.getMax()[0], "IN" if av.isInside() else "", av.getDistance(), air.getClass(), str_score))
#cut to max levels
avs = avs[:DATA_LEVELS]
#sort by min alt
avs.sort(key = lambda av: av.airspace.getMin())
if output is not None:
#store to data file
for av in avs:
for byte in av.getBytes():
output[offset] = byte
offset += 1
# Fill up with empty Airspaces
av = AirspaceVector()
for i in range(DATA_LEVELS - len(avs)):
for byte in av.getBytes():
output[offset] = byte
offset += 1
return len(avs)
class Indexer(object):
def __init__(self):
self.num = 0
self.list = {}
def getNext(self, airspace):
old = self.num
self.num += 1
#print("Indexing: %3u" % old, airspace.getName())
self.list[old] = airspace
return old
def printIndex(self, index):
airspace = self.list[index]
hmin, hmin_agl = airspace.getMin()
hmax, hmax_agl = airspace.getMax()
class_name = airspace.getClass()
name = airspace.getName()
hmin_mode = "AGL" if hmin_agl else "MSL"
hmax_mode = "AGL" if hmax_agl else "MSL"
print(" i =%3u %6u %s - %6u %s [%10s] %s" % (index, hmin, hmin_mode, hmax, hmax_mode, class_name, name))
def dumpIndex(self):
buff = bytes()
for i in self.list:
airspace = self.list[i]
hmin, hmin_agl = airspace.getMin()
hmax, hmax_agl = airspace.getMax()
class_name = airspace.getClass()
name = airspace.getName()
hmin = min(hmin, 0x7FFF) + (0x8000 if hmin_agl else 0)
hmax = min(hmax, 0x7FFF) + (0x8000 if hmax_agl else 0)
class_index = CLASS_DICT[class_name]
if len(name) > 49:
name = name[:49]
name += '\0'
#floor 2 0
#ceil 2 2
#class 1 4
#name 50 5
#reserved 9 55
line = (struct.pack("HHB", hmin, hmax, class_index) + bytes(name, "ascii"))
buff += line + bytes('\0', "ascii") * (64 - len(line))
return buff
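    # Decoding sketch for one 64-byte record written above (assumes the
    # native little-endian layout produced by struct.pack on the build host):
    #
    #     hmin, hmax, class_index = struct.unpack_from("HHB", record, 0)
    #     name = record[5:55].split(b"\0")[0].decode("ascii")
    #
    # Bit 15 of hmin/hmax is the AGL flag; the low 15 bits hold the altitude.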
def dump(lon, lat, airspaces):
#print("lon, lat", lon, lat)
global wantedResolution
global mk_list
global DataSource
profile = False
if profile:
pr = cProfile.Profile()
pr.enable()
lat_n = abs((lat * HGT_COORD_MUL) / HGT_COORD_MUL);
lon_n = abs((lon * HGT_COORD_MUL) / HGT_COORD_MUL);
if lat >= 0:
lat_c = 'N'
else:
lat_c = 'S'
if (lon >= 0):
lon_c = 'E'
else:
lon_c = 'W'
filename = "%c%02u%c%03u.AIR" % (lat_c, lat_n, lon_c, lon_n)
if lat < LAT_MINIMUM:
print (filename, "too low to care, skipping...")
return
if os.path.isfile("data/" + filename) and not force and not mk_list:
print (filename, "exists, skipping...")
return
    #do we need to add additional airspace?
if os.path.exists("lookup/" + filename) and not mk_list:
f = open("lookup/" + filename, "r")
data = f.readlines()
f.close()
#print(data)
needed_as = []
for a in data:
a = a.split()[0]
needed_as.append(a)
if DataSource not in needed_as:
print("%s Not needed for this airspace, skipping..." % filename)
return
for a in needed_as:
if a == DataSource:
continue
load_airspace(a)
#is this airspace over water?
if os.path.exists("agl_tiles.list"):
do_not_generate = True
f = open("agl_tiles.list", "r")
data = f.readlines()
f.close()
needed_as = []
for a in data:
a = a.split(".")[0] + ".AIR"
if a == filename:
do_not_generate = False
if do_not_generate:
print(filename, "over water, skipping...")
return
numPoints = wantedResolution
filesize = numPoints * numPoints * DATA_LEVELS * DATA_LEVEL_SIZE
output = bytearray(filesize)
f = None
indexer = Indexer()
for airspace in airspaces:
airspace.setIndexer(indexer)
try:
# Quickcheck for emptyness
isEmpty = True
delta = AIRSPACE_BORDER / 2
print (filename, "Checking...")
for lat_i in numpy.arange(lat + delta / 2, lat + 1, delta):
for lon_i in numpy.arange(lon + delta / 2, lon + 1, delta):
p = shapely.geometry.Point(lon_i, lat_i)
if dumpPoint(None, 0, p, airspaces) > 0:
isEmpty = False
if not isEmpty:
break
if not isEmpty:
break
if not isEmpty:
if mk_list:
print (filename, "added to list")
f1 = open("lists/%s.list" % os.path.basename(DataSource), "a")
f1.write("%s\n" % filename)
f1.close()
return
print (filename, "computing (%ux%u)" % (numPoints, numPoints))
pos = 0
if not os.path.isdir("data"):
os.mkdir("data")
f = open("data/" + filename, 'wb')
last_per = 999
for lat_i in numpy.arange(lat, lat + 1, 1 / numPoints):
pos += 1
per = int((pos * 100) / numPoints)
if per != last_per:
print ("%s: %u %%" % (filename, per))
last_per = per
for lon_i in numpy.arange(lon, lon + 1, 1 / numPoints):
off = (1 / numPoints) * 0.5
p = shapely.geometry.Point(lon_i + off, lat_i + off)
mul = HGT_COORD_MUL
x = int(round(((lon_i) * mul % mul) * numPoints / mul))
y = int(round(((lat_i) * mul % mul) * numPoints / mul))
offset = int((x * numPoints + y) * DATA_LEVELS * DATA_LEVEL_SIZE)
dumpPoint(output, offset, p, airspaces)
except (KeyboardInterrupt, SystemExit):
print("Exiting...")
if f:
f.close()
os.system("rm data/" + filename)
isEmpty = True
for byte in output:
if byte != 0:
isEmpty = False
break
if isEmpty:
print (filename, "is empty")
else:
f.write(bytes(output))
f.write(bytes(indexer.dumpIndex()))
print (filename, "saved")
f.close()
if profile:
pr.disable()
pr.print_stats(sort = "cumtime")
def load_airspace(filename):
filename = "source/" + filename
print("Loading %s" % filename)
#load airspaces
classes = {}
skipped = []
invalid = []
global bVerbose
global airspaces
global inspect
for oas in openaip.load_file(filename):
if oas.invalid:
invalid.append(oas)
continue
for class_name in CLASS_DICT.keys():
#do not change classes for single letter
if len(class_name) == 1:
continue
#change class if starting as CTR, TMA ...
if oas.name.find(class_name) == 0 and oas.category != class_name:
if (bVerbose > 1):
print(oas.name)
print(" %10s --> %10s" % (oas.category, class_name))
oas.category = class_name
#do we skip this category?
if CLASS_FILTER[oas.category] and oas.bottom.value <= MAX_ALTITUDE:
airspaces.append(Airspace(oas))
if oas.category not in classes:
classes[oas.category] = [oas]
else:
classes[oas.category].append(oas)
else:
skipped.append(oas)
if inspect:
for key in classes:
print("---- %10s (%2u) ----" % (key, len(classes[key])))
for n in classes[key]:
print(" %s" % n)
print()
print("---- %10s (%2u) ----" % ("SKIPPED", len(skipped)))
for s in skipped:
print(" ", s)
print()
print("---- %10s (%2u) ----" % ("INVALID", len(invalid)))
for s in invalid:
print(" ", s)
print()
return
#**********************************************************************
# main()
#**********************************************************************
def usage():
print ('convert.py openairspace-file [lat] [lon]')
def main(argv = None):
global bVerbose
global latOnly, lonOnly
global wantedResolution
global force
global checkPoint
global mk_list
global DataSource
global inspect
if argv is None:
argv = sys.argv
try:
opts, args = getopt.getopt(argv,"hr:qvlfc:i",["help", "resolution=","quiet","verbose", "list", "force", "check", "inspect"])
except getopt.GetoptError:
usage()
sys.exit(2)
# --------------------------------------------------------------------
# Processing command line arguments.
# --------------------------------------------------------------------
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-q", "--quiet"):
bVerbose = 0
elif opt in ("-v", "--verbose"):
bVerbose = bVerbose + 1
elif opt in ("-r", "--resolution"):
wantedResolution = int(arg)
elif opt in ("-f", "--force"):
force = True
elif opt in ("-l", "--list"):
mk_list = True
elif opt in ("-i", "--inspect"):
inspect = True
elif opt in ("-c", "--check"):
m = re.match('([-+]?\d*\.\d+|\d+),([-+]?\d*\.\d+|\d+)', arg)
if m != None:
checkPoint = shapely.geometry.Point(float(m.group(2)), float(m.group(1)))
print(checkPoint)
else:
print (arg, "is invalid for --check. Use e.g. 48.5,10.2")
sys.exit(1)
if len(args) == 1:
DataSource = args[0]
elif len(args) == 3:
DataSource = args[0]
latOnly = int(args[1])
lonOnly = int(args[2])
else:
usage()
sys.exit(1)
if (bVerbose > 1):
print("Resolution table")
print("Max distance %7.3fkm" % (OFFSET_BASE * 111))
print("Mode 0 %0.10f deg -> %7.3fkm %7.3fkm" % (OFFSET_MUL_0, OFFSET_MUL_0 * 111, OFFSET_MUL_0 * 111 * 64))
print("Mode 1 %0.10f deg -> %7.3fkm %7.3fkm" % (OFFSET_MUL_1, OFFSET_MUL_1 * 111, OFFSET_MUL_1 * 111 * 64))
print("Mode 2 %0.10f deg -> %7.3fkm %7.3fkm" % (OFFSET_MUL_2, OFFSET_MUL_2 * 111, OFFSET_MUL_2 * 111 * 64))
print("Mode 3 normal vector")
DataSource = os.path.basename(DataSource)
load_airspace(DataSource)
boundingBox = getBoundingBox(airspaces)
print("BoundingBox:", boundingBox)
if checkPoint != None:
indexer = Indexer()
for airspace in airspaces:
airspace.setIndexer(indexer)
output = bytearray(DATA_LEVELS * DATA_LEVEL_SIZE)
dumpPoint(output, 0, checkPoint, airspaces, True)
print()
for level in range(DATA_LEVELS):
offset = level * DATA_LEVEL_SIZE
index = int.from_bytes([output[offset+0]], 'little', signed=False)
a = output[offset+1]
b = output[offset+2]
inside = bool(index & 0x80)
index &= 0x7F
mode = (a & 0x80) >> 6 | (b & 0x80) >> 7
lat_offset = a & 0x7F
long_offset = b & 0x7F
if lat_offset & 0x40:
lat_offset = -(lat_offset & 0x3F)
if long_offset & 0x40:
long_offset = -(long_offset & 0x3F)
if index == 0x7F:
print ("%u ---" % level)
continue
print ("%u mode %d lat %d lon %d" % (level, mode, lat_offset, long_offset))
indexer.printIndex(index)
count = 0
if checkPoint == None:
try:
procs = []
if latOnly != None and lonOnly != 0:
p = multiprocessing.Process(target=dump, args=(lonOnly,latOnly,airspaces))
procs.append(p)
else:
for lat in range(int(boundingBox[1])-1,int(boundingBox[3])+2):
for lon in range(int(boundingBox[0])-1,int(boundingBox[2])+2):
p = multiprocessing.Process(target=dump, args=(lon,lat,airspaces))
procs.append(p)
running = []
parallelism = multiprocessing.cpu_count() # set to "1" for sequential
if mk_list:
                #if we are making a list, use only one process, since we write the result to a single file
parallelism = 1
os.system("rm lists/%s.list" % os.path.basename(DataSource))
while len(procs) > 0 or len(running) > 0:
# Start as much processes as we have CPUs
while len(running) < parallelism and len(procs) > 0:
p = procs.pop(0)
p.start()
running.append(p)
for i in range(len(running)):
if not running[i].is_alive():
running[i].join()
del running[i]
# "i" is now wrong, break out and restart
break
#time.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
print("Exiting (main)...")
sys.exit(1)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| gpl-2.0 |
dfm/peerless | peerless/catalogs.py | 1 | 6189 | # -*- coding: utf-8 -*-
"""
Code for interfacing with the Exoplanet Archive catalogs.
"""
from __future__ import division, print_function
import os
import logging
from pkg_resources import resource_filename
import pandas as pd
from six.moves import urllib
from .settings import PEERLESS_DATA_DIR
__all__ = [
"KOICatalog", "KICatalog", "EBCatalog", "BlacklistCatalog",
"TargetCatalog", "DatasetsCatalog", "CumulativeCatalog", "UeharaCatalog",
"WangCatalog",
]
def download():
for c in (KOICatalog, KICatalog):
print("Downloading {0}...".format(c.cls.__name__))
c().fetch(clobber=True)
class Catalog(object):
url = None
name = None
ext = ".h5"
def __init__(self, data_root=None):
self.data_root = PEERLESS_DATA_DIR if data_root is None else data_root
self._df = None
self._spatial = None
@property
def filename(self):
if self.name is None:
raise NotImplementedError("subclasses must provide a name")
return os.path.join(self.data_root, "catalogs", self.name + self.ext)
def fetch(self, clobber=False):
# Check for a local file first.
fn = self.filename
if os.path.exists(fn) and not clobber:
logging.info("Found local file: '{0}'".format(fn))
return
# Fetch the remote file.
if self.url is None:
raise NotImplementedError("subclasses must provide a URL")
url = self.url
logging.info("Downloading file from: '{0}'".format(url))
r = urllib.request.Request(url)
handler = urllib.request.urlopen(r)
code = handler.getcode()
if int(code) != 200:
raise CatalogDownloadError(code, url, "")
# Make sure that the root directory exists.
try:
os.makedirs(os.path.split(fn)[0])
except os.error:
pass
self._save_fetched_file(handler)
def _save_fetched_file(self, file_handle):
raise NotImplementedError("subclasses must implement this method")
@property
def df(self):
if self._df is None:
if not os.path.exists(self.filename):
self.fetch()
self._df = pd.read_hdf(self.filename, self.name)
return self._df
class ExoplanetArchiveCatalog(Catalog):
@property
def url(self):
if self.name is None:
raise NotImplementedError("subclasses must provide a name")
return ("http://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/"
"nph-nstedAPI?table={0}&select=*").format(self.name)
def _save_fetched_file(self, file_handle):
df = pd.read_csv(file_handle)
df.to_hdf(self.filename, self.name, format="t")
class KOICatalog(ExoplanetArchiveCatalog):
name = "q1_q17_dr24_koi"
def join_stars(self, df=None):
if df is None:
df = self.df
kic = KICatalog(data_root=self.data_root)
return pd.merge(df, kic.df, on="kepid")
class KICatalog(ExoplanetArchiveCatalog):
name = "q1_q17_dr24_stellar"
class CumulativeCatalog(ExoplanetArchiveCatalog):
name = "cumulative"
class CatalogDownloadError(Exception):
"""
Exception raised when an catalog download request fails.
:param code:
The HTTP status code that caused the failure.
:param url:
The endpoint (with parameters) of the request.
:param txt:
A human readable description of the error.
"""
def __init__(self, code, url, txt):
super(CatalogDownloadError, self).__init__(
"The download returned code {0} for URL: '{1}' with message:\n{2}"
.format(code, url, txt))
self.code = code
self.txt = txt
self.url = url
class LocalCatalog(object):
filename = None
args = dict()
def __init__(self):
self._df = None
@property
def df(self):
if self._df is None:
fn = os.path.join("data", self.filename)
self._df = pd.read_csv(resource_filename(__name__, fn),
**(self.args))
return self._df
class EBCatalog(LocalCatalog):
filename = "ebs.csv"
args = dict(skiprows=7)
class LongPeriodEBCatalog(LocalCatalog):
filename = "lpebs.csv"
args = dict(delim_whitespace=True,
names=["kicid", "period", "width", "t0", "ra", "dec"])
class BlacklistCatalog(LocalCatalog):
filename = "blacklist.csv"
class UeharaCatalog(LocalCatalog):
filename = "uehara.csv"
args = dict(delim_whitespace=True)
class WangCatalog(LocalCatalog):
filename = "wang.csv"
class TargetCatalog(LocalCatalog):
filename = "targets.csv"
@property
def df(self):
if self._df is None:
fn = os.path.join(PEERLESS_DATA_DIR, "catalogs", self.filename)
try:
self._df = pd.read_csv(fn, **(self.args))
except OSError:
print("The target catalog doesn't exist. "
"You need to run 'peerless-targets'")
raise
return self._df
class DatasetsCatalog(LocalCatalog):
filename = "datasets.h5"
@property
def df(self):
if self._df is None:
fn = os.path.join(PEERLESS_DATA_DIR, "catalogs", self.filename)
try:
self._df = pd.read_hdf(fn, "datasets", **(self.args))
except OSError:
print("The datasets catalog doesn't exist. "
"You need to run 'peerless-datasets'")
raise
return self._df
class singleton(object):
def __init__(self, cls):
self.cls = cls
self.inst = None
def __call__(self, *args, **kwargs):
if self.inst is None:
self.inst = self.cls(*args, **kwargs)
return self.inst
# Set all the catalogs to be singletons so that the data are shared across
# instances.
KOICatalog = singleton(KOICatalog)
KICatalog = singleton(KICatalog)
EBCatalog = singleton(EBCatalog)
BlacklistCatalog = singleton(BlacklistCatalog)
TargetCatalog = singleton(TargetCatalog)
DatasetsCatalog = singleton(DatasetsCatalog)
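# Usage sketch (catalog data is fetched and cached on first access; the names
# below are the singletons defined above):
#
#     kois = KOICatalog().df             # KOI table as a pandas DataFrame
#     stars = KOICatalog().join_stars()  # KOI rows merged with stellar data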
| mit |
enlighter/learnML | mini-projects/p0 - titanic survival exploration/titanic_visualizations.py | 3 | 5286 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def filter_data(data, condition):
"""
Remove elements that do not match the condition provided.
Takes a data list as input and returns a filtered list.
Conditions should be a list of strings of the following format:
'<field> <op> <value>'
where the following operations are valid: >, <, >=, <=, ==, !=
Example: ["Sex == 'male'", 'Age < 18']
"""
field, op, value = condition.split(" ")
# convert value into number or strip excess quotes if string
try:
value = float(value)
except:
value = value.strip("\'\"")
# get booleans for filtering
if op == ">":
matches = data[field] > value
elif op == "<":
matches = data[field] < value
elif op == ">=":
matches = data[field] >= value
elif op == "<=":
matches = data[field] <= value
elif op == "==":
matches = data[field] == value
elif op == "!=":
matches = data[field] != value
else: # catch invalid operation codes
raise Exception("Invalid comparison operator. Only >, <, >=, <=, ==, != allowed.")
# filter data and outcomes
data = data[matches].reset_index(drop = True)
return data
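# Usage sketch (assumes `data` is the Titanic passenger DataFrame loaded
# elsewhere in the project):
#
#     males = filter_data(data, "Sex == 'male'")
#     young_males = filter_data(males, 'Age < 18')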
def survival_stats(data, outcomes, key, filters = []):
"""
Print out selected statistics regarding survival, given a feature of
interest and any number of filters (including no filters)
"""
# Check that the key exists
if key not in data.columns.values :
print "'{}' is not a feature of the Titanic data. Did you spell something wrong?".format(key)
return False
# Return the function before visualizing if 'Cabin' or 'Ticket'
# is selected: too many unique categories to display
if(key == 'Cabin' or key == 'PassengerId' or key == 'Ticket'):
print "'{}' has too many unique categories to display! Try a different feature.".format(key)
return False
# Merge data and outcomes into single dataframe
all_data = pd.concat([data, outcomes], axis = 1)
# Apply filters to data
for condition in filters:
all_data = filter_data(all_data, condition)
all_data = all_data[[key, 'Survived']]
# Create plotting figure
plt.figure(figsize=(8,6))
# 'Numerical' features
if(key == 'Age' or key == 'Fare'):
# Divide the range of data into bins and count survival rates
min_value = all_data[key].min()
max_value = all_data[key].max()
value_range = max_value - min_value
# 'Fares' has larger range of values than 'Age' so create more bins
if(key == 'Fare'):
bins = np.arange(0, all_data['Fare'].max() + 20, 20)
if(key == 'Age'):
bins = np.arange(0, all_data['Age'].max() + 10, 10)
# Overlay each bin's survival rates
nonsurv_vals = all_data[all_data['Survived'] == 0][key].reset_index(drop = True)
surv_vals = all_data[all_data['Survived'] == 1][key].reset_index(drop = True)
plt.hist(nonsurv_vals, bins = bins, alpha = 0.6,
color = 'red', label = 'Did not survive')
plt.hist(surv_vals, bins = bins, alpha = 0.6,
color = 'green', label = 'Survived')
# Add legend to plot
plt.xlim(0, bins.max())
plt.legend(framealpha = 0.8)
# 'Categorical' features
else:
# Set the various categories
if(key == 'Pclass'):
values = np.arange(1,4)
if(key == 'Parch' or key == 'SibSp'):
values = np.arange(0,np.max(data[key]) + 1)
if(key == 'Embarked'):
values = ['C', 'Q', 'S']
if(key == 'Sex'):
values = ['male', 'female']
# Create DataFrame containing categories and count of each
frame = pd.DataFrame(index = np.arange(len(values)), columns=(key,'Survived','NSurvived'))
for i, value in enumerate(values):
frame.loc[i] = [value, \
len(all_data[(all_data['Survived'] == 1) & (all_data[key] == value)]), \
len(all_data[(all_data['Survived'] == 0) & (all_data[key] == value)])]
# Set the width of each bar
bar_width = 0.4
# Display each category's survival rates
for i in np.arange(len(frame)):
nonsurv_bar = plt.bar(i-bar_width, frame.loc[i]['NSurvived'], width = bar_width, color = 'r')
surv_bar = plt.bar(i, frame.loc[i]['Survived'], width = bar_width, color = 'g')
plt.xticks(np.arange(len(frame)), values)
plt.legend((nonsurv_bar[0], surv_bar[0]),('Did not survive', 'Survived'), framealpha = 0.8)
# Common attributes for plot formatting
plt.xlabel(key)
plt.ylabel('Number of Passengers')
plt.title('Passenger Survival Statistics With \'%s\' Feature'%(key))
plt.show()
# Report number of passengers with missing values
if sum(pd.isnull(all_data[key])):
nan_outcomes = all_data[pd.isnull(all_data[key])]['Survived']
print "Passengers with missing '{}' values: {} ({} survived, {} did not survive)".format( \
key, len(nan_outcomes), sum(nan_outcomes == 1), sum(nan_outcomes == 0))
| mit |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/scipy/signal/waveforms.py | 9 | 17461 | # Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
from scipy._lib.six import string_types
__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly',
'unit_impulse']
def sawtooth(t, width=1):
"""
Return a periodic sawtooth or triangle waveform.
The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
Time.
width : array_like, optional
Width of the rising ramp as a proportion of the total cycle.
Default is 1, producing a rising ramp, while 0 produces a falling
ramp. `width` = 0.5 produces a triangle wave.
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the sawtooth waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500)
>>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))
"""
t, w = asarray(t), asarray(width)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# take t modulo 2*pi
tmod = mod(t, 2 * pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
tsub = extract(mask2, tmod)
wsub = extract(mask2, w)
place(y, mask2, tsub / (pi * wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
tsub = extract(mask3, tmod)
wsub = extract(mask3, w)
place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
return y
def square(t, duty=0.5):
"""
Return a periodic square-wave waveform.
The square wave has a period ``2*pi``, has value +1 from 0 to
``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
the interval [0,1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
The input time array.
duty : array_like, optional
Duty cycle. Default is 0.5 (50% duty cycle).
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the square waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500, endpoint=False)
>>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
>>> plt.ylim(-2, 2)
A pulse-width modulated sine wave:
>>> plt.figure()
>>> sig = np.sin(2 * np.pi * t)
>>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)
>>> plt.subplot(2, 1, 1)
>>> plt.plot(t, sig)
>>> plt.subplot(2, 1, 2)
>>> plt.plot(t, pwm)
>>> plt.ylim(-1.5, 1.5)
"""
t, w = asarray(t), asarray(duty)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# on the interval 0 to duty*2*pi function is 1
tmod = mod(t, 2 * pi)
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
place(y, mask2, 1)
# on the interval duty*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
place(y, mask3, -1)
return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
retenv=False):
"""
Return a Gaussian modulated sinusoid:
``exp(-a t^2) exp(1j*2*pi*fc*t).``
If `retquad` is True, then return the real and imaginary parts
(in-phase and quadrature).
If `retenv` is True, then return the envelope (unmodulated signal).
Otherwise, return the real part of the modulated sinusoid.
Parameters
----------
t : ndarray or the string 'cutoff'
Input array.
fc : int, optional
Center frequency (e.g. Hz). Default is 1000.
bw : float, optional
Fractional bandwidth in frequency domain of pulse (e.g. Hz).
Default is 0.5.
bwr : float, optional
Reference level at which fractional bandwidth is calculated (dB).
Default is -6.
tpr : float, optional
If `t` is 'cutoff', then the function returns the cutoff
time for when the pulse amplitude falls below `tpr` (in dB).
Default is -60.
retquad : bool, optional
If True, return the quadrature (imaginary) as well as the real part
of the signal. Default is False.
retenv : bool, optional
If True, return the envelope of the signal. Default is False.
Returns
-------
yI : ndarray
Real part of signal. Always returned.
yQ : ndarray
Imaginary part of signal. Only returned if `retquad` is True.
yenv : ndarray
Envelope of signal. Only returned if `retenv` is True.
See Also
--------
scipy.signal.morlet
Examples
--------
Plot real component, imaginary component, and envelope for a 5 Hz pulse,
sampled at 100 Hz for 2 seconds:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 2 * 100, endpoint=False)
>>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)
>>> plt.plot(t, i, t, q, t, e, '--')
"""
if fc < 0:
raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
if bw <= 0:
raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
if bwr >= 0:
raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
"be < 0 dB" % bwr)
# exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
ref = pow(10.0, bwr / 20.0)
# fdel = fc*bw/2: g(fdel) = ref --- solve this for a
#
# pi^2/a * fc^2 * bw^2 /4=-log(ref)
a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))
if isinstance(t, string_types):
if t == 'cutoff': # compute cut_off point
# Solve exp(-a tc**2) = tref for tc
# tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
if tpr >= 0:
raise ValueError("Reference level for time cutoff must be < 0 dB")
tref = pow(10.0, tpr / 20.0)
return sqrt(-log(tref) / a)
else:
raise ValueError("If `t` is a string, it must be 'cutoff'")
yenv = exp(-a * t * t)
yI = yenv * cos(2 * pi * fc * t)
yQ = yenv * sin(2 * pi * fc * t)
if not retquad and not retenv:
return yI
if not retquad and retenv:
return yI, yenv
if retquad and not retenv:
return yI, yQ
if retquad and retenv:
return yI, yQ, yenv
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
"""Frequency-swept cosine generator.
In the following, 'Hz' should be interpreted as 'cycles per unit';
there is no requirement here that the unit is one second. The
important distinction is that the units of rotation are cycles, not
radians. Likewise, `t` could be a measurement of space instead of time.
Parameters
----------
t : array_like
Times at which to evaluate the waveform.
f0 : float
Frequency (e.g. Hz) at time t=0.
t1 : float
Time at which `f1` is specified.
f1 : float
Frequency (e.g. Hz) of the waveform at time `t1`.
method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
Kind of frequency sweep. If not given, `linear` is assumed. See
Notes below for more details.
phi : float, optional
Phase offset, in degrees. Default is 0.
vertex_zero : bool, optional
This parameter is only used when `method` is 'quadratic'.
It determines whether the vertex of the parabola that is the graph
of the frequency is at t=0 or t=t1.
Returns
-------
y : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral
(from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below.
See Also
--------
sweep_poly
Notes
-----
There are four options for the `method`. The following formulas give
the instantaneous frequency (in Hz) of the signal generated by
`chirp()`. For convenience, the shorter names shown below may also be
used.
linear, lin, li:
``f(t) = f0 + (f1 - f0) * t / t1``
quadratic, quad, q:
The graph of the frequency f(t) is a parabola through (0, f0) and
(t1, f1). By default, the vertex of the parabola is at (0, f0).
If `vertex_zero` is False, then the vertex is at (t1, f1). The
formula is:
if vertex_zero is True:
``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
else:
``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
To use a more general quadratic function, or an arbitrary
polynomial, use the function `scipy.signal.waveforms.sweep_poly`.
logarithmic, log, lo:
``f(t) = f0 * (f1/f0)**(t/t1)``
f0 and f1 must be nonzero and have the same sign.
This signal is also known as a geometric or exponential chirp.
hyperbolic, hyp:
``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
f0 and f1 must be nonzero.
"""
# 'phase' is computed in _chirp_phase, to make testing easier.
phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
# Convert phi to radians.
phi *= pi / 180
return cos(phase + phi)
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
Calculate the phase used by chirp_phase to generate its output.
See `chirp_phase` for a description of the arguments.
"""
t = asarray(t)
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)
elif method in ['quadratic', 'quad', 'q']:
beta = (f1 - f0) / (t1 ** 2)
if vertex_zero:
phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
else:
phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)
elif method in ['logarithmic', 'log', 'lo']:
if f0 * f1 <= 0.0:
raise ValueError("For a logarithmic chirp, f0 and f1 must be "
"nonzero and have the same sign.")
if f0 == f1:
phase = 2 * pi * f0 * t
else:
beta = t1 / log(f1 / f0)
phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f0 == 0 or f1 == 0:
raise ValueError("For a hyperbolic chirp, f0 and f1 must be "
"nonzero.")
if f0 == f1:
# Degenerate case: constant frequency.
phase = 2 * pi * f0 * t
else:
# Singular point: the instantaneous frequency blows up
# when t == sing.
sing = -f1 * t1 / (f0 - f1)
phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic',"
" or 'hyperbolic', but a value of %r was given."
% method)
return phase
def sweep_poly(t, poly, phi=0):
"""
Frequency-swept cosine generator, with a time-dependent frequency.
This function generates a sinusoidal function whose instantaneous
frequency varies with time. The frequency at time `t` is given by
the polynomial `poly`.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
poly : 1-D array_like or instance of numpy.poly1d
The desired frequency expressed as a polynomial. If `poly` is
a list or ndarray of length n, then the elements of `poly` are
the coefficients of the polynomial, and the instantaneous
frequency is
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of numpy.poly1d, then the
instantaneous frequency is
``f(t) = poly(t)``
phi : float, optional
Phase offset, in degrees, Default: 0.
Returns
-------
sweep_poly : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral
(from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above.
See Also
--------
chirp
Notes
-----
.. versionadded:: 0.8.0
If `poly` is a list or ndarray of length `n`, then the elements of
`poly` are the coefficients of the polynomial, and the instantaneous
frequency is:
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of `numpy.poly1d`, then the instantaneous
frequency is:
``f(t) = poly(t)``
Finally, the output `s` is:
``cos(phase + (pi/180)*phi)``
where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``,
``f(t)`` as defined above.
"""
# 'phase' is computed in _sweep_poly_phase, to make testing easier.
phase = _sweep_poly_phase(t, poly)
# Convert to radians.
phi *= pi / 180
return cos(phase + phi)
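# Usage sketch (the coefficients are arbitrary example values):
#
#     t = np.linspace(0, 10, 5001)
#     # instantaneous frequency f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2.0
#     w = sweep_poly(t, np.poly1d([0.025, -0.36, 1.25, 2.0]))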
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2 * pi * polyval(intpoly, t)
return phase
def unit_impulse(shape, idx=None, dtype=float):
"""
Unit impulse signal (discrete delta function) or unit basis vector.
Parameters
----------
shape : int or tuple of int
Number of samples in the output (1-D), or a tuple that represents the
shape of the output (N-D).
idx : None or int or tuple of int or 'mid', optional
Index at which the value is 1. If None, defaults to the 0th element.
If ``idx='mid'``, the impulse will be centered at ``shape // 2`` in
all dimensions. If an int, the impulse will be at `idx` in all
dimensions.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
Returns
-------
y : ndarray
Output array containing an impulse signal.
Notes
-----
The 1D case is also known as the Kronecker delta.
.. versionadded:: 0.19.0
Examples
--------
An impulse at the 0th element (:math:`\\delta[n]`):
>>> from scipy import signal
>>> signal.unit_impulse(8)
array([ 1., 0., 0., 0., 0., 0., 0., 0.])
Impulse offset by 2 samples (:math:`\\delta[n-2]`):
>>> signal.unit_impulse(7, 2)
array([ 0., 0., 1., 0., 0., 0., 0.])
2-dimensional impulse, centered:
>>> signal.unit_impulse((3, 3), 'mid')
array([[ 0., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 0.]])
Impulse at (2, 2), using broadcasting:
>>> signal.unit_impulse((4, 4), 2)
array([[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 1., 0.],
[ 0., 0., 0., 0.]])
Plot the impulse response of a 4th-order Butterworth lowpass filter:
>>> imp = signal.unit_impulse(100, 'mid')
>>> b, a = signal.butter(4, 0.2)
>>> response = signal.lfilter(b, a, imp)
>>> import matplotlib.pyplot as plt
>>> plt.plot(np.arange(-50, 50), imp)
>>> plt.plot(np.arange(-50, 50), response)
>>> plt.margins(0.1, 0.1)
>>> plt.xlabel('Time [samples]')
>>> plt.ylabel('Amplitude')
>>> plt.grid(True)
>>> plt.show()
"""
out = zeros(shape, dtype)
shape = np.atleast_1d(shape)
if idx is None:
idx = (0,) * len(shape)
elif idx == 'mid':
idx = tuple(shape // 2)
elif not hasattr(idx, "__iter__"):
idx = (idx,) * len(shape)
out[idx] = 1
return out
| agpl-3.0 |
crew/metrics-api | examples/ex_matplotlib.py | 1 | 1101 | import sys
sys.path.insert(0, '..')
import time
import threading
from crew.metrics.httpapi import HttpAPI
# XXX The test script needs matplotlib
def f():
# create the api object.
api = HttpAPI(namespace='ns', apikey='apikey', hostname='localhost',
port=2000, timeout=20)
# set the start and end range. Not implemented yet.
start_time = time.time() - 100000
end_time = time.time()
# Do a retrieve and display the graph.
for _ in range(1):
x = api.retrieve(start_time=start_time, end_time=end_time, interval=1,
attributes={'type': 'E'},
fields=['amount'])
print x
# test_plot(x)
print len(x)
def test_plot(x):
import matplotlib.pyplot as plt
times = [y['timestamp'] for y in x]
amounts = [y['amount'] for y in x]
z = plt.plot(times, amounts, 'g^-')
plt.show()
def main():
ts = []
for _ in range(1):
t = threading.Thread(target=f)
t.daemon = True
t.start()
ts.append(t)
for t in ts:
t.join()
if __name__ == '__main__':
main()
| mit |
airanmehr/bio | Scripts/TimeSeriesPaper/RealData/Run.py | 1 | 6385 | '''
Copyleft Jun 10, 2016 Arya Iranmehr, PhD Student, Bafna Lab, UC San Diego, Email: airanmehr@gmail.com
'''
import sys;
import numpy as np;
sys.path.insert(1, '/home/arya/workspace/bio/')
np.set_printoptions(linewidth=200, precision=5, suppress=True)
import pandas as pd;
pd.options.display.max_rows = 40;
pd.options.display.max_columns = 40;
pd.options.display.expand_frame_repr = False
import os;
home = os.path.expanduser('~') + '/'
from time import time
numProcess = 1
print 'numProcess=',numProcess
import Utils.Plots as pplt
import Scripts.TimeSeriesPaper.RealData.GeneAnalysis as ga
import Scripts.TimeSeriesPaper.RealData.Utils as rutl
import CLEAR.Libs.Markov as mkv
reload(pplt)
reload(ga)
import numpy as np
import sys
sys.path.insert(1, '/home/arya/workspace/bio/')
np.set_printoptions(linewidth=200, precision=5, suppress=True)
import pandas as pd;
import Utils.Util as utl
import optparse, socket, datetime
parser = optparse.OptionParser()
parser.add_option('-d', '--overdominance', action="store", dest="h", help="nu0=[0.005,0.1]", default=0.5, type="float")
parser.add_option('-o', '--shutstd', action="store", dest="shutstd", help="takes 0,1", default=0, type='int')
options, args = parser.parse_args()
options.runname = 'h{}.'.format(int(options.h * 100)) + str(datetime.datetime.now()).split('.')[0]
print utl.stdoutpath + options.runname
if options.shutstd:
sys.stderr = sys.stdout = open(utl.stdoutpath + options.runname + '.out', 'w')
print 'Running on', socket.gethostname(), str(datetime.datetime.now()), options;
sys.stdout.flush()
def ML():
pd.read_pickle('/home/arya/out/real/scores.df')
import Scripts.TimeSeriesPaper.Estimate as est
import Utils.Simulation as Simulation
reload(est)
E = pd.read_pickle(utl.outpath + 'markov/Emissions.df')
sim = Simulation.Simulation.Load();
sim.computeCDi(E, 300)
sim.cd
est.CMH(sim)
cd = pd.read_pickle(utl.outpath + 'real/CD.df').iloc[:10].loc[:, pd.IndexSlice[:, [0, 37], :]]
cd = pd.read_pickle(utl.outpath + 'real/CD.df').iloc[:10].loc[:, pd.IndexSlice[:, [0, 15, 23], :]]
x = cd.groupby(level=[0, 1], axis=1).apply(
lambda x: x.apply(lambda y: [(y.values[0], y.values[1] - y.values[0])], axis=1)).xs('C', level='READ', axis=1)
x
xx2 = x[(2, 15)].copy(True);
x[(2, 15)] = x[(0, 23)];
x[(0, 23)] = xx2;
x
def replicatesSanityCheck():
a = pd.read_csv(utl.dataPathDmel + 'AlternatingTemperatures/F37/BF37.head', header=None, sep='\t').iloc[:, [0, 1, -1]].set_index([0, 1]).sort_index().iloc[:,0]
a
cd=pd.read_pickle('/home/arya/out/real/CD.F59.df').loc[a.index,pd.IndexSlice[:,[0,37]]]
print (a-utl.CMHcd(cd,damp=0,negLog10=False,eps=0)).abs().sum()
CD=pd.read_pickle('/home/arya/out/real/CD.F59.df').loc[a.index]
a = pd.read_csv(utl.dataPathDmel + 'AlternatingTemperatures/F37/BF15.head', header=None, sep='\t').iloc[:, [0, 1, -1]].set_index([0, 1]).sort_index().iloc[:,0]
cd=pd.read_pickle('/home/arya/out/real/CD.F59.df').loc[a.index]
cd=cd.groupby(level=[0],axis=1).apply(lambda x: x.iloc[:,:4]).T.dropna().reset_index()
cd.GEN=cd.GEN.replace(23,15)
cd=cd.set_index(['REP','GEN','READ']).T
print (a-utl.CMHcd(cd,damp=0,negLog10=False,eps=0)).abs().sum()
def poolReplicates():
try:
return pd.read_pickle('/home/arya/out/real/CD.F59.pooled.df')
except:
CD = pd.read_pickle(utl.outpath + 'real/CD.F59.df')
c = CD.xs('C', level='READ', axis=1).groupby(level=1,axis=1).sum()
d = CD.xs('D', level='READ', axis=1).groupby(level=1,axis=1).sum()
d=d[rutl.filterHighCoverage()]
d=d[(d.apply(lambda x: x>x.quantile(0.9998)).sum(1)==0)]
d=d[(d.iloc[:,-2:].apply(lambda x: x>x.quantile(0.9992)).sum(1)==0)]
CD=pd.concat([pd.concat([c,d],1,keys=['C','D'])],1,keys=[0]).reorder_levels([0,2,1],1).sort_index(1).dropna()
CD.columns.names=['REP','GEN','READ']
CD.to_pickle('/home/arya/out/real/CD.F59.pooled.df')
return CD
def loadFilteredCoverage():
CD=pd.read_pickle(utl.outpath+'real/CD.F59.df')
CD2=CD.loc[pd.read_pickle('/home/arya/out/real/CD.F59.pooled.df').index]
d=CD2.xs('D',level=2,axis=1)
D=CD.xs('D',level=2,axis=1)
dd=d.apply(lambda x: x[x>x.quantile(0.8)])
cutoff=dd.apply(lambda x: x.mean()+6*x.std())
ddd=d.groupby(level=[0,1],axis=1).apply(lambda x: x[x<cutoff[x.name]]).dropna()
CD2=CD2.loc[ddd.index]
return CD2
def analyze():
f=lambda x: x.alt-x.null
a=f(pd.read_pickle('/home/arya/out/real/HMM/HMM.df')[0.5]).rename('H')
CD=loadFilteredCoverage()
X=CD.groupby(level=[0,1],axis=1).apply(lambda x : x[x.name].C/x[x.name].D).groupby(level='GEN',axis=1).mean()
df=pd.concat([a.sort_values()[-2000:],X,CD.xs('D',level=2,axis=1).sum(1).rename('D')],1).dropna()
df.plot.scatter(x=0,y='H');df.plot.scatter(x=59,y='H');df.plot.scatter(x=0,y=59);df.plot.scatter(x='H',y='D')
pplt.Manhattan(a,std_th=5)
df.sort_values([59])
pplt.GenomeChromosomewise( df.H)
df.groupby(level=0).size()
chroms=['2L','2R','3L','3R','X','4']
a.loc[chroms].to_csv(utl.outpath+'real/gowinda/allsnps.txt',sep='\t')
a.loc[chroms].sort_values(ascending=False).iloc[:2000].sort_index().to_csv(utl.outpath+'real/gowinda/cands.final.txt',sep='\t')
aa=a.loc[chroms]
pplt.Manhattan(aa[aa>aa.quantile(0.8)],top_k=2000);pplt.savefig('manhattan',300)
def scanNulls(N,k):
path='/home/arya/out/real/NeutralSims/N{}/'.format(N)
for i in range(k):
mkv.HMM(gridH=[0.5,5], CDfname=path+'CD.{}.df'.format(i),N=N,path=path,loadCDE= True,saveCDE=True,transitionsPath=utl.outpath+'real/HMM/T/').fit(True)
if __name__ == '__main__':
start=time()
path=utl.outpath+'real/HMM/'
# dta.createF37VCF()
# dta.computeF37()
# dta.computeF59()
#options.h=0.5
reload(mkv)
HMM=mkv.HMM(gridH=[0.5,5], CDfname=utl.outpath+'real/CD.F59.df',N=250,path=path,loadCDE= True,saveCDE=False,precomputeTransitions=False)
HMM.fit(True)
#HMM=mkv.HMM(H=[0.5],N=1000,Ns=500,path=path,transitionsPath=path+'T/',loadCDE=True,batchSize=int(3e5))
# HMM=mkv.HMM(N=1000,Ns=500,path=path,loadCDE=True,batchSize=int(1e5)).fit(True)
# scores=pd.read_pickle('/home/arya/out/real/HMM/HMM.df')
#rutl.scanCMH()
# ga.computeGeneRankings()
print '\nDone in {:.1f} secs.'.format(time()-start)
| mit |
benschmaus/catapult | third_party/google-endpoints/endpoints/test/api_config_test.py | 7 | 90686 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for endpoints.api_config."""
import itertools
import json
import logging
import unittest
import endpoints.api_config as api_config
from endpoints.api_config import ApiConfigGenerator
from endpoints.api_config import AUTH_LEVEL
import endpoints.api_exceptions as api_exceptions
import mock
from protorpc import message_types
from protorpc import messages
from protorpc import remote
import endpoints.resource_container as resource_container
import test_util
package = 'api_config_test'
_DESCRIPTOR_PATH_PREFIX = ''
class ModuleInterfaceTest(test_util.ModuleInterfaceTest,
unittest.TestCase):
MODULE = api_config
class Nested(messages.Message):
"""Message class to be used in a message field."""
int_value = messages.IntegerField(1)
string_value = messages.StringField(2)
class SimpleEnum(messages.Enum):
"""Simple enumeration type."""
VAL1 = 1
VAL2 = 2
class AllFields(messages.Message):
"""Contains all field types."""
bool_value = messages.BooleanField(1, variant=messages.Variant.BOOL)
bytes_value = messages.BytesField(2, variant=messages.Variant.BYTES)
double_value = messages.FloatField(3, variant=messages.Variant.DOUBLE)
enum_value = messages.EnumField(SimpleEnum, 4)
float_value = messages.FloatField(5, variant=messages.Variant.FLOAT)
int32_value = messages.IntegerField(6, variant=messages.Variant.INT32)
int64_value = messages.IntegerField(7, variant=messages.Variant.INT64)
string_value = messages.StringField(8, variant=messages.Variant.STRING)
uint32_value = messages.IntegerField(9, variant=messages.Variant.UINT32)
uint64_value = messages.IntegerField(10, variant=messages.Variant.UINT64)
sint32_value = messages.IntegerField(11, variant=messages.Variant.SINT32)
sint64_value = messages.IntegerField(12, variant=messages.Variant.SINT64)
message_field_value = messages.MessageField(Nested, 13)
datetime_value = message_types.DateTimeField(14)
# This is used to test "all fields" as query parameters instead of the body
# in a request.
ALL_FIELDS_AS_PARAMETERS = resource_container.ResourceContainer(
**{field.name: field for field in AllFields.all_fields()})
class ApiConfigTest(unittest.TestCase):
def setUp(self):
self.generator = ApiConfigGenerator()
self.maxDiff = None
def testAllVariantsCovered(self):
variants_covered = set([field.variant for field in AllFields.all_fields()])
for variant in variants_covered:
self.assertTrue(isinstance(variant, messages.Variant))
variants_covered_dict = {}
for variant in variants_covered:
number = variant.number
if variants_covered_dict.get(variant.name, number) != number:
self.fail('Somehow have two variants with same name and '
'different number')
variants_covered_dict[variant.name] = number
test_util.AssertDictEqual(
messages.Variant.to_dict(), variants_covered_dict, self)
def testAllFieldTypes(self):
class PutRequest(messages.Message):
"""Message with just a body field."""
body = messages.MessageField(AllFields, 1)
class ItemsPutRequest(messages.Message):
"""Message with path params and a body field."""
body = messages.MessageField(AllFields, 1)
entryId = messages.StringField(2, required=True)
class ItemsPutRequestForContainer(messages.Message):
"""Message with path params and a body field."""
body = messages.MessageField(AllFields, 1)
items_put_request_container = resource_container.ResourceContainer(
ItemsPutRequestForContainer,
entryId=messages.StringField(2, required=True))
class EntryPublishRequest(messages.Message):
"""Message with two required params, one in path, one in body."""
title = messages.StringField(1, required=True)
entryId = messages.StringField(2, required=True)
class EntryPublishRequestForContainer(messages.Message):
"""Message with two required params, one in path, one in body."""
title = messages.StringField(1, required=True)
entry_publish_request_container = resource_container.ResourceContainer(
EntryPublishRequestForContainer,
entryId=messages.StringField(2, required=True))
@api_config.api(name='root', hostname='example.appspot.com', version='v1')
class MyService(remote.Service):
"""Describes MyService."""
@api_config.method(AllFields, message_types.VoidMessage, path='entries',
http_method='GET', name='entries.get')
def entries_get(self, unused_request):
"""All field types in the query parameters."""
return message_types.VoidMessage()
@api_config.method(ALL_FIELDS_AS_PARAMETERS, message_types.VoidMessage,
path='entries/container', http_method='GET',
name='entries.getContainer')
def entries_get_container(self, unused_request):
"""All field types in the query parameters."""
return message_types.VoidMessage()
@api_config.method(PutRequest, message_types.VoidMessage, path='entries',
name='entries.put')
def entries_put(self, unused_request):
"""Request body is in the body field."""
return message_types.VoidMessage()
@api_config.method(AllFields, message_types.VoidMessage, path='process',
name='entries.process')
def entries_process(self, unused_request):
"""Message is the request body."""
return message_types.VoidMessage()
@api_config.method(message_types.VoidMessage, message_types.VoidMessage,
name='entries.nested.collection.action',
path='nested')
def entries_nested_collection_action(self, unused_request):
"""A VoidMessage for a request body."""
return message_types.VoidMessage()
@api_config.method(AllFields, AllFields, name='entries.roundtrip',
path='roundtrip')
def entries_roundtrip(self, unused_request):
"""All field types in the request and response."""
pass
# Test a method with a required parameter in the request body.
@api_config.method(EntryPublishRequest, message_types.VoidMessage,
path='entries/{entryId}/publish',
name='entries.publish')
def entries_publish(self, unused_request):
"""Path has a parameter and request body has a required param."""
return message_types.VoidMessage()
@api_config.method(entry_publish_request_container,
message_types.VoidMessage,
path='entries/container/{entryId}/publish',
name='entries.publishContainer')
def entries_publish_container(self, unused_request):
"""Path has a parameter and request body has a required param."""
return message_types.VoidMessage()
# Test a method with a parameter in the path and a request body.
@api_config.method(ItemsPutRequest, message_types.VoidMessage,
path='entries/{entryId}/items',
name='entries.items.put')
def items_put(self, unused_request):
"""Path has a parameter and request body is in the body field."""
return message_types.VoidMessage()
@api_config.method(items_put_request_container, message_types.VoidMessage,
path='entries/container/{entryId}/items',
name='entries.items.putContainer')
def items_put_container(self, unused_request):
"""Path has a parameter and request body is in the body field."""
return message_types.VoidMessage()
api = json.loads(self.generator.pretty_print_config_to_json(MyService))
expected = {
'root.entries.get': {
'description': 'All field types in the query parameters.',
'httpMethod': 'GET',
'path': 'entries',
'request': {
'body': 'empty',
'parameters': {
'bool_value': {
'type': 'boolean',
},
'bytes_value': {
'type': 'bytes',
},
'double_value': {
'type': 'double',
},
'enum_value': {
'type': 'string',
'enum': {
'VAL1': {
'backendValue': 'VAL1',
},
'VAL2': {
'backendValue': 'VAL2',
},
},
},
'float_value': {
'type': 'float',
},
'int32_value': {
'type': 'int32',
},
'int64_value': {
'type': 'int64',
},
'string_value': {
'type': 'string',
},
'uint32_value': {
'type': 'uint32',
},
'uint64_value': {
'type': 'uint64',
},
'sint32_value': {
'type': 'int32',
},
'sint64_value': {
'type': 'int64',
},
'message_field_value.int_value': {
'type': 'int64',
},
'message_field_value.string_value': {
'type': 'string',
},
'datetime_value.milliseconds': {
'type': 'int64',
},
'datetime_value.time_zone_offset': {
'type': 'int64',
},
},
},
'response': {
'body': 'empty',
},
'rosyMethod': 'MyService.entries_get',
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'clientIds': [api_config.API_EXPLORER_CLIENT_ID],
'authLevel': 'NONE',
},
'root.entries.getContainer': {
'description': 'All field types in the query parameters.',
'httpMethod': 'GET',
'path': 'entries/container',
'request': {
'body': 'empty',
'parameters': {
'bool_value': {
'type': 'boolean'
},
'bytes_value': {
'type': 'bytes'
},
'datetime_value.milliseconds': {
'type': 'int64'
},
'datetime_value.time_zone_offset': {
'type': 'int64'
},
'double_value': {
'type': 'double'
},
'enum_value': {
'enum': {
'VAL1': {'backendValue': 'VAL1'},
'VAL2': {'backendValue': 'VAL2'},
},
'type': 'string',
},
'float_value': {
'type': 'float'
},
'int32_value': {
'type': 'int32'
},
'int64_value': {
'type': 'int64'
},
'message_field_value.int_value': {
'type': 'int64'
},
'message_field_value.string_value': {
'type': 'string'
},
'sint32_value': {
'type': 'int32'
},
'sint64_value': {
'type': 'int64'
},
'string_value': {
'type': 'string'
},
'uint32_value': {
'type': 'uint32'
},
'uint64_value': {
'type': 'uint64'
}
}
},
'response': {
'body': 'empty'
},
'rosyMethod': 'MyService.entries_get_container',
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'clientIds': [api_config.API_EXPLORER_CLIENT_ID],
'authLevel': 'NONE',
},
'root.entries.publishContainer': {
'description': ('Path has a parameter and request body has a '
'required param.'),
'httpMethod': 'POST',
'path': 'entries/container/{entryId}/publish',
'request': {
'body': 'autoTemplate(backendRequest)',
'bodyName': 'resource',
'parameterOrder': ['entryId'],
'parameters': {
'entryId': {
'required': True,
'type': 'string',
}
}
},
'response': {
'body': 'empty'
},
'rosyMethod': 'MyService.entries_publish_container',
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'clientIds': [api_config.API_EXPLORER_CLIENT_ID],
'authLevel': 'NONE',
},
'root.entries.put': {
'description': 'Request body is in the body field.',
'httpMethod': 'POST',
'path': 'entries',
'request': {
'body': 'autoTemplate(backendRequest)',
'bodyName': 'resource'
},
'response': {
'body': 'empty'
},
'rosyMethod': 'MyService.entries_put',
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'clientIds': [api_config.API_EXPLORER_CLIENT_ID],
'authLevel': 'NONE',
},
'root.entries.process': {
'description': 'Message is the request body.',
'httpMethod': 'POST',
'path': 'process',
'request': {
'body': 'autoTemplate(backendRequest)',
'bodyName': 'resource'
},
'response': {
'body': 'empty'
},
'rosyMethod': 'MyService.entries_process',
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'clientIds': [api_config.API_EXPLORER_CLIENT_ID],
'authLevel': 'NONE',
},
'root.entries.nested.collection.action': {
'description': 'A VoidMessage for a request body.',
'httpMethod': 'POST',
'path': 'nested',
'request': {
'body': 'empty'
},
'response': {
'body': 'empty'
},
'rosyMethod': 'MyService.entries_nested_collection_action',
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'clientIds': [api_config.API_EXPLORER_CLIENT_ID],
'authLevel': 'NONE',
},
'root.entries.roundtrip': {
'description': 'All field types in the request and response.',
'httpMethod': 'POST',
'path': 'roundtrip',
'request': {
'body': 'autoTemplate(backendRequest)',
'bodyName': 'resource'
},
'response': {
'body': 'autoTemplate(backendResponse)',
'bodyName': 'resource'
},
'rosyMethod': 'MyService.entries_roundtrip',
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'clientIds': [api_config.API_EXPLORER_CLIENT_ID],
'authLevel': 'NONE',
},
'root.entries.publish': {
'description':
'Path has a parameter and request body has a required param.',
'httpMethod': 'POST',
'path': 'entries/{entryId}/publish',
'request': {
'body': 'autoTemplate(backendRequest)',
'bodyName': 'resource',
'parameterOrder': [
'entryId'
],
'parameters': {
'entryId': {
'type': 'string',
'required': True,
},
},
},
'response': {
'body': 'empty'
},
'rosyMethod': 'MyService.entries_publish',
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'clientIds': [api_config.API_EXPLORER_CLIENT_ID],
'authLevel': 'NONE',
},
'root.entries.items.put': {
'description':
'Path has a parameter and request body is in the body field.',
'httpMethod': 'POST',
'path': 'entries/{entryId}/items',
'request': {
'body': 'autoTemplate(backendRequest)',
'bodyName': 'resource',
'parameterOrder': [
'entryId'
],
'parameters': {
'entryId': {
'type': 'string',
'required': True,
},
},
},
'response': {
'body': 'empty'
},
'rosyMethod': 'MyService.items_put',
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'clientIds': [api_config.API_EXPLORER_CLIENT_ID],
'authLevel': 'NONE',
},
'root.entries.items.putContainer': {
'description': ('Path has a parameter and request body is in '
'the body field.'),
'httpMethod': 'POST',
'path': 'entries/container/{entryId}/items',
'request': {
'body': 'autoTemplate(backendRequest)',
'bodyName': 'resource',
'parameterOrder': [
'entryId'
],
'parameters': {
'entryId': {
'type': 'string',
'required': True,
},
},
},
'response': {
'body': 'empty'
},
'rosyMethod': 'MyService.items_put_container',
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'clientIds': [api_config.API_EXPLORER_CLIENT_ID],
'authLevel': 'NONE',
}
}
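    # Note the two views of the same fields: as query parameters above, 64-bit
    # integers are typed directly ('int64'/'uint64'), while in the schema
    # descriptor below they are represented as JSON strings carrying an
    # 'int64'/'uint64' format.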
expected_descriptor = {
'methods': {
'MyService.entries_get': {},
'MyService.entries_get_container': {},
'MyService.entries_nested_collection_action': {},
'MyService.entries_process': {
'request': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestAllFields')
}
},
'MyService.entries_publish': {
'request': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestEntryPublishRequest')
}
},
'MyService.entries_publish_container': {
'request': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestEntryPublishRequestForContainer')
}
},
'MyService.entries_put': {
'request': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestPutRequest')
}
},
'MyService.entries_roundtrip': {
'request': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestAllFields')
},
'response': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestAllFields')
}
},
'MyService.items_put': {
'request': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestItemsPutRequest')
}
},
'MyService.items_put_container': {
'request': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestItemsPutRequestForContainer')
}
}
},
'schemas': {
_DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestAllFields': {
'description': 'Contains all field types.',
'id': _DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestAllFields',
'properties': {
'bool_value': {
'type': 'boolean'
},
'bytes_value': {
'type': 'string',
'format': 'byte'
},
'double_value': {
'format': 'double',
'type': 'number'
},
'enum_value': {
'type': 'string',
'enum': ['VAL1', 'VAL2']
},
'float_value': {
'format': 'float',
'type': 'number'
},
'int32_value': {
'format': 'int32',
'type': 'integer'
},
'int64_value': {
'format': 'int64',
'type': 'string'
},
'string_value': {
'type': 'string'
},
'uint32_value': {
'format': 'uint32',
'type': 'integer'
},
'uint64_value': {
'format': 'uint64',
'type': 'string'
},
'sint32_value': {
'format': 'int32',
'type': 'integer'
},
'sint64_value': {
'format': 'int64',
'type': 'string'
},
'message_field_value': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestNested'),
'description': ('Message class to be used in a '
'message field.'),
},
'datetime_value': {
'format': 'date-time',
'type': 'string'
},
},
'type': 'object'
},
_DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestEntryPublishRequest': {
'description': ('Message with two required params, '
'one in path, one in body.'),
'id': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestEntryPublishRequest'),
'properties': {
'entryId': {
'required': True,
'type': 'string'
},
'title': {
'required': True,
'type': 'string'
}
},
'type': 'object'
},
(_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestEntryPublishRequestForContainer'): {
'description': ('Message with two required params, '
'one in path, one in body.'),
'id': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestEntryPublishRequestForContainer'),
'properties': {
'title': {
'required': True,
'type': 'string'
}
},
'type': 'object'
},
_DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestItemsPutRequest': {
'description': 'Message with path params and a body field.',
'id': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestItemsPutRequest'),
'properties': {
'body': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestAllFields'),
'description': 'Contains all field types.'
},
'entryId': {
'required': True,
'type': 'string'
}
},
'type': 'object'
},
(_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestItemsPutRequestForContainer'): {
'description': 'Message with path params and a body field.',
'id': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestItemsPutRequestForContainer'),
'properties': {
'body': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestAllFields'),
'description': 'Contains all field types.'
},
},
'type': 'object'
},
_DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestNested': {
'description': 'Message class to be used in a message field.',
'id': _DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestNested',
'properties': {
'int_value': {
'format': 'int64',
'type': 'string'
},
'string_value': {
'type': 'string'
}
},
'type': 'object'
},
_DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestPutRequest': {
'description': 'Message with just a body field.',
'id': _DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestPutRequest',
'properties': {
'body': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestAllFields'),
'description': 'Contains all field types.'
}
},
'type': 'object'
},
'ProtorpcMessageTypesVoidMessage': {
'description': 'Empty message.',
'id': 'ProtorpcMessageTypesVoidMessage',
'properties': {},
'type': 'object'
}
}
}
expected_adapter = {
'bns': 'https://example.appspot.com/_ah/api',
'type': 'lily',
'deadline': 10.0}
test_util.AssertDictEqual(expected, api['methods'], self)
test_util.AssertDictEqual(expected_descriptor, api['descriptor'], self)
test_util.AssertDictEqual(expected_adapter, api['adapter'], self)
self.assertEqual('Describes MyService.', api['description'])
methods = api['descriptor']['methods']
self.assertTrue('MyService.entries_get' in methods)
self.assertTrue('MyService.entries_put' in methods)
self.assertTrue('MyService.entries_process' in methods)
self.assertTrue('MyService.entries_nested_collection_action' in methods)
def testEmptyRequestNonEmptyResponse(self):
class MyResponse(messages.Message):
bool_value = messages.BooleanField(1)
int32_value = messages.IntegerField(2)
@api_config.api(name='root', version='v1', hostname='example.appspot.com')
class MySimpleService(remote.Service):
@api_config.method(message_types.VoidMessage, MyResponse,
name='entries.get')
def entries_get(self, request):
pass
api = json.loads(self.generator.pretty_print_config_to_json(
MySimpleService))
expected_request = {
'body': 'empty'
}
expected_response = {
'body': 'autoTemplate(backendResponse)',
'bodyName': 'resource'
}
test_util.AssertDictEqual(
expected_response, api['methods']['root.entries.get']['response'], self)
test_util.AssertDictEqual(
expected_request, api['methods']['root.entries.get']['request'], self)
def testEmptyService(self):
@api_config.api('root', 'v1', hostname='example.appspot.com')
class EmptyService(remote.Service):
pass
api = json.loads(self.generator.pretty_print_config_to_json(EmptyService))
self.assertTrue('methods' not in api)
def testOptionalProperties(self):
"""Verify that optional config properties show up if they're supposed to."""
optional_props = (
('canonical_name', 'canonicalName', 'Test Canonical Name'),
('owner_domain', 'ownerDomain', 'google.com'),
('owner_name', 'ownerName', 'Google'),
('package_path', 'packagePath', 'cloud/platform'),
('title', 'title', 'My Root API'),
('documentation', 'documentation', 'http://link.to/docs'))
# Try all combinations of the above properties.
for length in range(1, len(optional_props) + 1):
for combination in itertools.combinations(optional_props, length):
kwargs = {}
for property_name, _, value in combination:
kwargs[property_name] = value
@api_config.api('root', 'v1', **kwargs)
class MyService(remote.Service):
pass
api = json.loads(self.generator.pretty_print_config_to_json(MyService))
for _, config_name, value in combination:
self.assertEqual(api[config_name], value)
# If the value is not set, verify that it's not there.
for property_name, config_name, value in optional_props:
@api_config.api('root2', 'v2')
class EmptyService2(remote.Service):
pass
api = json.loads(self.generator.pretty_print_config_to_json(
EmptyService2))
self.assertNotIn(config_name, api)
def testAuth(self):
"""Verify that auth shows up in the config if it's supposed to."""
empty_auth = api_config.ApiAuth()
used_auth = api_config.ApiAuth(allow_cookie_auth=False)
cookie_auth = api_config.ApiAuth(allow_cookie_auth=True)
empty_blocked_regions = api_config.ApiAuth(blocked_regions=[])
one_blocked = api_config.ApiAuth(blocked_regions=['us'])
many_blocked = api_config.ApiAuth(blocked_regions=['CU', 'IR', 'KP', 'SD',
'SY', 'MM'])
mixed = api_config.ApiAuth(allow_cookie_auth=True,
blocked_regions=['US', 'IR'])
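    # Expected behaviour covered below: an ApiAuth with nothing set (or with
    # only an empty blocked_regions list) should produce no 'auth' section in
    # the generated config at all.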
for auth, expected_result in ((None, None),
(empty_auth, None),
(used_auth, {'allowCookieAuth': False}),
(cookie_auth, {'allowCookieAuth': True}),
(empty_blocked_regions, None),
(one_blocked, {'blockedRegions': ['us']}),
(many_blocked, {'blockedRegions':
['CU', 'IR', 'KP', 'SD',
'SY', 'MM']}),
(mixed, {'allowCookieAuth': True,
'blockedRegions': ['US', 'IR']})):
@api_config.api('root', 'v1', auth=auth)
class EmptyService(remote.Service):
pass
api = json.loads(self.generator.pretty_print_config_to_json(EmptyService))
if expected_result is None:
self.assertNotIn('auth', api)
else:
self.assertEqual(api['auth'], expected_result)
def testFrontEndLimits(self):
"""Verify that frontendLimits info in the API is written to the config."""
rules = [
api_config.ApiFrontEndLimitRule(match='foo', qps=234, user_qps=567,
daily=8910, analytics_id='asdf'),
api_config.ApiFrontEndLimitRule(match='bar', qps=0, user_qps=0,
analytics_id='sdf1'),
api_config.ApiFrontEndLimitRule()]
frontend_limits = api_config.ApiFrontEndLimits(unregistered_user_qps=123,
unregistered_qps=456,
unregistered_daily=789,
rules=rules)
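    # The completely empty ApiFrontEndLimitRule above should be dropped from
    # the generated config (hence the expected rule count of 2 below), and
    # unset fields such as 'daily' on the second rule should be omitted.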
@api_config.api('root', 'v1', frontend_limits=frontend_limits)
class EmptyService(remote.Service):
pass
api = json.loads(self.generator.pretty_print_config_to_json(EmptyService))
self.assertIn('frontendLimits', api)
self.assertEqual(123, api['frontendLimits'].get('unregisteredUserQps'))
self.assertEqual(456, api['frontendLimits'].get('unregisteredQps'))
self.assertEqual(789, api['frontendLimits'].get('unregisteredDaily'))
self.assertEqual(2, len(api['frontendLimits'].get('rules')))
self.assertEqual('foo', api['frontendLimits']['rules'][0]['match'])
self.assertEqual(234, api['frontendLimits']['rules'][0]['qps'])
self.assertEqual(567, api['frontendLimits']['rules'][0]['userQps'])
self.assertEqual(8910, api['frontendLimits']['rules'][0]['daily'])
self.assertEqual('asdf', api['frontendLimits']['rules'][0]['analyticsId'])
self.assertEqual('bar', api['frontendLimits']['rules'][1]['match'])
self.assertEqual(0, api['frontendLimits']['rules'][1]['qps'])
self.assertEqual(0, api['frontendLimits']['rules'][1]['userQps'])
self.assertNotIn('daily', api['frontendLimits']['rules'][1])
self.assertEqual('sdf1', api['frontendLimits']['rules'][1]['analyticsId'])
def testAllCombinationsRepeatedRequiredDefault(self):
    # TODO(kdeus): When the backwards compatibility for non-ResourceContainer
    #              parameter requests is removed, this class and the
    #              accompanying method should be removed.
class AllCombinations(messages.Message):
"""Documentation for AllCombinations."""
string = messages.StringField(1)
string_required = messages.StringField(2, required=True)
string_default_required = messages.StringField(3, required=True,
default='Foo')
string_repeated = messages.StringField(4, repeated=True)
enum_value = messages.EnumField(SimpleEnum, 5, default=SimpleEnum.VAL2)
all_combinations_container = resource_container.ResourceContainer(
**{field.name: field for field in AllCombinations.all_fields()})
@api_config.api('root', 'v1', hostname='example.appspot.com')
class MySimpleService(remote.Service):
@api_config.method(AllCombinations, message_types.VoidMessage,
path='foo', http_method='GET')
def get(self, unused_request):
return message_types.VoidMessage()
@api_config.method(all_combinations_container, message_types.VoidMessage,
name='getContainer',
path='bar', http_method='GET')
def get_container(self, unused_request):
return message_types.VoidMessage()
api = json.loads(self.generator.pretty_print_config_to_json(
MySimpleService))
get_config = {
'httpMethod': 'GET',
'path': 'foo',
'request': {
'body': 'empty',
'parameterOrder': [
'string_required',
'string_default_required',
],
'parameters': {
'enum_value': {
'default': 'VAL2',
'type': 'string',
'enum': {
'VAL1': {
'backendValue': 'VAL1',
},
'VAL2': {
'backendValue': 'VAL2',
},
},
},
'string': {
'type': 'string',
},
'string_default_required': {
'default': 'Foo',
'required': True,
'type': 'string',
},
'string_repeated': {
'type': 'string',
'repeated': True,
},
'string_required': {
'required': True,
'type': 'string',
},
},
},
'response': {
'body': 'empty',
},
'rosyMethod': 'MySimpleService.get',
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'clientIds': [api_config.API_EXPLORER_CLIENT_ID],
'authLevel': 'NONE',
}
get_container_config = get_config.copy()
get_container_config['path'] = 'bar'
get_container_config['rosyMethod'] = 'MySimpleService.get_container'
expected = {
'root.get': get_config,
'root.getContainer': get_container_config
}
test_util.AssertDictEqual(expected, api['methods'], self)
def testMultipleClassesSingleApi(self):
"""Test an API that's split into multiple classes."""
root_api = api_config.api('root', 'v1', hostname='example.appspot.com')
# First class has a request that reads some arguments.
class Response1(messages.Message):
string_value = messages.StringField(1)
@root_api.api_class(resource_name='request')
class RequestService(remote.Service):
@api_config.method(message_types.VoidMessage, Response1,
path='request_path', http_method='GET')
def my_request(self, unused_request):
pass
# Second class, no methods.
@root_api.api_class(resource_name='empty')
class EmptyService(remote.Service):
pass
# Third class (& data), one method that returns a response.
class Response2(messages.Message):
bool_value = messages.BooleanField(1)
int32_value = messages.IntegerField(2)
@root_api.api_class(resource_name='simple')
class MySimpleService(remote.Service):
@api_config.method(message_types.VoidMessage, Response2,
name='entries.get', path='entries')
def EntriesGet(self, request):
pass
# Make sure api info is the same for all classes and all the _ApiInfo
# properties are accessible.
for cls in (RequestService, EmptyService, MySimpleService):
self.assertEqual(cls.api_info.name, 'root')
self.assertEqual(cls.api_info.version, 'v1')
self.assertEqual(cls.api_info.hostname, 'example.appspot.com')
self.assertIsNone(cls.api_info.audiences)
self.assertEqual(cls.api_info.allowed_client_ids,
[api_config.API_EXPLORER_CLIENT_ID])
self.assertEqual(cls.api_info.scopes, [api_config.EMAIL_SCOPE])
# Get the config for the combination of all 3.
api = json.loads(self.generator.pretty_print_config_to_json(
[RequestService, EmptyService, MySimpleService]))
expected = {
'root.request.my_request': {
'httpMethod': 'GET',
'path': 'request_path',
'request': {'body': 'empty'},
'response': {
'body': 'autoTemplate(backendResponse)',
'bodyName': 'resource'},
'rosyMethod': 'RequestService.my_request',
'clientIds': ['292824132082.apps.googleusercontent.com'],
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'authLevel': 'NONE',
},
'root.simple.entries.get': {
'httpMethod': 'POST',
'path': 'entries',
'request': {'body': 'empty'},
'response': {
'body': 'autoTemplate(backendResponse)',
'bodyName': 'resource'},
'rosyMethod': 'MySimpleService.EntriesGet',
'clientIds': ['292824132082.apps.googleusercontent.com'],
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'authLevel': 'NONE',
},
}
test_util.AssertDictEqual(expected, api['methods'], self)
expected_descriptor = {
'methods': {
'MySimpleService.EntriesGet': {
'response': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestResponse2')
}
},
'RequestService.my_request': {
'response': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestResponse1')
}
}
},
'schemas': {
_DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestResponse1': {
'id': _DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestResponse1',
'properties': {
'string_value': {
'type': 'string'
}
},
'type': 'object'
},
_DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestResponse2': {
'id': _DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestResponse2',
'properties': {
'bool_value': {
'type': 'boolean'
},
'int32_value': {
'format': 'int64',
'type': 'string'
}
},
'type': 'object'
}
}
}
test_util.AssertDictEqual(expected_descriptor, api['descriptor'], self)
def testMultipleClassesDifferentDecoratorInstance(self):
"""Test that using different instances of @api fails."""
root_api1 = api_config.api('root', 'v1', hostname='example.appspot.com')
root_api2 = api_config.api('root', 'v1', hostname='example.appspot.com')
@root_api1.api_class()
class EmptyService1(remote.Service):
pass
@root_api2.api_class()
class EmptyService2(remote.Service):
pass
self.assertRaises(api_exceptions.ApiConfigurationError,
self.generator.pretty_print_config_to_json,
[EmptyService1, EmptyService2])
def testMultipleClassesUsingSingleApiDecorator(self):
"""Test an API that's split into multiple classes using @api."""
@api_config.api('api', 'v1')
class EmptyService1(remote.Service):
pass
@api_config.api('api', 'v1')
class EmptyService2(remote.Service):
pass
self.assertRaises(api_exceptions.ApiConfigurationError,
self.generator.pretty_print_config_to_json,
[EmptyService1, EmptyService2])
def testMultipleClassesRepeatedResourceName(self):
"""Test a multiclass API that reuses a resource_name."""
root_api = api_config.api('root', 'v1', hostname='example.appspot.com')
@root_api.api_class(resource_name='repeated')
class Service1(remote.Service):
@api_config.method(message_types.VoidMessage, message_types.VoidMessage,
name='get', http_method='GET', path='get')
def get(self, request):
pass
@root_api.api_class(resource_name='repeated')
class Service2(remote.Service):
@api_config.method(message_types.VoidMessage, message_types.VoidMessage,
name='list', http_method='GET', path='list')
def list(self, request):
pass
api = json.loads(self.generator.pretty_print_config_to_json(
[Service1, Service2]))
expected = {
'root.repeated.get': {
'httpMethod': 'GET',
'path': 'get',
'request': {'body': 'empty'},
'response': {'body': 'empty'},
'rosyMethod': 'Service1.get',
'clientIds': [api_config.API_EXPLORER_CLIENT_ID],
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'authLevel': 'NONE',
},
'root.repeated.list': {
'httpMethod': 'GET',
'path': 'list',
'request': {'body': 'empty'},
'response': {'body': 'empty'},
'rosyMethod': 'Service2.list',
'clientIds': [api_config.API_EXPLORER_CLIENT_ID],
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'authLevel': 'NONE',
},
}
test_util.AssertDictEqual(expected, api['methods'], self)
def testMultipleClassesRepeatedMethodName(self):
"""Test a multiclass API that reuses a method name."""
root_api = api_config.api('root', 'v1', hostname='example.appspot.com')
@root_api.api_class(resource_name='repeated')
class Service1(remote.Service):
@api_config.method(message_types.VoidMessage, message_types.VoidMessage,
name='get', http_method='GET')
def get(self, request):
pass
@root_api.api_class(resource_name='repeated')
class Service2(remote.Service):
@api_config.method(message_types.VoidMessage, message_types.VoidMessage,
name='get', http_method='POST')
def get(self, request):
pass
self.assertRaises(api_exceptions.ApiConfigurationError,
self.generator.pretty_print_config_to_json,
[Service1, Service2])
def testRepeatedRestPathAndHttpMethod(self):
"""If the same HTTP method & path are reused, that should raise an error."""
@api_config.api(name='root', version='v1', hostname='example.appspot.com')
class MySimpleService(remote.Service):
@api_config.method(message_types.VoidMessage, message_types.VoidMessage,
path='path', http_method='GET')
def Path1(self, unused_request):
return message_types.VoidMessage()
@api_config.method(message_types.VoidMessage, message_types.VoidMessage,
path='path', http_method='GET')
def Path2(self, unused_request):
return message_types.VoidMessage()
self.assertRaises(api_exceptions.ApiConfigurationError,
self.generator.pretty_print_config_to_json,
MySimpleService)
def testMulticlassRepeatedRestPathAndHttpMethod(self):
"""If the same HTTP method & path are reused, that should raise an error."""
root_api = api_config.api('root', 'v1', hostname='example.appspot.com')
@root_api.api_class(resource_name='resource1')
class Service1(remote.Service):
@api_config.method(message_types.VoidMessage, message_types.VoidMessage,
path='path', http_method='GET')
def Path1(self, unused_request):
return message_types.VoidMessage()
@root_api.api_class(resource_name='resource2')
class Service2(remote.Service):
@api_config.method(message_types.VoidMessage, message_types.VoidMessage,
path='path', http_method='GET')
def Path2(self, unused_request):
return message_types.VoidMessage()
self.assertRaises(api_exceptions.ApiConfigurationError,
self.generator.pretty_print_config_to_json,
[Service1, Service2])
def testRepeatedRpcMethodName(self):
"""Test an API that reuses the same RPC name for two methods."""
@api_config.api('root', 'v1', hostname='example.appspot.com')
class MyService(remote.Service):
@api_config.method(message_types.VoidMessage, message_types.VoidMessage,
name='get', http_method='GET', path='path1')
def get(self, request):
pass
@api_config.method(message_types.VoidMessage, message_types.VoidMessage,
name='get', http_method='GET', path='path2')
def another_get(self, request):
pass
self.assertRaises(api_exceptions.ApiConfigurationError,
self.generator.pretty_print_config_to_json, [MyService])
def testMultipleClassesRepeatedMethodNameUniqueResource(self):
"""Test a multiclass API reusing a method name but different resource."""
root_api = api_config.api('root', 'v1', hostname='example.appspot.com')
@root_api.api_class(resource_name='resource1')
class Service1(remote.Service):
@api_config.method(message_types.VoidMessage, message_types.VoidMessage,
name='get', http_method='GET', path='get1')
def get(self, request):
pass
@root_api.api_class(resource_name='resource2')
class Service2(remote.Service):
@api_config.method(message_types.VoidMessage, message_types.VoidMessage,
name='get', http_method='GET', path='get2')
def get(self, request):
pass
api = json.loads(self.generator.pretty_print_config_to_json(
[Service1, Service2]))
expected = {
'root.resource1.get': {
'httpMethod': 'GET',
'path': 'get1',
'request': {'body': 'empty'},
'response': {'body': 'empty'},
'rosyMethod': 'Service1.get',
'clientIds': [api_config.API_EXPLORER_CLIENT_ID],
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'authLevel': 'NONE',
},
'root.resource2.get': {
'httpMethod': 'GET',
'path': 'get2',
'request': {'body': 'empty'},
'response': {'body': 'empty'},
'rosyMethod': 'Service2.get',
'clientIds': [api_config.API_EXPLORER_CLIENT_ID],
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'authLevel': 'NONE',
},
}
test_util.AssertDictEqual(expected, api['methods'], self)
def testMultipleClassesRepeatedMethodNameUniqueResourceParams(self):
"""Test the same method name with different args in different resources."""
root_api = api_config.api('root', 'v1', hostname='example.appspot.com')
class Request1(messages.Message):
bool_value = messages.BooleanField(1)
class Response1(messages.Message):
bool_value = messages.BooleanField(1)
class Request2(messages.Message):
bool_value = messages.BooleanField(1)
class Response2(messages.Message):
bool_value = messages.BooleanField(1)
@root_api.api_class(resource_name='resource1')
class Service1(remote.Service):
@api_config.method(Request1, Response1,
name='get', http_method='GET', path='get1')
def get(self, request):
pass
@root_api.api_class(resource_name='resource2')
class Service2(remote.Service):
@api_config.method(Request2, Response2,
name='get', http_method='GET', path='get2')
def get(self, request):
pass
api = json.loads(self.generator.pretty_print_config_to_json(
[Service1, Service2]))
expected = {
'root.resource1.get': {
'httpMethod': 'GET',
'path': 'get1',
'request': {
'body': 'empty',
'parameters': {
'bool_value': {
'type': 'boolean'
}
}
},
'response': {'body': 'autoTemplate(backendResponse)',
'bodyName': 'resource'},
'rosyMethod': 'Service1.get',
'clientIds': [api_config.API_EXPLORER_CLIENT_ID],
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'authLevel': 'NONE',
},
'root.resource2.get': {
'httpMethod': 'GET',
'path': 'get2',
'request': {
'body': 'empty',
'parameters': {
'bool_value': {
'type': 'boolean'
}
}
},
'response': {'body': 'autoTemplate(backendResponse)',
'bodyName': 'resource'},
'rosyMethod': 'Service2.get',
'clientIds': [api_config.API_EXPLORER_CLIENT_ID],
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'authLevel': 'NONE',
},
}
test_util.AssertDictEqual(expected, api['methods'], self)
expected_descriptor = {
'methods': {
'Service1.get': {
'response': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestResponse1')
}
},
'Service2.get': {
'response': {
'$ref': (_DESCRIPTOR_PATH_PREFIX +
'ApiConfigTestResponse2')
}
}
},
'schemas': {
_DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestResponse1': {
'id': _DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestResponse1',
'properties': {
'bool_value': {
'type': 'boolean'
}
},
'type': 'object'
},
_DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestResponse2': {
'id': _DESCRIPTOR_PATH_PREFIX + 'ApiConfigTestResponse2',
'properties': {
'bool_value': {
'type': 'boolean'
}
},
'type': 'object'
}
}
}
test_util.AssertDictEqual(expected_descriptor, api['descriptor'], self)
def testMultipleClassesNoResourceName(self):
"""Test a multiclass API with a collection with no resource_name."""
root_api = api_config.api('root', 'v1', hostname='example.appspot.com')
@root_api.api_class()
class TestService(remote.Service):
@api_config.method(http_method='GET')
def donothing(self):
pass
@api_config.method(http_method='POST', name='alternate')
def foo(self):
pass
api = json.loads(self.generator.pretty_print_config_to_json(
[TestService]))
expected = {
'root.donothing': {
'httpMethod': 'GET',
'path': 'donothing',
'request': {'body': 'empty'},
'response': {'body': 'empty'},
'rosyMethod': 'TestService.donothing',
'clientIds': ['292824132082.apps.googleusercontent.com'],
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'authLevel': 'NONE',
},
'root.alternate': {
'httpMethod': 'POST',
'path': 'foo',
'request': {'body': 'empty'},
'response': {'body': 'empty'},
'rosyMethod': 'TestService.foo',
'clientIds': ['292824132082.apps.googleusercontent.com'],
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'authLevel': 'NONE',
},
}
test_util.AssertDictEqual(expected, api['methods'], self)
def testMultipleClassesBasePathInteraction(self):
"""Test path appending in a multiclass API."""
root_api = api_config.api('root', 'v1', hostname='example.appspot.com')
@root_api.api_class(path='base_path')
class TestService(remote.Service):
@api_config.method(http_method='GET')
def at_base(self):
pass
@api_config.method(http_method='GET', path='appended')
def append_to_base(self):
pass
@api_config.method(http_method='GET', path='appended/more')
def append_to_base2(self):
pass
@api_config.method(http_method='GET', path='/ignore_base')
def absolute(self):
pass
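    # Expected path joining, asserted below: methods with no explicit path or
    # a relative path are prefixed with the collection's 'base_path', while a
    # path starting with '/' (e.g. '/ignore_base') bypasses the base path.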
api = json.loads(self.generator.pretty_print_config_to_json(
[TestService]))
expected = {
'root.at_base': {
'httpMethod': 'GET',
'path': 'base_path/at_base',
'request': {'body': 'empty'},
'response': {'body': 'empty'},
'rosyMethod': 'TestService.at_base',
'clientIds': ['292824132082.apps.googleusercontent.com'],
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'authLevel': 'NONE',
},
'root.append_to_base': {
'httpMethod': 'GET',
'path': 'base_path/appended',
'request': {'body': 'empty'},
'response': {'body': 'empty'},
'rosyMethod': 'TestService.append_to_base',
'clientIds': ['292824132082.apps.googleusercontent.com'],
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'authLevel': 'NONE',
},
'root.append_to_base2': {
'httpMethod': 'GET',
'path': 'base_path/appended/more',
'request': {'body': 'empty'},
'response': {'body': 'empty'},
'rosyMethod': 'TestService.append_to_base2',
'clientIds': ['292824132082.apps.googleusercontent.com'],
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'authLevel': 'NONE',
},
'root.absolute': {
'httpMethod': 'GET',
'path': 'ignore_base',
'request': {'body': 'empty'},
'response': {'body': 'empty'},
'rosyMethod': 'TestService.absolute',
'clientIds': ['292824132082.apps.googleusercontent.com'],
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'authLevel': 'NONE',
},
}
test_util.AssertDictEqual(expected, api['methods'], self)
def testMultipleClassesDifferentCollectionDefaults(self):
"""Test a multi-class API with settings overridden per collection."""
BASE_SCOPES = ['base_scope']
BASE_CLIENT_IDS = ['base_client_id']
root_api = api_config.api('root', 'v1', hostname='example.appspot.com',
audiences=['base_audience'],
scopes=BASE_SCOPES,
allowed_client_ids=BASE_CLIENT_IDS,
auth_level=AUTH_LEVEL.REQUIRED)
@root_api.api_class(resource_name='one', audiences=[])
class Service1(remote.Service):
pass
@root_api.api_class(resource_name='two', audiences=['audience2', 'foo'],
scopes=['service2_scope'],
allowed_client_ids=['s2_client_id'],
auth_level=AUTH_LEVEL.OPTIONAL)
class Service2(remote.Service):
pass
self.assertEqual(Service1.api_info.audiences, [])
self.assertEqual(Service1.api_info.scopes, BASE_SCOPES)
self.assertEqual(Service1.api_info.allowed_client_ids, BASE_CLIENT_IDS)
self.assertEqual(Service1.api_info.auth_level, AUTH_LEVEL.REQUIRED)
self.assertEqual(Service2.api_info.audiences, ['audience2', 'foo'])
self.assertEqual(Service2.api_info.scopes, ['service2_scope'])
self.assertEqual(Service2.api_info.allowed_client_ids, ['s2_client_id'])
self.assertEqual(Service2.api_info.auth_level, AUTH_LEVEL.OPTIONAL)
def testResourceContainerWarning(self):
"""Check the warning if a ResourceContainer isn't used when it should be."""
class TestGetRequest(messages.Message):
item_id = messages.StringField(1)
@api_config.api('myapi', 'v0', hostname='example.appspot.com')
class MyApi(remote.Service):
@api_config.method(TestGetRequest, message_types.VoidMessage,
path='test/{item_id}')
def Test(self, unused_request):
return message_types.VoidMessage()
# Verify that there's a warning and the name of the method is included
# in the warning.
logging.warning = mock.Mock()
self.generator.pretty_print_config_to_json(MyApi)
logging.warning.assert_called_with(mock.ANY, 'myapi.test')
def testFieldInPathWithBodyIsRequired(self):
    # TODO(kdeus): When the backwards compatibility for non-ResourceContainer
    #              parameter requests is removed, this class and the
    #              accompanying method should be removed.
class ItemsUpdateRequest(messages.Message):
itemId = messages.StringField(1)
items_update_request_container = resource_container.ResourceContainer(
**{field.name: field for field in ItemsUpdateRequest.all_fields()})
@api_config.api(name='root', hostname='example.appspot.com', version='v1')
class MyService(remote.Service):
"""Describes MyService."""
@api_config.method(ItemsUpdateRequest, message_types.VoidMessage,
path='items/{itemId}', name='items.update',
http_method='PUT')
def items_update(self, unused_request):
return message_types.VoidMessage()
@api_config.method(items_update_request_container,
path='items/container/{itemId}',
name='items.updateContainer',
http_method='PUT')
def items_update_container(self, unused_request):
return message_types.VoidMessage()
api = json.loads(self.generator.pretty_print_config_to_json(MyService))
params = {'itemId': {'required': True,
'type': 'string'}}
param_order = ['itemId']
items_update_config = {
'httpMethod': 'PUT',
'path': 'items/{itemId}',
'request': {'body': 'autoTemplate(backendRequest)',
'bodyName': 'resource',
'parameters': params,
'parameterOrder': param_order},
'response': {'body': 'empty'},
'rosyMethod': 'MyService.items_update',
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'clientIds': [api_config.API_EXPLORER_CLIENT_ID],
'authLevel': 'NONE',
}
update_container_cfg = items_update_config.copy()
update_container_cfg['path'] = 'items/container/{itemId}'
update_container_cfg['rosyMethod'] = 'MyService.items_update_container'
# Since we don't have a body in our container, the request will be empty.
request = update_container_cfg['request'].copy()
request.pop('bodyName')
request['body'] = 'empty'
update_container_cfg['request'] = request
expected = {
'root.items.update': items_update_config,
'root.items.updateContainer': update_container_cfg,
}
test_util.AssertDictEqual(expected, api['methods'], self)
def testFieldInPathNoBodyIsRequired(self):
class ItemsGetRequest(messages.Message):
itemId = messages.StringField(1)
@api_config.api(name='root', hostname='example.appspot.com', version='v1')
class MyService(remote.Service):
"""Describes MyService."""
@api_config.method(ItemsGetRequest, message_types.VoidMessage,
path='items/{itemId}', name='items.get',
http_method='GET')
def items_get(self, unused_request):
return message_types.VoidMessage()
api = json.loads(self.generator.pretty_print_config_to_json(MyService))
params = {'itemId': {'required': True,
'type': 'string'}}
param_order = ['itemId']
expected = {
'root.items.get': {
'httpMethod': 'GET',
'path': 'items/{itemId}',
'request': {'body': 'empty',
'parameters': params,
'parameterOrder': param_order},
'response': {'body': 'empty'},
'rosyMethod': 'MyService.items_get',
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'clientIds': [api_config.API_EXPLORER_CLIENT_ID],
'authLevel': 'NONE',
}
}
test_util.AssertDictEqual(expected, api['methods'], self)
def testAuthLevelRequired(self):
class ItemsGetRequest(messages.Message):
itemId = messages.StringField(1)
@api_config.api(name='root', hostname='example.appspot.com', version='v1')
class MyService(remote.Service):
"""Describes MyService."""
@api_config.method(ItemsGetRequest, message_types.VoidMessage,
path='items/{itemId}', name='items.get',
http_method='GET', auth_level=AUTH_LEVEL.REQUIRED)
def items_get(self, unused_request):
return message_types.VoidMessage()
api = json.loads(self.generator.pretty_print_config_to_json(MyService))
params = {'itemId': {'required': True,
'type': 'string'}}
param_order = ['itemId']
expected = {
'root.items.get': {
'httpMethod': 'GET',
'path': 'items/{itemId}',
'request': {'body': 'empty',
'parameters': params,
'parameterOrder': param_order},
'response': {'body': 'empty'},
'rosyMethod': 'MyService.items_get',
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'clientIds': [api_config.API_EXPLORER_CLIENT_ID],
'authLevel': 'REQUIRED',
}
}
test_util.AssertDictEqual(expected, api['methods'], self)
def testCustomUrl(self):
test_request = resource_container.ResourceContainer(
message_types.VoidMessage,
id=messages.IntegerField(1, required=True))
@api_config.api(name='testapicustomurl', version='v3',
hostname='example.appspot.com',
description='A wonderful API.', base_path='/my/base/path/')
class TestServiceCustomUrl(remote.Service):
@api_config.method(test_request,
message_types.VoidMessage,
http_method='DELETE', path='items/{id}')
# Silence lint warning about method naming conventions
# pylint: disable=g-bad-name
def delete(self, unused_request):
return message_types.VoidMessage()
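    # The custom base_path ('/my/base/path/') should replace the default
    # '/_ah/api' portion of the adapter's 'bns' URL, with the trailing slash
    # dropped, as asserted below.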
api = json.loads(
self.generator.pretty_print_config_to_json(TestServiceCustomUrl))
expected_adapter = {
'bns': 'https://example.appspot.com/my/base/path',
'type': 'lily',
'deadline': 10.0
}
test_util.AssertDictEqual(expected_adapter, api['adapter'], self)
class ApiConfigParamsDescriptorTest(unittest.TestCase):
def setUp(self):
self.generator = ApiConfigGenerator()
class OtherRefClass(messages.Message):
three = messages.BooleanField(1, repeated=True)
four = messages.FloatField(2, required=True)
five = messages.IntegerField(3, default=42)
self.other_ref_class = OtherRefClass
class RefClass(messages.Message):
one = messages.StringField(1)
two = messages.MessageField(OtherRefClass, 2)
not_two = messages.MessageField(OtherRefClass, 3, required=True)
self.ref_class = RefClass
class RefClassForContainer(messages.Message):
not_two = messages.MessageField(OtherRefClass, 3, required=True)
ref_class_container = resource_container.ResourceContainer(
RefClassForContainer,
one=messages.StringField(1),
two=messages.MessageField(OtherRefClass, 2))
@api_config.api(name='root', hostname='example.appspot.com', version='v1')
class MyService(remote.Service):
@api_config.method(RefClass, RefClass,
name='entries.get',
path='/a/{two.three}/{two.four}',
http_method='GET')
def entries_get(self, request):
return request
@api_config.method(RefClass, RefClass,
name='entries.put',
path='/b/{two.three}/{one}',
http_method='PUT')
def entries_put(self, request):
return request
# Flatten the fields intended for the put request into only parameters.
# This would not be a typical use, but is done to adhere to the behavior
# in the non-ResourceContainer case.
get_request_container = resource_container.ResourceContainer(
**{field.name: field for field in
ref_class_container.combined_message_class.all_fields()})
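      # Sketch (an assumption, not asserted by these tests): the flattened
      # container above is roughly the same as declaring a body-less
      # ResourceContainer with all three fields as parameters, e.g.
      #   resource_container.ResourceContainer(
      #       one=messages.StringField(1),
      #       two=messages.MessageField(OtherRefClass, 2),
      #       not_two=messages.MessageField(OtherRefClass, 3, required=True))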
@api_config.method(get_request_container, RefClass,
name='entries.getContainer',
path='/a/container/{two.three}/{two.four}',
http_method='GET')
def entries_get_container(self, request):
return request
@api_config.method(ref_class_container, RefClass,
name='entries.putContainer',
path='/b/container/{two.three}/{one}',
http_method='PUT')
def entries_put_container(self, request):
return request
self.api_str = self.generator.pretty_print_config_to_json(MyService)
self.api = json.loads(self.api_str)
self.m_field = messages.MessageField(RefClass, 1)
self.m_field.name = 'm_field'
def GetPrivateMethod(self, attr_name):
protected_attr_name = '_ApiConfigGenerator__' + attr_name
return getattr(self.generator, protected_attr_name)
def testFieldToSubfieldsSimpleField(self):
m_field = messages.StringField(1)
expected = [[m_field]]
self.assertItemsEqual(expected,
self.GetPrivateMethod('field_to_subfields')(m_field))
def testFieldToSubfieldsSingleMessageField(self):
class RefClass(messages.Message):
one = messages.StringField(1)
two = messages.IntegerField(2)
m_field = messages.MessageField(RefClass, 1)
expected = [
[m_field, RefClass.one],
[m_field, RefClass.two],
]
self.assertItemsEqual(expected,
self.GetPrivateMethod('field_to_subfields')(m_field))
def testFieldToSubfieldsDifferingDepth(self):
expected = [
[self.m_field, self.ref_class.one],
[self.m_field, self.ref_class.two, self.other_ref_class.three],
[self.m_field, self.ref_class.two, self.other_ref_class.four],
[self.m_field, self.ref_class.two, self.other_ref_class.five],
[self.m_field, self.ref_class.not_two, self.other_ref_class.three],
[self.m_field, self.ref_class.not_two, self.other_ref_class.four],
[self.m_field, self.ref_class.not_two, self.other_ref_class.five],
]
self.assertItemsEqual(
expected, self.GetPrivateMethod('field_to_subfields')(self.m_field))
def testGetPathParameters(self):
get_path_parameters = self.GetPrivateMethod('get_path_parameters')
expected = {
'c': ['c'],
'd': ['d.e'],
}
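    # A bare '{c}' maps the parameter to itself, '{d.e}' is grouped under its
    # top-level field 'd', and an empty '{}' contributes nothing; the
    # stray-bracket path checked below should yield no parameters at all.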
test_util.AssertDictEqual(
expected, get_path_parameters('/a/b/{c}/{d.e}/{}'), self)
test_util.AssertDictEqual(
{}, get_path_parameters('/stray{/brackets{in/the}middle'), self)
def testValidatePathParameters(self):
# This also tests __validate_simple_subfield indirectly
validate_path_parameters = self.GetPrivateMethod('validate_path_parameters')
self.assertRaises(TypeError, validate_path_parameters,
self.m_field, ['x'])
self.assertRaises(TypeError, validate_path_parameters,
self.m_field, ['m_field'])
self.assertRaises(TypeError, validate_path_parameters,
self.m_field, ['m_field.one_typo'])
# This should not fail
validate_path_parameters(self.m_field, ['m_field.one'])
def MethodDescriptorTest(self, method_name, path, param_order, parameters):
method_descriptor = self.api['methods'][method_name]
self.assertEqual(method_descriptor['path'], path)
request_descriptor = method_descriptor['request']
self.assertEqual(param_order, request_descriptor['parameterOrder'])
self.assertEqual(parameters, request_descriptor['parameters'])
def testParametersDescriptorEntriesGet(self):
parameters = {
'one': {
'type': 'string',
},
'two.three': {
'repeated': True,
'required': True,
'type': 'boolean',
},
'two.four': {
'required': True,
'type': 'double',
},
'two.five': {
'default': 42,
'type': 'int64'
},
'not_two.three': {
'repeated': True,
'type': 'boolean',
},
'not_two.four': {
'required': True,
'type': 'double',
},
'not_two.five': {
'default': 42,
'type': 'int64'
},
}
# Without container.
self.MethodDescriptorTest('root.entries.get', 'a/{two.three}/{two.four}',
['two.three', 'two.four', 'not_two.four'],
parameters)
# With container.
self.MethodDescriptorTest('root.entries.getContainer',
'a/container/{two.three}/{two.four}',
                              # Note: parameter order differs because of the
                              # way combined_message_class combines classes.
                              # This is not a big deal.
['not_two.four', 'two.three', 'two.four'],
parameters)
def testParametersDescriptorEntriesPut(self):
param_order = ['one', 'two.three']
parameters = {
'one': {
'required': True,
'type': 'string',
},
'two.three': {
'repeated': True,
'required': True,
'type': 'boolean',
},
'two.four': {
'type': 'double',
},
'two.five': {
'default': 42,
'type': 'int64'
},
}
# Without container.
self.MethodDescriptorTest('root.entries.put', 'b/{two.three}/{one}',
param_order, parameters)
# With container.
self.MethodDescriptorTest('root.entries.putContainer',
'b/container/{two.three}/{one}',
param_order, parameters)
class ApiDecoratorTest(unittest.TestCase):
def testApiInfoPopulated(self):
@api_config.api(name='CoolService', version='vX',
description='My Cool Service', hostname='myhost.com',
canonical_name='Cool Service Name')
class MyDecoratedService(remote.Service):
"""Describes MyDecoratedService."""
pass
api_info = MyDecoratedService.api_info
self.assertEqual('CoolService', api_info.name)
self.assertEqual('vX', api_info.version)
self.assertEqual('My Cool Service', api_info.description)
self.assertEqual('myhost.com', api_info.hostname)
self.assertEqual('Cool Service Name', api_info.canonical_name)
self.assertIsNone(api_info.audiences)
self.assertEqual([api_config.EMAIL_SCOPE], api_info.scopes)
self.assertEqual([api_config.API_EXPLORER_CLIENT_ID],
api_info.allowed_client_ids)
self.assertEqual(AUTH_LEVEL.NONE, api_info.auth_level)
self.assertEqual(None, api_info.resource_name)
self.assertEqual(None, api_info.path)
def testApiInfoDefaults(self):
@api_config.api('CoolService2', 'v2')
class MyDecoratedService(remote.Service):
"""Describes MyDecoratedService."""
pass
api_info = MyDecoratedService.api_info
self.assertEqual('CoolService2', api_info.name)
self.assertEqual('v2', api_info.version)
self.assertEqual(None, api_info.description)
self.assertEqual(None, api_info.hostname)
self.assertEqual(None, api_info.canonical_name)
self.assertEqual(None, api_info.title)
self.assertEqual(None, api_info.documentation)
def testGetApiClassesSingle(self):
"""Test that get_api_classes works when one class has been decorated."""
my_api = api_config.api(name='My Service', version='v1')
@my_api
class MyDecoratedService(remote.Service):
"""Describes MyDecoratedService."""
self.assertEqual([MyDecoratedService], my_api.get_api_classes())
def testGetApiClassesSingleCollection(self):
"""Test that get_api_classes works with the collection() decorator."""
my_api = api_config.api(name='My Service', version='v1')
@my_api.api_class(resource_name='foo')
class MyDecoratedService(remote.Service):
"""Describes MyDecoratedService."""
self.assertEqual([MyDecoratedService], my_api.get_api_classes())
def testGetApiClassesMultiple(self):
"""Test that get_api_classes works with multiple classes."""
my_api = api_config.api(name='My Service', version='v1')
@my_api.api_class(resource_name='foo')
class MyDecoratedService1(remote.Service):
"""Describes MyDecoratedService."""
@my_api.api_class(resource_name='bar')
class MyDecoratedService2(remote.Service):
"""Describes MyDecoratedService."""
@my_api.api_class(resource_name='baz')
class MyDecoratedService3(remote.Service):
"""Describes MyDecoratedService."""
self.assertEqual([MyDecoratedService1, MyDecoratedService2,
MyDecoratedService3], my_api.get_api_classes())
def testGetApiClassesMixedStyles(self):
"""Test that get_api_classes works when decorated differently."""
my_api = api_config.api(name='My Service', version='v1')
# @my_api is equivalent to @my_api.api_class(). This is allowed, though
# mixing styles like this shouldn't be encouraged.
@my_api
class MyDecoratedService1(remote.Service):
"""Describes MyDecoratedService."""
@my_api
class MyDecoratedService2(remote.Service):
"""Describes MyDecoratedService."""
@my_api.api_class(resource_name='foo')
class MyDecoratedService3(remote.Service):
"""Describes MyDecoratedService."""
self.assertEqual([MyDecoratedService1, MyDecoratedService2,
MyDecoratedService3], my_api.get_api_classes())
class MethodDecoratorTest(unittest.TestCase):
def testMethodId(self):
@api_config.api('foo', 'v2')
class MyDecoratedService(remote.Service):
"""Describes MyDecoratedService."""
@api_config.method()
def get(self):
pass
@api_config.method()
def people(self):
pass
@api_config.method()
def _get(self):
pass
@api_config.method()
def get_(self):
pass
@api_config.method()
def _(self):
pass
@api_config.method()
def _____(self):
pass
@api_config.method()
def people_update(self):
pass
@api_config.method()
def people_search(self):
pass
# pylint: disable=g-bad-name
@api_config.method()
def _several_underscores__in_various___places__(self):
pass
test_cases = [
('get', 'foo.get'),
('people', 'foo.people'),
('_get', 'foo.get'),
('get_', 'foo.get_'),
('_', 'foo.'),
('_____', 'foo.'),
('people_update', 'foo.people_update'),
('people_search', 'foo.people_search'),
('_several_underscores__in_various___places__',
'foo.several_underscores__in_various___places__')
]
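    # The derived method IDs show the rule under test: leading underscores are
    # stripped from the ProtoRPC method name before it is joined with the API
    # name, while trailing and interior underscores are preserved.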
for protorpc_method_name, expected in test_cases:
method_id = ''
info = getattr(MyDecoratedService, protorpc_method_name, None)
self.assertIsNotNone(info)
method_id = info.method_info.method_id(MyDecoratedService.api_info)
self.assertEqual(expected, method_id,
'unexpected result (%s) for: %s' %
(method_id, protorpc_method_name))
def testMethodInfoPopulated(self):
@api_config.api(name='CoolService', version='vX',
description='My Cool Service', hostname='myhost.com')
class MyDecoratedService(remote.Service):
"""Describes MyDecoratedService."""
@api_config.method(request_message=Nested,
response_message=AllFields,
name='items.operate',
path='items',
http_method='GET',
scopes=['foo'],
audiences=['bar'],
allowed_client_ids=['baz', 'bim'],
auth_level=AUTH_LEVEL.REQUIRED)
def my_method(self):
pass
method_info = MyDecoratedService.my_method.method_info
protorpc_info = MyDecoratedService.my_method.remote
self.assertEqual(Nested, protorpc_info.request_type)
self.assertEqual(AllFields, protorpc_info.response_type)
self.assertEqual('items.operate', method_info.name)
self.assertEqual('items', method_info.get_path(MyDecoratedService.api_info))
self.assertEqual('GET', method_info.http_method)
self.assertEqual(['foo'], method_info.scopes)
self.assertEqual(['bar'], method_info.audiences)
self.assertEqual(['baz', 'bim'], method_info.allowed_client_ids)
self.assertEqual(AUTH_LEVEL.REQUIRED, method_info.auth_level)
def testMethodInfoDefaults(self):
@api_config.api('CoolService2', 'v2')
class MyDecoratedService(remote.Service):
"""Describes MyDecoratedService."""
@api_config.method()
def my_method(self):
pass
method_info = MyDecoratedService.my_method.method_info
protorpc_info = MyDecoratedService.my_method.remote
self.assertEqual(message_types.VoidMessage, protorpc_info.request_type)
self.assertEqual(message_types.VoidMessage, protorpc_info.response_type)
self.assertEqual('my_method', method_info.name)
self.assertEqual('my_method',
method_info.get_path(MyDecoratedService.api_info))
self.assertEqual('POST', method_info.http_method)
self.assertEqual(None, method_info.scopes)
self.assertEqual(None, method_info.audiences)
self.assertEqual(None, method_info.allowed_client_ids)
self.assertEqual(None, method_info.auth_level)
def testMethodInfoPath(self):
class MyRequest(messages.Message):
"""Documentation for MyRequest."""
zebra = messages.StringField(1, required=True)
kitten = messages.StringField(2, required=True)
dog = messages.StringField(3)
panda = messages.StringField(4, required=True)
@api_config.api('CoolService3', 'v3')
class MyDecoratedService(remote.Service):
"""Describes MyDecoratedService."""
@api_config.method(MyRequest, message_types.VoidMessage)
def default_path_method(self):
pass
@api_config.method(MyRequest, message_types.VoidMessage,
path='zebras/{zebra}/pandas/{panda}/kittens/{kitten}')
def specified_path_method(self):
pass
specified_path_info = MyDecoratedService.specified_path_method.method_info
specified_protorpc_info = MyDecoratedService.specified_path_method.remote
self.assertEqual(MyRequest, specified_protorpc_info.request_type)
self.assertEqual(message_types.VoidMessage,
specified_protorpc_info.response_type)
self.assertEqual('specified_path_method', specified_path_info.name)
self.assertEqual('zebras/{zebra}/pandas/{panda}/kittens/{kitten}',
specified_path_info.get_path(MyDecoratedService.api_info))
self.assertEqual('POST', specified_path_info.http_method)
self.assertEqual(None, specified_path_info.scopes)
self.assertEqual(None, specified_path_info.audiences)
self.assertEqual(None, specified_path_info.allowed_client_ids)
self.assertEqual(None, specified_path_info.auth_level)
default_path_info = MyDecoratedService.default_path_method.method_info
default_protorpc_info = MyDecoratedService.default_path_method.remote
self.assertEqual(MyRequest, default_protorpc_info.request_type)
self.assertEqual(message_types.VoidMessage,
default_protorpc_info.response_type)
self.assertEqual('default_path_method', default_path_info.name)
self.assertEqual('default_path_method',
default_path_info.get_path(MyDecoratedService.api_info))
self.assertEqual('POST', default_path_info.http_method)
self.assertEqual(None, default_path_info.scopes)
self.assertEqual(None, default_path_info.audiences)
self.assertEqual(None, default_path_info.allowed_client_ids)
self.assertEqual(None, specified_path_info.auth_level)
def testInvalidPaths(self):
for path in ('invalid/mixed{param}',
'invalid/{param}mixed',
'invalid/mixed{param}mixed',
'invalid/{extra}{vars}',
'invalid/{}/emptyvar'):
@api_config.api('root', 'v1')
class MyDecoratedService(remote.Service):
"""Describes MyDecoratedService."""
@api_config.method(message_types.VoidMessage, message_types.VoidMessage,
path=path)
def test(self):
pass
self.assertRaises(api_exceptions.ApiConfigurationError,
MyDecoratedService.test.method_info.get_path,
MyDecoratedService.api_info)
def testMethodAttributeInheritance(self):
"""Test descriptor attributes that can be inherited from the main config."""
self.TryListAttributeVariations('audiences', 'audiences', None)
self.TryListAttributeVariations(
'scopes', 'scopes',
['https://www.googleapis.com/auth/userinfo.email'])
self.TryListAttributeVariations('allowed_client_ids', 'clientIds',
[api_config.API_EXPLORER_CLIENT_ID])
def TryListAttributeVariations(self, attribute_name, config_name,
default_expected):
"""Test setting an attribute in the API config and method configs.
The audiences, scopes and allowed_client_ids settings can be set
in either the main API config or on each of the methods. This helper
function tests each variation of one of these (whichever is specified)
and ensures that the api config has the right values.
Args:
attribute_name: Name of the keyword arg to pass to the api or method
decorator. Also the name of the attribute used to access that
variable on api_info or method_info.
config_name: Name of the variable as it appears in the configuration
output.
default_expected: The default expected value if the attribute isn't
specified on either the api or the method.
"""
# Try the various combinations of api-level and method-level settings.
# Test cases are: (api-setting, method-setting, expected)
test_cases = ((None, ['foo', 'bar'], ['foo', 'bar']),
(None, [], None),
(['foo', 'bar'], None, ['foo', 'bar']),
(['foo', 'bar'], ['foo', 'bar'], ['foo', 'bar']),
(['foo', 'bar'], ['foo', 'baz'], ['foo', 'baz']),
(['foo', 'bar'], [], None),
(['foo', 'bar'], ['abc'], ['abc']),
(None, None, default_expected))
for api_value, method_value, expected_value in test_cases:
api_kwargs = {attribute_name: api_value}
method_kwargs = {attribute_name: method_value}
@api_config.api('AuthService', 'v1', hostname='example.appspot.com',
**api_kwargs)
class AuthServiceImpl(remote.Service):
"""Describes AuthServiceImpl."""
@api_config.method(**method_kwargs)
def baz(self):
pass
self.assertEqual(api_value if api_value is not None else default_expected,
getattr(AuthServiceImpl.api_info, attribute_name))
self.assertEqual(method_value,
getattr(AuthServiceImpl.baz.method_info, attribute_name))
generator = ApiConfigGenerator()
api = json.loads(generator.pretty_print_config_to_json(AuthServiceImpl))
expected = {
'authService.baz': {
'httpMethod': 'POST',
'path': 'baz',
'request': {'body': 'empty'},
'response': {'body': 'empty'},
'rosyMethod': 'AuthServiceImpl.baz',
'scopes': ['https://www.googleapis.com/auth/userinfo.email'],
'clientIds': [api_config.API_EXPLORER_CLIENT_ID],
'authLevel': 'NONE'
}
}
if expected_value:
expected['authService.baz'][config_name] = expected_value
elif config_name in expected['authService.baz']:
del expected['authService.baz'][config_name]
test_util.AssertDictEqual(expected, api['methods'], self)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
tmhm/scikit-learn | benchmarks/bench_plot_nmf.py | 206 | 5890 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None):
'''
    W, H = alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None)
    Implements Lee & Seung's multiplicative-update algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
    R : RandomState instance, 'svd' or None, optional
        source of randomness; pass 'svd' to use the NNDSVD-based initializer
Returns
-------
    W : 2-ndarray, [n_samples, r]
        Component part of the factorization
    H : 2-ndarray, [r, n_features]
        Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
if R == "svd":
W, H = _initialize_nmf(V, r)
elif R is None:
R = np.random.mtrand._rand
W = np.abs(R.standard_normal((n, r)))
H = np.abs(R.standard_normal((r, m)))
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
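# Minimal usage sketch for alt_nnmf (illustrative only; the matrix size and
# rank below are arbitrary choices, not values used by the benchmark).
def _example_alt_nnmf():
    rng = np.random.RandomState(0)
    V = np.abs(rng.standard_normal((100, 40)))  # NMF expects non-negative data
    W, H = alt_nnmf(V, r=5, R=rng)
    return norm(V - np.dot(W, H))  # Frobenius reconstruction error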
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
it = 0
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init=None, max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, R=None, tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization benchmark results')
ax = fig.gca(projection='3d')
        for c, (label, timings) in zip('rbgcm', sorted(results.items())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
skdaccess/skdaccess | skdaccess/geo/wyoming_sounding/stream/data_fetcher.py | 2 | 3665 | # The MIT License (MIT)
# Copyright (c) 2018 Massachusetts Institute of Technology
#
# Author: Cody Rude
# This software has been created in projects supported by the US National
# Science Foundation and NASA (PI: Pankratius)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Scikit Data Access imports
from skdaccess.framework.data_class import DataFetcherStream, TableWrapper
from skdaccess.utilities.sounding_util import SoundingParser, generateQueries
# 3rd party imports
import pandas as pd
import numpy as np
from six.moves.urllib.request import urlopen
# Standard library imports
from collections import OrderedDict
class DataFetcher(DataFetcherStream):
''' DataFetcher for retrieving Wyoming Sounding data '''
def __init__(self, station_number, year, month, day_start, day_end, start_hour = 0, end_hour = 12):
'''
Initialize Data Fetcher
@param station_number: Station number
@param year: Input year
@param month: Input month (Integer for a single month, or a list of integers for multiple months)
@param day_start: First day of the month to include
@param day_end: Last day of the month to include
@param start_hour: Starting hour (may be either 0 or 12)
@param end_hour: Ending hour (may be either 0 or 12)
'''
self.station_number = station_number
if np.isscalar(year):
self.year_list = [year]
else:
self.year_list = year
if np.isscalar(month):
self.month_list = [month]
else:
self.month_list = month
self.day_start = day_start
self.day_end = day_end
self.start_hour = start_hour
self.end_hour = end_hour
super(DataFetcher, self).__init__()
def output(self, shared_lock = None, shared_list = None):
'''
Generate data wrapper
@return Wyoming sounding data in a data wrapper
'''
full_results_dict = OrderedDict()
full_meta_dict = OrderedDict()
for query_url in generateQueries(self.station_number, self.year_list, self.month_list, self.day_start,
self.day_end, self.start_hour, self.end_hour):
with urlopen(query_url) as in_data:
sp = SoundingParser()
sp.feed(in_data.read().decode())
for key, data in sp.data_dict.items():
full_results_dict[key] = data
for key, data in sp.metadata_dict.items():
full_meta_dict[key] = data
return TableWrapper(obj_wrap = full_results_dict, meta_data = full_meta_dict)
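# Illustrative construction sketch; the station number 72365 is only an assumed
# example, and calling output() performs live HTTP requests against the
# University of Wyoming sounding service.
def _example_wyoming_fetch():
    fetcher = DataFetcher(station_number=72365, year=2017, month=5,
                          day_start=1, day_end=2, start_hour=0, end_hour=12)
    return fetcher.output()  # TableWrapper of parsed soundings and metadata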
| mit |
and2egg/philharmonic | philharmonic/energy_meter/continuous_energy_meter.py | 2 | 4626 | '''
Created on Jun 18, 2012
@author: kermit
'''
import threading
import time
from Queue import Empty
import numpy as np
import pandas as pd
from datetime import datetime
import logging
import os
import pickle
from datetime import timedelta
import copy
from haley_api import Wattmeter
from philharmonic.energy_meter.exception import SilentWattmeterError
from philharmonic.timeseries.calculator import synthetic_power, \
build_synth_measurement
from philharmonic.timeseries.historian import deserialize_folder
def log(message):
print(message)
logging.info(message)
class ContinuousEnergyMeter(threading.Thread):
"""An energy meter that runs in the background (in a separate thread)
and reads experiment measurements.
"""
    def __init__(self, machines, metrics, interval,
                 location="energy_data.pickle", q=None):
'''
Constructor
@param machines: list of hostnames of machines to monitor
@param metrics: list of method objects that the energy meter will
perform and get the results of
@param interval: number of seconds to wait between measurements
        @param location: where to store the time series pickle
        @param q: optional queue used to signal 'quit' and to hand back the
          collected measurements when the thread stops
Builds an internal representation in self.data as a multi-index
        DataFrame, e.g.:
machine metric 14:24:24 14:24:25 ...
------------------------------------------------------------------------
snowwhite active_power 38 39
apparent_power 57 55
bashful active_power 50 47
apparent_power 78 80
------------------------------------------------------------------------
'''
threading.Thread.__init__(self)
        self.q = q  # queue used by run() to receive the 'quit' signal
self.machines = machines
self.metrics = metrics
self.interval = interval
self.location = location
        self.energy_meter = Wattmeter()
#this is under haley_api now
index_tuples = [(machine, metric) for machine
in self.machines for metric in self.metrics]
index = pd.MultiIndex.from_tuples(index_tuples,
names=["machine", "metric"])
self.data = pd.DataFrame({}, index = index)
logging.basicConfig(filename='io/energy_meter.log', level=logging.DEBUG,
format='%(asctime)s %(message)s')
log("\n-------------\nENERGY_METER\n-------------")
log("#wattmeter#start")
def get_all_data(self):
"""
@return: DataFrame containing measurements collected so far
"""
return self.data
def _add_current_data(self):
"""
Fetch current measurements from the energy meter
and add them to the past ones.
"""
# new_values = []
# for machine, metric in self.index_tuples:
# new_values.append(self.energy_meter.measure_single(machine,
# metric))
# new_series = pd.Series(new_values, index = self.index)
try:
new_series = self.energy_meter.measure_multiple(self.machines,
self.metrics)
except SilentWattmeterError:
log("Wattmeter doesn't respond too long. Quitting.")
self._finalize()
raise
current_time = datetime.now()
self.data[current_time] = new_series
def _only_active_power(self):
        Edit the data so that we only keep active_power, e.g.:
machine dopey doc
2014-10-21 16:57:24.347162 18 25
2014-10-21 16:57:24.833088 18 25
2014-10-21 16:57:25.363600 18 25
2014-10-21 16:57:25.893650 18 25
"""
self.data = self.data.xs('active_power', level='metric').transpose()
def _finalize(self):
self._only_active_power()
self.data.to_pickle(self.location)
log("#wattmeter#end")
log("-------------\n")
def run(self):
while True:
self._add_current_data()
time.sleep(self.interval)
try:
message = self.q.get_nowait()
if message == 'quit':
self._finalize()
self.q.put(self.data)
return
except Empty:
pass
print("Stopping background measurements.")
| gpl-3.0 |
robbymeals/scikit-learn | sklearn/utils/extmath.py | 142 | 21102 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
Equivalent to : np.log(nl.det(A)) but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
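# Minimal sanity-check sketch (illustrative): for a symmetric positive
# definite matrix fast_logdet agrees with the naive log-determinant.
def _example_fast_logdet():
    rng = np.random.RandomState(0)
    B = rng.randn(5, 5)
    A = np.dot(B, B.T) + 5 * np.eye(5)  # symmetric positive definite
    return fast_logdet(A), np.log(np.linalg.det(A))  # approximately equal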
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while warranting Fortran contiguity.
This helps avoiding extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
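# Minimal sketch of safe_sparse_dot with sparse operands (illustrative only).
def _example_safe_sparse_dot():
    from scipy import sparse as sp
    A = sp.csr_matrix(np.eye(3))
    B = sp.csr_matrix(np.arange(9.).reshape(3, 3))
    sparse_product = safe_sparse_dot(A, B)                    # stays sparse
    dense_product = safe_sparse_dot(A, B, dense_output=True)  # ndarray
    return sparse_product, dense_product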
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
A (size x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
    # sampling the range of A by linear projection of r
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
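# Minimal sketch (illustrative): the returned basis Q has orthonormal columns,
# so Q.T.dot(Q) is close to the identity matrix.
def _example_randomized_range_finder():
    rng = np.random.RandomState(0)
    A = rng.randn(60, 20)
    Q = randomized_range_finder(A, size=10, n_iter=3, random_state=0)
    return np.allclose(np.dot(Q.T, Q), np.eye(Q.shape[1]))  # True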
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tends to be a little faster in that
        case.
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior deterministic
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
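# Minimal usage sketch (illustrative): for a matrix of known low rank the
# truncated factors reconstruct it to numerical precision.
def _example_randomized_svd():
    rng = np.random.RandomState(0)
    X = np.dot(rng.randn(50, 5), rng.randn(5, 30))  # exactly rank 5
    U, s, V = randomized_svd(X, n_components=5, n_iter=4, random_state=0)
    return np.allclose(X, np.dot(U * s, V))  # True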
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
    # the least error
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
        Real symmetric or complex hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping. Otherwise,
use the rows of v. The choice of which variable to base the decision on
is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
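# Minimal sketch (illustrative): sign flipping keeps the reconstruction
# np.dot(U * s, V) unchanged while making the factor signs deterministic.
def _example_svd_flip():
    rng = np.random.RandomState(0)
    X = rng.randn(6, 4)
    U, s, V = linalg.svd(X, full_matrices=False)
    U_flipped, V_flipped = svd_flip(U, V)
    return np.allclose(X, np.dot(U_flipped * s, V_flipped))  # True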
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
out: array-like, shape: (M, N), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N)
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = check_array(X, dtype=np.float)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
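# Minimal sketch (illustrative): the split formulation stays finite for large
# negative inputs, where the naive expression overflows and returns -inf.
def _example_log_logistic():
    x = np.array([[-1000., 0., 1000.]])
    naive = np.log(1. / (1. + np.exp(-x)))  # -inf plus an overflow warning
    stable = log_logistic(x)                # approx [-1000., -log(2), 0.]
    return naive, stable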
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
    Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
        analysis and recommendations, The American Statistician, Vol. 37,
        No. 3, pp. 242-247
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
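# Minimal sketch (illustrative): updating with a second batch reproduces the
# mean and variance of the concatenated data.
def _example_batch_mean_variance_update():
    rng = np.random.RandomState(0)
    X1, X2 = rng.randn(20, 3), rng.randn(30, 3)
    mean, var, count = _batch_mean_variance_update(
        X2, X1.mean(axis=0), X1.var(axis=0), X1.shape[0])
    X = np.vstack((X1, X2))
    return (np.allclose(mean, X.mean(axis=0)),
            np.allclose(var, X.var(axis=0)),
            count == X.shape[0])  # (True, True, True)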
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
| bsd-3-clause |
mkukielka/oddt | docs/conf.py | 1 | 12238 | # -*- coding: utf-8 -*-
#
# ODDT documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 25 13:49:30 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.mathjax',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'numpydoc',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members', 'imported-members']
autoclass_content = 'init'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Open Drug Discovery Toolkit'
copyright = u'2015, Maciej Wojcikowski'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from oddt import __version__ as VERSION
# The short X.Y version.
version = VERSION
# The full version, including alpha/beta/rc tags.
release = VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ODDTdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ODDT.tex', u'ODDT Documentation',
u'Maciej Wojcikowski', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'Open Drug Discovery Toolkit', u'ODDT Documentation',
[u'Maciej Wojcikowski'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Open Drug Discovery Toolkit', u'ODDT Documentation',
u'Maciej Wojcikowski', 'Open Drug Discovery Toolkit', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Open Drug Discovery Toolkit'
epub_author = u'Maciej Wojcikowski'
epub_publisher = u'Maciej Wojcikowski'
epub_copyright = u'2015, Maciej Wojcikowski'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'Open Drug Discovery Toolkit'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {}
intersphinx_mapping['sklearn'] = ('http://scikit-learn.org/stable', None)
intersphinx_mapping['numpy'] = ('http://docs.scipy.org/doc/numpy/', None)
intersphinx_mapping['scipy'] = ('http://docs.scipy.org/doc/scipy/reference/', None)
# Ignore some modules during documentation building on readthedocs.org
if os.environ.get('READTHEDOCS', None) == 'True':
# Invoke sphinx-apidoc
os.system("sphinx-apidoc -f -o rst/ ../oddt")
try:
        from unittest.mock import patch, MagicMock  # Python 3.3
except ImportError:
from mock import patch, MagicMock
pybel = MagicMock()
openbabel = MagicMock()
rdkit = MagicMock()
modules = {
# OpenBabel
'pybel': pybel,
'openbabel' : openbabel,
# RDK
'rdkit': rdkit,
'rdkit.Chem': rdkit.Chem,
'rdkit.DataStructs': rdkit.DataStructs,
'rdkit.Chem.MACCSkeys': rdkit.Chem.MACCSkeys,
'rdkit.Chem.AtomPairs': rdkit.Chem.AtomPairs,
'rdkit.Chem.AtomPairs.Pairs': rdkit.Chem.AtomPairs.Pairs,
'rdkit.Chem.AtomPairs.Torsions': rdkit.Chem.AtomPairs.Torsions,
'rdkit.Chem.Lipinski': rdkit.Chem.Lipinski,
'rdkit.Chem.AllChem': rdkit.Chem.AllChem,
'rdkit.Chem.Pharm2D': rdkit.Chem.Pharm2D,
}
p = patch.dict('sys.modules', modules)
p.start()
| bsd-3-clause |
LiaoPan/sklearn_pycon2015 | notebooks/fig_code/figures.py | 34 | 8633 | import numpy as np
import matplotlib.pyplot as plt
import warnings
def plot_venn_diagram():
fig, ax = plt.subplots(subplot_kw=dict(frameon=False, xticks=[], yticks=[]))
ax.add_patch(plt.Circle((0.3, 0.3), 0.3, fc='red', alpha=0.5))
ax.add_patch(plt.Circle((0.6, 0.3), 0.3, fc='blue', alpha=0.5))
ax.add_patch(plt.Rectangle((-0.1, -0.1), 1.1, 0.8, fc='none', ec='black'))
ax.text(0.2, 0.3, '$x$', size=30, ha='center', va='center')
ax.text(0.7, 0.3, '$y$', size=30, ha='center', va='center')
ax.text(0.0, 0.6, '$I$', size=30)
ax.axis('equal')
def plot_example_decision_tree():
fig = plt.figure(figsize=(10, 4))
ax = fig.add_axes([0, 0, 0.8, 1], frameon=False, xticks=[], yticks=[])
ax.set_title('Example Decision Tree: Animal Classification', size=24)
def text(ax, x, y, t, size=20, **kwargs):
ax.text(x, y, t,
ha='center', va='center', size=size,
bbox=dict(boxstyle='round', ec='k', fc='w'), **kwargs)
text(ax, 0.5, 0.9, "How big is\nthe animal?", 20)
text(ax, 0.3, 0.6, "Does the animal\nhave horns?", 18)
text(ax, 0.7, 0.6, "Does the animal\nhave two legs?", 18)
text(ax, 0.12, 0.3, "Are the horns\nlonger than 10cm?", 14)
text(ax, 0.38, 0.3, "Is the animal\nwearing a collar?", 14)
text(ax, 0.62, 0.3, "Does the animal\nhave wings?", 14)
text(ax, 0.88, 0.3, "Does the animal\nhave a tail?", 14)
text(ax, 0.4, 0.75, "> 1m", 12, alpha=0.4)
text(ax, 0.6, 0.75, "< 1m", 12, alpha=0.4)
text(ax, 0.21, 0.45, "yes", 12, alpha=0.4)
text(ax, 0.34, 0.45, "no", 12, alpha=0.4)
text(ax, 0.66, 0.45, "yes", 12, alpha=0.4)
text(ax, 0.79, 0.45, "no", 12, alpha=0.4)
ax.plot([0.3, 0.5, 0.7], [0.6, 0.9, 0.6], '-k')
ax.plot([0.12, 0.3, 0.38], [0.3, 0.6, 0.3], '-k')
ax.plot([0.62, 0.7, 0.88], [0.3, 0.6, 0.3], '-k')
ax.plot([0.0, 0.12, 0.20], [0.0, 0.3, 0.0], '--k')
ax.plot([0.28, 0.38, 0.48], [0.0, 0.3, 0.0], '--k')
ax.plot([0.52, 0.62, 0.72], [0.0, 0.3, 0.0], '--k')
ax.plot([0.8, 0.88, 1.0], [0.0, 0.3, 0.0], '--k')
ax.axis([0, 1, 0, 1])
def visualize_tree(estimator, X, y, boundaries=True,
xlim=None, ylim=None):
estimator.fit(X, y)
if xlim is None:
xlim = (X[:, 0].min() - 0.1, X[:, 0].max() + 0.1)
if ylim is None:
ylim = (X[:, 1].min() - 0.1, X[:, 1].max() + 0.1)
x_min, x_max = xlim
y_min, y_max = ylim
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
np.linspace(y_min, y_max, 100))
Z = estimator.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, alpha=0.2, cmap='rainbow')
plt.clim(y.min(), y.max())
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='rainbow')
plt.axis('off')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.clim(y.min(), y.max())
# Plot the decision boundaries
def plot_boundaries(i, xlim, ylim):
if i < 0:
return
tree = estimator.tree_
if tree.feature[i] == 0:
plt.plot([tree.threshold[i], tree.threshold[i]], ylim, '-k')
plot_boundaries(tree.children_left[i],
[xlim[0], tree.threshold[i]], ylim)
plot_boundaries(tree.children_right[i],
[tree.threshold[i], xlim[1]], ylim)
elif tree.feature[i] == 1:
plt.plot(xlim, [tree.threshold[i], tree.threshold[i]], '-k')
plot_boundaries(tree.children_left[i], xlim,
[ylim[0], tree.threshold[i]])
plot_boundaries(tree.children_right[i], xlim,
[tree.threshold[i], ylim[1]])
if boundaries:
plot_boundaries(0, plt.xlim(), plt.ylim())
def plot_tree_interactive(X, y):
from sklearn.tree import DecisionTreeClassifier
def interactive_tree(depth=1):
clf = DecisionTreeClassifier(max_depth=depth, random_state=0)
visualize_tree(clf, X, y)
from IPython.html.widgets import interact
return interact(interactive_tree, depth=[1, 5])
def plot_kmeans_interactive(min_clusters=1, max_clusters=6):
from IPython.html.widgets import interact
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.datasets.samples_generator import make_blobs
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
X, y = make_blobs(n_samples=300, centers=4,
random_state=0, cluster_std=0.60)
def _kmeans_step(frame=0, n_clusters=4):
rng = np.random.RandomState(2)
labels = np.zeros(X.shape[0])
centers = rng.randn(n_clusters, 2)
nsteps = frame // 3
for i in range(nsteps + 1):
old_centers = centers
if i < nsteps or frame % 3 > 0:
dist = euclidean_distances(X, centers)
labels = dist.argmin(1)
if i < nsteps or frame % 3 > 1:
centers = np.array([X[labels == j].mean(0)
for j in range(n_clusters)])
nans = np.isnan(centers)
centers[nans] = old_centers[nans]
# plot the data and cluster centers
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap='rainbow',
vmin=0, vmax=n_clusters - 1);
plt.scatter(old_centers[:, 0], old_centers[:, 1], marker='o',
c=np.arange(n_clusters),
s=200, cmap='rainbow')
plt.scatter(old_centers[:, 0], old_centers[:, 1], marker='o',
c='black', s=50)
# plot new centers if third frame
if frame % 3 == 2:
for i in range(n_clusters):
plt.annotate('', centers[i], old_centers[i],
arrowprops=dict(arrowstyle='->', linewidth=1))
plt.scatter(centers[:, 0], centers[:, 1], marker='o',
c=np.arange(n_clusters),
s=200, cmap='rainbow')
plt.scatter(centers[:, 0], centers[:, 1], marker='o',
c='black', s=50)
plt.xlim(-4, 4)
plt.ylim(-2, 10)
if frame % 3 == 1:
plt.text(3.8, 9.5, "1. Reassign points to nearest centroid",
ha='right', va='top', size=14)
elif frame % 3 == 2:
plt.text(3.8, 9.5, "2. Update centroids to cluster means",
ha='right', va='top', size=14)
return interact(_kmeans_step, frame=[0, 50],
n_clusters=[min_clusters, max_clusters])
def plot_image_components(x, coefficients=None, mean=0, components=None,
imshape=(8, 8), n_components=6, fontsize=12):
if coefficients is None:
coefficients = x
if components is None:
components = np.eye(len(coefficients), len(x))
mean = np.zeros_like(x) + mean
fig = plt.figure(figsize=(1.2 * (5 + n_components), 1.2 * 2))
g = plt.GridSpec(2, 5 + n_components, hspace=0.3)
def show(i, j, x, title=None):
ax = fig.add_subplot(g[i, j], xticks=[], yticks=[])
ax.imshow(x.reshape(imshape), interpolation='nearest')
if title:
ax.set_title(title, fontsize=fontsize)
show(slice(2), slice(2), x, "True")
approx = mean.copy()
show(0, 2, np.zeros_like(x) + mean, r'$\mu$')
show(1, 2, approx, r'$1 \cdot \mu$')
for i in range(0, n_components):
approx = approx + coefficients[i] * components[i]
show(0, i + 3, components[i], r'$c_{0}$'.format(i + 1))
show(1, i + 3, approx,
r"${0:.2f} \cdot c_{1}$".format(coefficients[i], i + 1))
plt.gca().text(0, 1.05, '$+$', ha='right', va='bottom',
transform=plt.gca().transAxes, fontsize=fontsize)
show(slice(2), slice(-2, None), approx, "Approx")
def plot_pca_interactive(data, n_components=6):
from sklearn.decomposition import PCA
from IPython.html.widgets import interact
pca = PCA(n_components=n_components)
Xproj = pca.fit_transform(data)
def show_decomp(i=0):
plot_image_components(data[i], Xproj[i],
pca.mean_, pca.components_)
interact(show_decomp, i=(0, data.shape[0] - 1));
| bsd-3-clause |
YinongLong/scikit-learn | sklearn/tests/test_pipeline.py | 7 | 24571 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_dict_equal
from sklearn.base import clone, BaseEstimator
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class NoFit(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class NoTrans(NoFit):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class NoInvTransf(NoTrans):
def transform(self, X, y=None):
return X
class Transf(NoInvTransf):
def transform(self, X, y=None):
return X
def inverse_transform(self, X):
return X
class Mult(BaseEstimator):
def __init__(self, mult=1):
self.mult = mult
def fit(self, X, y):
return self
def transform(self, X):
return np.asarray(X) * self.mult
def inverse_transform(self, X):
return np.asarray(X) / self.mult
def predict(self, X):
return (np.asarray(X) * self.mult).sum(axis=1)
predict_proba = predict_log_proba = decision_function = predict
def score(self, X, y=None):
return np.sum(X)
class FitParamT(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
self.successful = False
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
assert_raises_regex(TypeError,
'Last step of Pipeline should implement fit. '
'.*NoFit.*',
Pipeline, [('clf', NoFit())])
# Smoke test with only an estimator
clf = NoTrans()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't instantiate with non-transformers on the way
# Note that NoTrans implements fit, but not transform
assert_raises_regex(TypeError,
'All intermediate steps should be transformers'
'.*\\bNoTrans\\b.*',
Pipeline, [('t', NoTrans()), ('svc', clf)])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
    # Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(svd_solver='full', n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = PCA(n_components=2, svd_solver='randomized', whiten=True)
clf = SVC(probability=True, random_state=0, decision_function_shape='ovr')
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA(svd_solver='full')
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", Transf()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
# test error if some elements do not support transform
assert_raises_regex(TypeError,
'All estimators should implement fit and '
'transform.*\\bNoTrans\\b',
FeatureUnion,
[("transform", Transf()), ("no_transform", NoTrans())])
def test_make_union():
pca = PCA(svd_solver='full')
mock = Transf()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transf"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2, svd_solver='full')
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transf = Transf()
pipeline = Pipeline([('mock', transf)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transf.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_set_pipeline_steps():
transf1 = Transf()
transf2 = Transf()
pipeline = Pipeline([('mock', transf1)])
assert_true(pipeline.named_steps['mock'] is transf1)
# Directly setting attr
pipeline.steps = [('mock2', transf2)]
assert_true('mock' not in pipeline.named_steps)
assert_true(pipeline.named_steps['mock2'] is transf2)
assert_equal([('mock2', transf2)], pipeline.steps)
# Using set_params
pipeline.set_params(steps=[('mock', transf1)])
assert_equal([('mock', transf1)], pipeline.steps)
# Using set_params to replace single step
pipeline.set_params(mock=transf2)
assert_equal([('mock', transf2)], pipeline.steps)
# With invalid data
pipeline.set_params(steps=[('junk', ())])
assert_raises(TypeError, pipeline.fit, [[1]], [1])
assert_raises(TypeError, pipeline.fit_transform, [[1]], [1])
def test_set_pipeline_step_none():
# Test setting Pipeline steps to None
X = np.array([[1]])
y = np.array([1])
mult2 = Mult(mult=2)
mult3 = Mult(mult=3)
mult5 = Mult(mult=5)
def make():
return Pipeline([('m2', mult2), ('m3', mult3), ('last', mult5)])
pipeline = make()
exp = 2 * 3 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
pipeline.set_params(m3=None)
exp = 2 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
assert_dict_equal(pipeline.get_params(deep=True),
{'steps': pipeline.steps,
'm2': mult2,
'm3': None,
'last': mult5,
'm2__mult': 2,
'last__mult': 5,
})
pipeline.set_params(m2=None)
exp = 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
# for other methods, ensure no AttributeErrors on None:
other_methods = ['predict_proba', 'predict_log_proba',
'decision_function', 'transform', 'score']
for method in other_methods:
getattr(pipeline, method)(X)
pipeline.set_params(m2=mult2)
exp = 2 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
pipeline = make()
pipeline.set_params(last=None)
# mult2 and mult3 are active
exp = 6
assert_array_equal([[exp]], pipeline.fit(X, y).transform(X))
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
assert_raise_message(AttributeError,
"'NoneType' object has no attribute 'predict'",
getattr, pipeline, 'predict')
# Check None step at construction time
exp = 2 * 5
pipeline = Pipeline([('m2', mult2), ('m3', None), ('last', mult5)])
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
def test_pipeline_ducktyping():
pipeline = make_pipeline(Mult(5))
pipeline.predict
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(Transf())
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(None)
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(Transf(), NoInvTransf())
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
assert_false(hasattr(pipeline, 'inverse_transform'))
pipeline = make_pipeline(NoInvTransf(), Transf())
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
assert_false(hasattr(pipeline, 'inverse_transform'))
def test_make_pipeline():
t1 = Transf()
t2 = Transf()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transf-1")
assert_equal(pipe.steps[1][0], "transf-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transf-1")
assert_equal(pipe.steps[1][0], "transf-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = PCA(n_components=2, svd_solver='randomized', random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", Transf()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
ft = FeatureUnion([("tr1", Transf())]).fit([[1]])
assert_raise_message(AttributeError,
'Transformer tr1 (type Transf) does not provide '
'get_feature_names', ft.get_feature_names)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
def test_X1d_inverse_transform():
transformer = Transf()
pipeline = make_pipeline(transformer)
X = np.ones(10)
msg = "1d X will not be reshaped in pipeline.inverse_transform"
assert_warns_message(FutureWarning, msg, pipeline.inverse_transform, X)
def test_set_feature_union_steps():
mult2 = Mult(2)
mult2.get_feature_names = lambda: ['x2']
mult3 = Mult(3)
mult3.get_feature_names = lambda: ['x3']
mult5 = Mult(5)
mult5.get_feature_names = lambda: ['x5']
ft = FeatureUnion([('m2', mult2), ('m3', mult3)])
assert_array_equal([[2, 3]], ft.transform(np.asarray([[1]])))
assert_equal(['m2__x2', 'm3__x3'], ft.get_feature_names())
# Directly setting attr
ft.transformer_list = [('m5', mult5)]
assert_array_equal([[5]], ft.transform(np.asarray([[1]])))
assert_equal(['m5__x5'], ft.get_feature_names())
# Using set_params
ft.set_params(transformer_list=[('mock', mult3)])
assert_array_equal([[3]], ft.transform(np.asarray([[1]])))
assert_equal(['mock__x3'], ft.get_feature_names())
# Using set_params to replace single step
ft.set_params(mock=mult5)
assert_array_equal([[5]], ft.transform(np.asarray([[1]])))
assert_equal(['mock__x5'], ft.get_feature_names())
def test_set_feature_union_step_none():
mult2 = Mult(2)
mult2.get_feature_names = lambda: ['x2']
mult3 = Mult(3)
mult3.get_feature_names = lambda: ['x3']
X = np.asarray([[1]])
ft = FeatureUnion([('m2', mult2), ('m3', mult3)])
assert_array_equal([[2, 3]], ft.fit(X).transform(X))
assert_array_equal([[2, 3]], ft.fit_transform(X))
assert_equal(['m2__x2', 'm3__x3'], ft.get_feature_names())
ft.set_params(m2=None)
assert_array_equal([[3]], ft.fit(X).transform(X))
assert_array_equal([[3]], ft.fit_transform(X))
assert_equal(['m3__x3'], ft.get_feature_names())
ft.set_params(m3=None)
assert_array_equal([[]], ft.fit(X).transform(X))
assert_array_equal([[]], ft.fit_transform(X))
assert_equal([], ft.get_feature_names())
# check we can change back
ft.set_params(m3=mult3)
assert_array_equal([[3]], ft.fit(X).transform(X))
def test_step_name_validation():
bad_steps1 = [('a__q', Mult(2)), ('b', Mult(3))]
bad_steps2 = [('a', Mult(2)), ('a', Mult(3))]
for cls, param in [(Pipeline, 'steps'),
(FeatureUnion, 'transformer_list')]:
# we validate in construction (despite scikit-learn convention)
bad_steps3 = [('a', Mult(2)), (param, Mult(3))]
for bad_steps, message in [
(bad_steps1, "Step names must not contain __: got ['a__q']"),
(bad_steps2, "Names provided are not unique: ['a', 'a']"),
(bad_steps3, "Step names conflict with constructor "
"arguments: ['%s']" % param),
]:
# three ways to make invalid:
# - construction
assert_raise_message(ValueError, message, cls,
**{param: bad_steps})
# - setattr
est = cls(**{param: [('a', Mult(1))]})
setattr(est, param, bad_steps)
assert_raise_message(ValueError, message, est.fit, [[1]], [1])
assert_raise_message(ValueError, message, est.fit_transform,
[[1]], [1])
# - set_params
est = cls(**{param: [('a', Mult(1))]})
est.set_params(**{param: bad_steps})
assert_raise_message(ValueError, message, est.fit, [[1]], [1])
assert_raise_message(ValueError, message, est.fit_transform,
[[1]], [1])
| bsd-3-clause |
gorakhargosh/ThinkStats2 | code/regression.py | 62 | 9652 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import math
import pandas
import random
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
import re
import chap01soln
import first
import linear
import thinkplot
import thinkstats2
def QuickLeastSquares(xs, ys):
"""Estimates linear least squares fit and returns MSE.
xs: sequence of values
ys: sequence of values
returns: inter, slope, mse
"""
n = float(len(xs))
meanx = xs.mean()
dxs = xs - meanx
varx = np.dot(dxs, dxs) / n
meany = ys.mean()
dys = ys - meany
cov = np.dot(dxs, dys) / n
slope = cov / varx
inter = meany - slope * meanx
res = ys - (inter + slope * xs)
mse = np.dot(res, res) / n
return inter, slope, mse
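# Illustrative sanity check for QuickLeastSquares (a sketch, not used by the
# analyses below): on noise-free data y = 2x + 1 the closed-form estimates
# recover the generating parameters and the MSE is ~0.
#     xs = np.linspace(0, 10, 11)
#     ys = 2 * xs + 1
#     inter, slope, mse = QuickLeastSquares(xs, ys)
#     # inter -> 1.0, slope -> 2.0, mse -> ~0.0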
def ReadVariables():
"""Reads Stata dictionary files for NSFG data.
returns: DataFrame that maps variables names to descriptions
"""
vars1 = thinkstats2.ReadStataDct('2002FemPreg.dct').variables
vars2 = thinkstats2.ReadStataDct('2002FemResp.dct').variables
all_vars = vars1.append(vars2)
all_vars.index = all_vars.name
return all_vars
def JoinFemResp(df):
"""Reads the female respondent file and joins on caseid.
df: DataFrame
"""
resp = chap01soln.ReadFemResp()
resp.index = resp.caseid
join = df.join(resp, on='caseid', rsuffix='_r')
# convert from colon-separated time strings to datetimes
join.screentime = pandas.to_datetime(join.screentime)
return join
def GoMining(df):
"""Searches for variables that predict birth weight.
df: DataFrame of pregnancy records
returns: list of (rsquared, variable name) pairs
"""
variables = []
for name in df.columns:
try:
if df[name].var() < 1e-7:
continue
formula = 'totalwgt_lb ~ agepreg + ' + name
formula = formula.encode('ascii')
model = smf.ols(formula, data=df)
if model.nobs < len(df)/2:
continue
results = model.fit()
except (ValueError, TypeError):
continue
variables.append((results.rsquared, name))
return variables
def MiningReport(variables, n=30):
"""Prints variables with the highest R^2.
    variables: list of (R^2, variable name) pairs
n: number of pairs to print
"""
all_vars = ReadVariables()
variables.sort(reverse=True)
    for r2, name in variables[:n]:
        key = re.sub('_r$', '', name)
        try:
            desc = all_vars.loc[key].desc
            if isinstance(desc, pandas.Series):
                desc = desc[0]
            print(name, r2, desc)
        except KeyError:
            print(name, r2)
def PredictBirthWeight(live):
"""Predicts birth weight of a baby at 30 weeks.
live: DataFrame of live births
"""
live = live[live.prglngth>30]
join = JoinFemResp(live)
t = GoMining(join)
MiningReport(t)
formula = ('totalwgt_lb ~ agepreg + C(race) + babysex==1 + '
'nbrnaliv>1 + paydu==1 + totincr')
results = smf.ols(formula, data=join).fit()
SummarizeResults(results)
def SummarizeResults(results):
"""Prints the most important parts of linear regression results:
results: RegressionResults object
"""
for name, param in results.params.iteritems():
pvalue = results.pvalues[name]
print('%s %0.3g (%.3g)' % (name, param, pvalue))
try:
print('R^2 %.4g' % results.rsquared)
ys = results.model.endog
print('Std(ys) %.4g' % ys.std())
print('Std(res) %.4g' % results.resid.std())
except AttributeError:
print('R^2 %.4g' % results.prsquared)
def RunSimpleRegression(live):
"""Runs a simple regression and compare results to thinkstats2 functions.
live: DataFrame of live births
"""
# run the regression with thinkstats2 functions
live_dropna = live.dropna(subset=['agepreg', 'totalwgt_lb'])
ages = live_dropna.agepreg
weights = live_dropna.totalwgt_lb
inter, slope = thinkstats2.LeastSquares(ages, weights)
res = thinkstats2.Residuals(ages, weights, inter, slope)
r2 = thinkstats2.CoefDetermination(weights, res)
# run the regression with statsmodels
formula = 'totalwgt_lb ~ agepreg'
model = smf.ols(formula, data=live)
results = model.fit()
SummarizeResults(results)
def AlmostEquals(x, y, tol=1e-6):
return abs(x-y) < tol
assert(AlmostEquals(results.params['Intercept'], inter))
assert(AlmostEquals(results.params['agepreg'], slope))
assert(AlmostEquals(results.rsquared, r2))
def PivotTables(live):
"""Prints a pivot table comparing first babies to others.
live: DataFrame of live births
"""
table = pandas.pivot_table(live, rows='isfirst',
values=['totalwgt_lb', 'agepreg'])
print(table)
def FormatRow(results, columns):
"""Converts regression results to a string.
results: RegressionResults object
returns: string
"""
t = []
for col in columns:
coef = results.params.get(col, np.nan)
pval = results.pvalues.get(col, np.nan)
if np.isnan(coef):
s = '--'
elif pval < 0.001:
s = '%0.3g (*)' % (coef)
else:
s = '%0.3g (%0.2g)' % (coef, pval)
t.append(s)
try:
t.append('%.2g' % results.rsquared)
except AttributeError:
t.append('%.2g' % results.prsquared)
return t
def RunModels(live):
"""Runs regressions that predict birth weight.
live: DataFrame of pregnancy records
"""
columns = ['isfirst[T.True]', 'agepreg', 'agepreg2']
header = ['isfirst', 'agepreg', 'agepreg2']
rows = []
formula = 'totalwgt_lb ~ isfirst'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
formula = 'totalwgt_lb ~ agepreg'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
formula = 'totalwgt_lb ~ isfirst + agepreg'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
live['agepreg2'] = live.agepreg**2
formula = 'totalwgt_lb ~ isfirst + agepreg + agepreg2'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
PrintTabular(rows, header)
def PrintTabular(rows, header):
"""Prints results in LaTeX tabular format.
rows: list of rows
header: list of strings
"""
s = r'\hline ' + ' & '.join(header) + r' \\ \hline'
print(s)
for row in rows:
s = ' & '.join(row) + r' \\'
print(s)
print(r'\hline')
def LogisticRegressionExample():
"""Runs a simple example of logistic regression and prints results.
"""
y = np.array([0, 1, 0, 1])
x1 = np.array([0, 0, 0, 1])
x2 = np.array([0, 1, 1, 1])
beta = [-1.5, 2.8, 1.1]
log_o = beta[0] + beta[1] * x1 + beta[2] * x2
print(log_o)
o = np.exp(log_o)
print(o)
p = o / (o+1)
print(p)
like = y * p + (1-y) * (1-p)
print(like)
print(np.prod(like))
df = pandas.DataFrame(dict(y=y, x1=x1, x2=x2))
results = smf.logit('y ~ x1 + x2', data=df).fit()
print(results.summary())
def RunLogisticModels(live):
"""Runs regressions that predict sex.
live: DataFrame of pregnancy records
"""
#live = linear.ResampleRowsWeighted(live)
df = live[live.prglngth>30]
df['boy'] = (df.babysex==1).astype(int)
df['isyoung'] = (df.agepreg<20).astype(int)
    df['isold'] = (df.agepreg>35).astype(int)
df['season'] = (((df.datend+1) % 12) / 3).astype(int)
# run the simple model
model = smf.logit('boy ~ agepreg', data=df)
results = model.fit()
print('nobs', results.nobs)
print(type(results))
SummarizeResults(results)
# run the complex model
model = smf.logit('boy ~ agepreg + hpagelb + birthord + C(race)', data=df)
results = model.fit()
print('nobs', results.nobs)
print(type(results))
SummarizeResults(results)
# make the scatter plot
exog = pandas.DataFrame(model.exog, columns=model.exog_names)
endog = pandas.DataFrame(model.endog, columns=[model.endog_names])
xs = exog['agepreg']
lo = results.fittedvalues
o = np.exp(lo)
p = o / (o+1)
#thinkplot.Scatter(xs, p, alpha=0.1)
#thinkplot.Show()
# compute accuracy
actual = endog['boy']
baseline = actual.mean()
predict = (results.predict() >= 0.5)
true_pos = predict * actual
true_neg = (1 - predict) * (1 - actual)
acc = (sum(true_pos) + sum(true_neg)) / len(actual)
print(acc, baseline)
columns = ['agepreg', 'hpagelb', 'birthord', 'race']
new = pandas.DataFrame([[35, 39, 3, 1]], columns=columns)
y = results.predict(new)
print(y)
def main(name, data_dir='.'):
thinkstats2.RandomSeed(17)
LogisticRegressionExample()
live, firsts, others = first.MakeFrames()
live['isfirst'] = (live.birthord == 1)
RunLogisticModels(live)
RunSimpleRegression(live)
RunModels(live)
PredictBirthWeight(live)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
fzenke/morla | papers/utils.py | 1 | 10632 | #!/usr/bin/python
from scipy import sparse
from sklearn.feature_extraction.text import HashingVectorizer
import numpy as np
import re
from tqdm import tqdm
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
import django
from django.utils import timezone
from django.utils.encoding import smart_text
from django.db.models import Q
import bibtexparser
from bibtexparser.bparser import BibTexParser
from bibtexparser.customization import convert_to_unicode
from bibtexparser.latexenc import latex_to_unicode
# from pylatexenc.latex2text import latex2text
from datetime import datetime
django.setup()
from papers.models import Article, Profile, Feature, Recommendation, Similarity
from django.contrib.auth.models import User
feature_dims = { 'title' : 2**20, 'authors' : 2**16, 'abstract' : 2**20, 'keywords' : 2**16 }
def get_recommended_articles(request):
articles = []
if request.user.is_authenticated:
profile,_ = Profile.objects.get_or_create(user=request.user, defaults={'last_prediction_run': timezone.now(), 'last_traindata_update': timezone.now()})
articles = profile.suggested.all().order_by('-pubdate')
else: # Get suggested articles from all users
articles = Article.objects.filter(suggested__isnull=False).distinct().order_by('-pubdate')
return articles
def get_similar_articles(article, limit=11):
# similarities = Similarity.objects.filter( a=article ).order_by('-value')[:limit]
similarities = Similarity.objects.filter( Q(a=article) | Q(b=article) ).order_by('-value')[:limit]
articles = []
for s in similarities:
if s.a==article:
articles.append(s.b)
else:
articles.append(s.a)
return articles
def set_label(request, article_id, label=0):
""" Adds article to training set of the authenticated user with given label
args:
request the request object
article_id the article id
label the label which is either 1 for ham, -1 for spam or 0 which removes the label
"""
if not request.user.is_authenticated: return False
profile,_ = Profile.objects.get_or_create(user=request.user, defaults={'last_prediction_run': timezone.now(), 'last_traindata_update': timezone.now()})
article = Article.objects.get(id=article_id)
label = int(label)
if label > 0:
profile.ham.add(article)
profile.spam.remove(article)
elif label < 0:
profile.spam.add(article)
profile.ham.remove(article)
else:
profile.ham.remove(article)
profile.spam.remove(article)
# Store time when we last updated the profile
profile.last_traindata_update = timezone.now()
profile.save()
return True
def toggle_star(request, article_id):
""" Stars article or removes star if already starred
args:
request the request object
article_id the article id
    returns 1 if the article is now starred, 0 if the star was removed or the user is not authenticated
"""
if not request.user.is_authenticated: return 0
profile,_ = Profile.objects.get_or_create(user=request.user, defaults={'last_prediction_run': timezone.now(), 'last_traindata_update': timezone.now()})
article = Article.objects.get(id=int(article_id))
if profile.starred.filter(id=article_id).exists():
profile.starred.remove(article)
return 0
else:
profile.starred.add(article)
return 1
# Prepare re_pattern to filter Unicode which would take more than 3bytes (to avoid MySQL trouble)
re_pattern = re.compile(u'[^\u0000-\uD7FF\uE000-\uFFFF]', re.UNICODE)
def filter_using_re(unicode_string):
return re_pattern.sub(u'\uFFFD', unicode_string)
def prepare_string(x, max_length=None):
""" Converts a string from LaTeX escapes to UTF8 and truncates it to max_length """
# data = latex2text(x, tolerant_parsing=True)
try:
data = latex_to_unicode(filter_using_re(x))
if max_length is not None:
data = (data[:max_length-5] + '[...]') if len(data) > max_length else data
return smart_text(data)
except TypeError:
logger.warning("Encountered a TypeError which may be linked to unicode handling "
"in bibtexparser when processing the following string: %s."%x)
return ""
def key2str(key, dic, max_length=None):
""" Gets entry from dict and returns an empty string if the key does not exist. """
if key in dic.keys():
return prepare_string(dic[key], max_length=max_length)
else:
return ''
def key2int(key, dic):
""" Gets integer entry from dict and returns None if the key does not exist or if there is a ValueError. """
value=None
if key in dic.keys():
try:
value=int(dic[key])
except ValueError:
value=None
return value
def import_bibtex(bibtex_str, nb_max=None, update=True):
""" Reads a bibtex string and returns a list of Article instances """
parser = BibTexParser(ignore_nonstandard_types=False, homogenize_fields=True, common_strings=True)
parser.customization = convert_to_unicode
bib_database = bibtexparser.loads(bibtex_str, parser)
logger.info("Entries read from BibTeX data %i"%len(bib_database.entries))
# packaging into django objects
data = []
for e in bib_database.entries:
title = key2str('title',e, 250)
authors = key2str('author',e,500)
journal = key2str('journal',e,250)
abstract = key2str('abstract',e)
if not key2int('year',e) or not abstract or not title: continue
pubdate = datetime(key2int('year',e),1,1)
keywords = key2str('keyword',e,250)
args = dict(title=title,
authors=authors,
pubdate=pubdate,
journal=journal,
abstract=abstract,
keywords=keywords,
url=key2str('link',e),
doi=key2str('doi',e),
pmid=key2int('pmid',e),
)
if update:
art, created = add_or_update_article(**args)
else:
art, created = get_or_create(**args)
art.save()
data.append(art)
if nb_max is not None:
if len(data)>=nb_max:
break
logger.info("%i entries processed"%len(data))
return data
def compute_features( data ):
""" Converts a list of tuples with title, authors, abstract, keywords to sparse tokenized feature vectors. """
titles, authors, abstracts, keywords = zip(*data)
shared_params = dict(stop_words='english', strip_accents=None, non_negative=True, analyzer="word", ngram_range=(1,2) )
title_vectorizer = HashingVectorizer( n_features=feature_dims['title'], **shared_params )
authors_vectorizer = HashingVectorizer( n_features=feature_dims['authors'], **shared_params )
# journal_vectorizer = HashingVectorizer( n_features=feature_dims['journal'], **shared_params )
abstract_vectorizer = HashingVectorizer( n_features=feature_dims['abstract'], **shared_params )
keywords_vectorizer = HashingVectorizer( n_features=feature_dims['keywords'], **shared_params )
title_vecs = title_vectorizer.transform(titles)
authors_vecs = authors_vectorizer.transform(authors)
# journal_vecs = journal_vectorizer.transform(journals)
abstract_vecs = abstract_vectorizer.transform(abstracts)
keyword_vecs = keywords_vectorizer.transform(keywords)
feature_vectors = sparse.hstack((title_vecs, authors_vecs, abstract_vecs, keyword_vecs))
return feature_vectors
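# Minimal usage sketch for compute_features (the article fields below are made
# up for illustration): each record is a (title, authors, abstract, keywords)
# tuple and the result is a sparse matrix whose width equals the sum of the
# feature_dims values defined above.
#     records = [("A model of synaptic consolidation", "A. Author and B. Author",
#                 "We study plasticity in recurrent networks.", "plasticity")]
#     vecs = compute_features(records)
#     # vecs.shape == (1, sum(feature_dims.values()))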
def get_feature_vector( article ):
fts = Feature.objects.filter( article=article )
index = fts.values_list('index', flat=True)
val = fts.values_list('value', flat=True)
return index, val
def get_feature_vector_size():
feature_vector_size = 0
for itm in feature_dims.values():
feature_vector_size += itm
return feature_vector_size
def get_features_from_db( articles ):
data = []
row = []
col = []
for i,ai in enumerate(articles):
c,v = get_feature_vector( ai )
r = np.ones(len(c))*i
data.extend(v)
row.extend(r)
col.extend(c)
A = sparse.coo_matrix( (data, (row, col)), shape=(len(articles),get_feature_vector_size()))
return A
def add_to_training_set( profile, articles, label ):
""" Takes a user and a list of articles and adds them as training data with the given label """
if label>0:
profile.ham.add(*articles)
elif label<0:
profile.spam.add(*articles)
def add_or_update_article(title, authors, pubdate, journal, abstract, url=None, doi=None, keywords=None, pmid=None ):
art, created = Article.objects.get_or_create(
title=title,
authors=authors,
defaults={ 'pubdate' : pubdate, 'date_added' : timezone.now() }
)
art.journal=journal
art.pubdate=pubdate
art.abstract=abstract
art.url=url
art.doi=doi
if keywords is not None: art.keywords=keywords
if pmid is not None: art.pmid=pmid
art.date_added = timezone.now()
art.save()
Feature.objects.filter( article=art ).delete()
return art, created
def get_or_create(title, authors, pubdate, journal, abstract, url=None, doi=None, keywords=None, pmid=None ):
art, created = Article.objects.get_or_create(
title=title,
authors=authors,
defaults={ 'pubdate' : pubdate, 'date_added' : timezone.now() }
)
if created:
art.journal=journal
art.abstract=abstract
art.url=url
art.doi=doi
if keywords is not None: art.keywords=keywords
if pmid is not None: art.pmid=pmid
art.save()
return art, created
def get_training_set( profile, padrandom=True ):
articles = list( profile.ham.all() )
labels = [ 1 for i in range(len(articles)) ]
spam = profile.spam.all()
articles.extend( spam )
labels.extend( [ -1 for i in range(len(spam)) ] )
nb_pad = profile.ham.all().count() - profile.spam.all().count()
if padrandom and nb_pad>0:
logger.debug("Using %i random patterns to augment spam set"%nb_pad)
pad = Article.objects.exclude(ham=profile).order_by('?')[:nb_pad]
articles.extend( pad )
labels.extend( [ -1 for i in range(len(pad)) ] )
data = get_features_from_db( articles )
return data, np.array(labels)
if __name__ == "__main__":
profile,_ = Profile.objects.get_or_create(user=User.objects.all()[0])
data, labels = get_training_set( profile )
print(data)
print(labels)
| mit |
phobson/statsmodels | statsmodels/tools/grouputils.py | 2 | 22574 | # -*- coding: utf-8 -*-
"""Tools for working with groups
This provides several functions to work with groups and a Group class that
keeps track of the different representations and has methods to work more
easily with groups.
Author: Josef Perktold,
Author: Nathaniel Smith, recipe for sparse_dummies on scipy user mailing list
Created on Tue Nov 29 15:44:53 2011 : sparse_dummies
Created on Wed Nov 30 14:28:24 2011 : combine_indices
changes: add Group class
Notes
~~~~~
This reverses the class I used before, where the class was for the data and
the group was auxiliary. Here, it is only the group, no data is kept.
sparse_dummies needs checking for corner cases, e.g.
what if a category level has zero elements? This can happen with subset
selection even if the original groups were defined as arange.
Not all methods and options have been tried out yet after refactoring
need more efficient loop if groups are sorted -> see GroupSorted.group_iter
"""
from __future__ import print_function
from statsmodels.compat.python import lrange, lzip, range
import numpy as np
import pandas as pd
from statsmodels.compat.numpy import npc_unique
from statsmodels.compat.pandas import sort_values
import statsmodels.tools.data as data_util
from pandas.core.index import Index, MultiIndex
def combine_indices(groups, prefix='', sep='.', return_labels=False):
"""use np.unique to get integer group indices for product, intersection
"""
if isinstance(groups, tuple):
groups = np.column_stack(groups)
else:
groups = np.asarray(groups)
dt = groups.dtype
is2d = (groups.ndim == 2) # need to store
if is2d:
ncols = groups.shape[1]
if not groups.flags.c_contiguous:
groups = np.array(groups, order='C')
groups_ = groups.view([('', groups.dtype)] * groups.shape[1])
else:
groups_ = groups
uni, uni_idx, uni_inv = npc_unique(groups_, return_index=True,
return_inverse=True)
if is2d:
uni = uni.view(dt).reshape(-1, ncols)
# avoiding a view would be
# for t in uni.dtype.fields.values():
# assert (t[0] == dt)
#
# uni.dtype = dt
# uni.shape = (uni.size//ncols, ncols)
if return_labels:
label = [(prefix+sep.join(['%s']*len(uni[0]))) % tuple(ii)
for ii in uni]
return uni_inv, uni_idx, uni, label
else:
return uni_inv, uni_idx, uni
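# Small example (values can be verified by hand): two binary grouping variables
# are combined into one integer code per row.
#     groups = np.array([[0, 0], [0, 1], [1, 1], [0, 1]])
#     uni_inv, uni_idx, uni = combine_indices(groups)
#     # uni     -> array([[0, 0], [0, 1], [1, 1]])   unique rows
#     # uni_inv -> array([0, 1, 2, 1])               integer code per row
#     # uni_idx  gives, for each unique row, an index of a row in `groups`
#     #          where it occurs (here array([0, 1, 2]))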
# written for and used in try_covariance_grouploop.py
def group_sums(x, group, use_bincount=True):
"""simple bincount version, again
group : array, integer
assumed to be consecutive integers
no dtype checking because I want to raise in that case
uses loop over columns of x
for comparison, simple python loop
"""
x = np.asarray(x)
if x.ndim == 1:
x = x[:, None]
elif x.ndim > 2 and use_bincount:
raise ValueError('not implemented yet')
if use_bincount:
# re-label groups or bincount takes too much memory
if np.max(group) > 2 * x.shape[0]:
group = pd.factorize(group)[0]
return np.array([np.bincount(group, weights=x[:, col])
for col in range(x.shape[1])])
else:
uniques = np.unique(group)
result = np.zeros([len(uniques)] + list(x.shape[1:]))
for ii, cat in enumerate(uniques):
            result[ii] = x[group == cat].sum(0)
return result
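# Example (small enough to verify by hand): column-wise sums of x within each
# group label.
#     g = np.array([0, 0, 1, 1, 2])
#     x = np.arange(10).reshape(5, 2)
#     group_sums(x, g)
#     # -> array([[  2.,  10.,   8.],
#     #           [  4.,  12.,   9.]])    rows = columns of x, columns = groups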
def group_sums_dummy(x, group_dummy):
"""sum by groups given group dummy variable
group_dummy can be either ndarray or sparse matrix
"""
if data_util._is_using_ndarray_type(group_dummy, None):
return np.dot(x.T, group_dummy)
else: # check for sparse
return x.T * group_dummy
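# Equivalent dummy-variable formulation (illustrative): multiplying x.T by a 0/1
# group-indicator matrix sums x within groups.
#     g = np.array([0, 0, 1])
#     d = (g[:, None] == np.arange(2)).astype(int)    # dense dummy, shape (3, 2)
#     x = np.array([[1., 2.], [3., 4.], [5., 6.]])
#     group_sums_dummy(x, d)
#     # -> array([[ 4.,  5.],
#     #           [ 6.,  6.]])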
def dummy_sparse(groups):
"""create a sparse indicator from a group array with integer labels
Parameters
----------
groups: ndarray, int, 1d (nobs,)
an array of group indicators for each observation. Group levels are
assumed to be defined as consecutive integers, i.e. range(n_groups)
where n_groups is the number of group levels. A group level with no
observations for it will still produce a column of zeros.
Returns
-------
indi : ndarray, int8, 2d (nobs, n_groups)
an indicator array with one row per observation, that has 1 in the
column of the group level for that observation
Examples
--------
>>> g = np.array([0, 0, 2, 1, 1, 2, 0])
>>> indi = dummy_sparse(g)
>>> indi
<7x3 sparse matrix of type '<type 'numpy.int8'>'
with 7 stored elements in Compressed Sparse Row format>
>>> indi.todense()
matrix([[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0]], dtype=int8)
current behavior with missing groups
>>> g = np.array([0, 0, 2, 0, 2, 0])
>>> indi = dummy_sparse(g)
>>> indi.todense()
matrix([[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]], dtype=int8)
"""
from scipy import sparse
indptr = np.arange(len(groups)+1)
data = np.ones(len(groups), dtype=np.int8)
    indi = sparse.csr_matrix((data, groups, indptr))
return indi
class Group(object):
def __init__(self, group, name=''):
        self.group = np.asarray(group)  # TODO: use checks in combine_indices
self.name = name
uni, uni_idx, uni_inv = combine_indices(group)
# TODO: rename these to something easier to remember
self.group_int, self.uni_idx, self.uni = uni, uni_idx, uni_inv
self.n_groups = len(self.uni)
# put this here so they can be overwritten before calling labels
self.separator = '.'
self.prefix = self.name
if self.prefix:
self.prefix = self.prefix + '='
# cache decorator
def counts(self):
return np.bincount(self.group_int)
# cache_decorator
def labels(self):
# is this only needed for product of groups (intersection)?
prefix = self.prefix
uni = self.uni
sep = self.separator
if uni.ndim > 1:
label = [(prefix+sep.join(['%s']*len(uni[0]))) % tuple(ii)
for ii in uni]
else:
label = [prefix + '%s' % ii for ii in uni]
return label
def dummy(self, drop_idx=None, sparse=False, dtype=int):
"""
drop_idx is only available if sparse=False
drop_idx is supposed to index into uni
"""
uni = self.uni
if drop_idx is not None:
idx = lrange(len(uni))
del idx[drop_idx]
uni = uni[idx]
group = self.group
if not sparse:
return (group[:, None] == uni[None, :]).astype(dtype)
else:
return dummy_sparse(self.group_int)
def interaction(self, other):
if isinstance(other, self.__class__):
other = other.group
return self.__class__((self, other))
def group_sums(self, x, use_bincount=True):
return group_sums(x, self.group_int, use_bincount=use_bincount)
def group_demean(self, x, use_bincount=True):
nobs = float(len(x))
means_g = group_sums(x / nobs, self.group_int,
use_bincount=use_bincount)
x_demeaned = x - means_g[self.group_int] # check reverse_index?
return x_demeaned, means_g
class GroupSorted(Group):
def __init__(self, group, name=''):
super(self.__class__, self).__init__(group, name=name)
idx = (np.nonzero(np.diff(group))[0]+1).tolist()
self.groupidx = lzip([0] + idx, idx + [len(group)])
def group_iter(self):
for low, upp in self.groupidx:
yield slice(low, upp)
def lag_indices(self, lag):
"""return the index array for lagged values
Warning: if k is larger then the number of observations for an
individual, then no values for that individual are returned.
TODO: for the unbalanced case, I should get the same truncation for
the array with lag=0. From the return of lag_idx we wouldn't know
which individual is missing.
TODO: do I want the full equivalent of lagmat in tsa?
maxlag or lag or lags.
not tested yet
"""
lag_idx = np.asarray(self.groupidx)[:, 1] - lag # asarray or already?
mask_ok = (lag <= lag_idx)
# still an observation that belongs to the same individual
return lag_idx[mask_ok]
def _is_hierarchical(x):
"""
    Checks if the first item of an array-like object is also array-like.
    If so, we have a MultiIndex and return True. Else return False.
"""
item = x[0]
# is there a better way to do this?
if isinstance(item, (list, tuple, np.ndarray, pd.Series, pd.DataFrame)):
return True
else:
return False
def _make_hierarchical_index(index, names):
return MultiIndex.from_tuples(*[index], names=names)
def _make_generic_names(index):
n_names = len(index.names)
pad = str(len(str(n_names))) # number of digits
return [("group{0:0"+pad+"}").format(i) for i in range(n_names)]
class Grouping(object):
def __init__(self, index, names=None):
"""
index : index-like
Can be pandas MultiIndex or Index or array-like. If array-like
and is a MultipleIndex (more than one grouping variable),
groups are expected to be in each row. E.g., [('red', 1),
('red', 2), ('green', 1), ('green', 2)]
names : list or str, optional
The names to use for the groups. Should be a str if only
one grouping variable is used.
Notes
-----
If index is already a pandas Index then there is no copy.
"""
if isinstance(index, (Index, MultiIndex)):
if names is not None:
if hasattr(index, 'set_names'): # newer pandas
index.set_names(names, inplace=True)
else:
index.names = names
self.index = index
else: # array-like
if _is_hierarchical(index):
self.index = _make_hierarchical_index(index, names)
else:
self.index = Index(index, name=names)
if names is None:
names = _make_generic_names(self.index)
if hasattr(self.index, 'set_names'):
self.index.set_names(names, inplace=True)
else:
self.index.names = names
self.nobs = len(self.index)
self.nlevels = len(self.index.names)
self.slices = None
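    # For example (illustrative), a two-level grouping can be built directly
    # from row tuples:
    #     grp = Grouping([('red', 1), ('red', 2), ('green', 1), ('green', 2)],
    #                    names=['color', 'number'])
    #     grp.nobs      # -> 4
    #     grp.nlevels   # -> 2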
@property
def index_shape(self):
if hasattr(self.index, 'levshape'):
return self.index.levshape
else:
return self.index.shape
@property
def levels(self):
if hasattr(self.index, 'levels'):
return self.index.levels
else:
return pd.Categorical(self.index).levels
@property
def labels(self):
# this was index_int, but that's not a very good name...
if hasattr(self.index, 'labels'):
return self.index.labels
else: # pandas version issue here
# Compat code for the labels -> codes change in pandas 0.15
# FIXME: use .codes directly when we don't want to support
# pandas < 0.15
tmp = pd.Categorical(self.index)
try:
labl = tmp.codes
except AttributeError:
                labl = tmp.labels  # Old pandas
return labl[None]
@property
def group_names(self):
return self.index.names
def reindex(self, index=None, names=None):
"""
Resets the index in-place.
"""
# NOTE: this isn't of much use if the rest of the data doesn't change
# This needs to reset cache
if names is None:
names = self.group_names
self = Grouping(index, names)
def get_slices(self, level=0):
"""
Sets the slices attribute to be a list of indices of the sorted
groups for the first index level. I.e., self.slices[0] is the
index where each observation is in the first (sorted) group.
"""
# TODO: refactor this
groups = self.index.get_level_values(level).unique()
groups.sort()
if isinstance(self.index, MultiIndex):
self.slices = [self.index.get_loc_level(x, level=level)[0]
for x in groups]
else:
self.slices = [self.index.get_loc(x) for x in groups]
def count_categories(self, level=0):
"""
Sets the attribute counts to equal the bincount of the (integer-valued)
labels.
"""
# TODO: refactor this not to set an attribute. Why would we do this?
self.counts = np.bincount(self.labels[level])
def check_index(self, is_sorted=True, unique=True, index=None):
"""Sanity checks"""
if not index:
index = self.index
if is_sorted:
test = pd.DataFrame(lrange(len(index)), index=index)
test_sorted = test.sort()
if not test.index.equals(test_sorted.index):
                raise Exception('Data is not sorted')
if unique:
if len(index) != len(index.unique()):
raise Exception('Duplicate index entries')
def sort(self, data, index=None):
"""Applies a (potentially hierarchical) sort operation on a numpy array
or pandas series/dataframe based on the grouping index or a
user-supplied index. Returns an object of the same type as the
original data as well as the matching (sorted) Pandas index.
"""
if index is None:
index = self.index
if data_util._is_using_ndarray_type(data, None):
if data.ndim == 1:
out = pd.Series(data, index=index, copy=True)
out = out.sort_index()
else:
out = pd.DataFrame(data, index=index)
out = out.sort_index(inplace=False) # copies
return np.array(out), out.index
elif data_util._is_using_pandas(data, None):
out = data
out = out.reindex(index) # copies?
out = out.sort_index()
return out, out.index
else:
msg = 'data must be a Numpy array or a Pandas Series/DataFrame'
raise ValueError(msg)
def transform_dataframe(self, dataframe, function, level=0, **kwargs):
"""Apply function to each column, by group
Assumes that the dataframe already has a proper index"""
if dataframe.shape[0] != self.nobs:
raise Exception('dataframe does not have the same shape as index')
out = dataframe.groupby(level=level).apply(function, **kwargs)
if 1 in out.shape:
return np.ravel(out)
else:
return np.array(out)
def transform_array(self, array, function, level=0, **kwargs):
"""Apply function to each column, by group
"""
if array.shape[0] != self.nobs:
raise Exception('array does not have the same shape as index')
dataframe = pd.DataFrame(array, index=self.index)
return self.transform_dataframe(dataframe, function, level=level,
**kwargs)
def transform_slices(self, array, function, level=0, **kwargs):
"""Apply function to each group. Similar to transform_array but does
not coerce array to a DataFrame and back and only works on a 1D or 2D
numpy array. function is called function(group, group_idx, **kwargs).
"""
array = np.asarray(array)
if array.shape[0] != self.nobs:
raise Exception('array does not have the same shape as index')
# always reset because level is given. need to refactor this.
self.get_slices(level=level)
processed = []
for s in self.slices:
if array.ndim == 2:
subset = array[s, :]
elif array.ndim == 1:
subset = array[s]
processed.append(function(subset, s, **kwargs))
processed = np.array(processed)
return processed.reshape(-1, processed.shape[-1])
# TODO: this isn't general needs to be a PanelGrouping object
def dummies_time(self):
self.dummy_sparse(level=1)
return self._dummies
def dummies_groups(self, level=0):
self.dummy_sparse(level=level)
return self._dummies
def dummy_sparse(self, level=0):
"""create a sparse indicator from a group array with integer labels
Parameters
----------
groups: ndarray, int, 1d (nobs,) an array of group indicators for each
observation. Group levels are assumed to be defined as consecutive
integers, i.e. range(n_groups) where n_groups is the number of
group levels. A group level with no observations for it will still
produce a column of zeros.
Returns
-------
indi : ndarray, int8, 2d (nobs, n_groups)
an indicator array with one row per observation, that has 1 in the
column of the group level for that observation
Examples
--------
>>> g = np.array([0, 0, 2, 1, 1, 2, 0])
>>> indi = dummy_sparse(g)
>>> indi
<7x3 sparse matrix of type '<type 'numpy.int8'>'
with 7 stored elements in Compressed Sparse Row format>
>>> indi.todense()
matrix([[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0]], dtype=int8)
current behavior with missing groups
>>> g = np.array([0, 0, 2, 0, 2, 0])
>>> indi = dummy_sparse(g)
>>> indi.todense()
matrix([[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]], dtype=int8)
"""
from scipy import sparse
groups = self.labels[level]
indptr = np.arange(len(groups)+1)
data = np.ones(len(groups), dtype=np.int8)
self._dummies = sparse.csr_matrix((data, groups, indptr))
if __name__ == '__main__':
# ---------- examples combine_indices
from numpy.testing import assert_equal
np.random.seed(985367)
groups = np.random.randint(0, 2, size=(10, 2))
uv, ux, u, label = combine_indices(groups, return_labels=True)
uv, ux, u, label = combine_indices(groups, prefix='g1,g2=', sep=',',
return_labels=True)
group0 = np.array(['sector0', 'sector1'])[groups[:, 0]]
group1 = np.array(['region0', 'region1'])[groups[:, 1]]
uv, ux, u, label = combine_indices((group0, group1),
prefix='sector,region=',
sep=',',
return_labels=True)
uv, ux, u, label = combine_indices((group0, group1), prefix='', sep='.',
return_labels=True)
group_joint = np.array(label)[uv]
group_joint_expected = np.array(['sector1.region0', 'sector0.region1',
'sector0.region0', 'sector0.region1',
'sector1.region1', 'sector0.region0',
'sector1.region0', 'sector1.region0',
'sector0.region1', 'sector0.region0'],
dtype='|S15')
assert_equal(group_joint, group_joint_expected)
"""
>>> uv
array([2, 1, 0, 0, 1, 0, 2, 0, 1, 0])
>>> label
['sector0.region0', 'sector1.region0', 'sector1.region1']
>>> np.array(label)[uv]
array(['sector1.region1', 'sector1.region0', 'sector0.region0',
'sector0.region0', 'sector1.region0', 'sector0.region0',
'sector1.region1', 'sector0.region0', 'sector1.region0',
'sector0.region0'],
dtype='|S15')
>>> np.column_stack((group0, group1))
array([['sector1', 'region1'],
['sector1', 'region0'],
['sector0', 'region0'],
['sector0', 'region0'],
['sector1', 'region0'],
['sector0', 'region0'],
['sector1', 'region1'],
['sector0', 'region0'],
['sector1', 'region0'],
['sector0', 'region0']],
dtype='|S7')
"""
# ------------- examples sparse_dummies
from scipy import sparse
g = np.array([0, 0, 1, 2, 1, 1, 2, 0])
u = lrange(3)
indptr = np.arange(len(g)+1)
data = np.ones(len(g), dtype=np.int8)
a = sparse.csr_matrix((data, g, indptr))
print(a.todense())
print(np.all(a.todense() == (g[:, None] == np.arange(3)).astype(int)))
x = np.arange(len(g)*3).reshape(len(g), 3, order='F')
print('group means')
print(x.T * a)
print(np.dot(x.T, g[:, None] == np.arange(3)))
print(np.array([np.bincount(g, weights=x[:, col]) for col in range(3)]))
for cat in u:
print(x[g == cat].sum(0))
for cat in u:
x[g == cat].sum(0)
cc = sparse.csr_matrix([[0, 1, 0, 1, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 0, 1, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 1, 0, 1, 0]])
# ------------- groupsums
print(group_sums(np.arange(len(g)*3*2).reshape(len(g), 3, 2), g,
use_bincount=False).T)
print(group_sums(np.arange(len(g)*3*2).reshape(len(g), 3, 2)[:, :, 0], g))
print(group_sums(np.arange(len(g)*3*2).reshape(len(g), 3, 2)[:, :, 1], g))
# ------------- examples class
x = np.arange(len(g)*3).reshape(len(g), 3, order='F')
mygroup = Group(g)
print(mygroup.group_int)
print(mygroup.group_sums(x))
print(mygroup.labels())
| bsd-3-clause |
phdowling/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | 267 | 6813 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
    # Test that the MSE estimate is sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
| bsd-3-clause |
dkhavari/open-source-investing | scripts/simple_mean_reversion.py | 1 | 1738 | import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import statsmodels.formula.api as sm
from sklearn.linear_model import LinearRegression
import scipy, scipy.stats
import datetime
import time
import Quandl
import sys
# Build the date range: the 150 days ending today.
seed = time.strftime("%Y-%m-%d")
finish = datetime.datetime.strptime(seed, '%Y-%m-%d')
start = finish - datetime.timedelta(days=150)
# Quandl auth token and the ticker-list file passed on the command line.
token = "GnTpdtBqSqCaKSZeZVd5"
filename = sys.argv[1]
# Get the ticker file.
f = open(filename, 'r')
# Create the stock list.
list_of_stocks = []
# Loop through everything...
for line in f:
ticker = line.strip('\n')
source = 'GOOG/NASDAQ_' + ticker
    # Fetch this ticker's price history from Quandl for the date range.
try:
past_150d = Quandl.get(source, authtoken=token, trim_start=start,trim_end=finish)
except:
continue
    # Extract the closing-price series.
closing_prices = past_150d['Close']
    # Compute the mean and standard deviation of the closing prices.
mean = Series.mean(closing_prices)
stddev = Series.std(closing_prices)
try:
current_price = closing_prices[len(closing_prices) - 1]
except:
continue
# Decide if this is a stock worth looking at.
if current_price < (mean - stddev):
        # Print the ticker as a progress indicator.
print '<><><> ' + str(ticker) + ' <><><>'
# Compute by how much the current price is under the mean.
stddevs_under_mean = (mean - current_price)/stddev
stock_entry = [ticker, stddevs_under_mean]
list_of_stocks.append(stock_entry)
# Sort the tickers by how far below the mean they trade (farthest first), then print in that order.
sorted_stocks = sorted(list_of_stocks, key=lambda x: float(x[1]), reverse=True)
for entry in sorted_stocks:
print str(entry[0]) + ' -' + str(entry[1])
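# Usage sketch (hedged): the ticker file is whatever path is passed as the first
# command-line argument, e.g. a hypothetical tickers.txt with one NASDAQ symbol
# per line:
#   python simple_mean_reversion.py tickers.txt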
| mit |
adowaconan/Spindle_by_Graphical_Features | Generate_Features_with_more_channels.py | 1 | 6406 | # -*- coding: utf-8 -*-
"""
Created on Wed May 17 12:35:28 2017
@author: ning
"""
import numpy as np
import pandas as pd
import os
from collections import Counter
from time import time
os.chdir('D:\\NING - spindle\\Spindle_by_Graphical_Features')
channelList = ['F3','F4','C3','C4','O1','O2','F5',
'F1','F2','FC4','F6','C2','CP4','C6','C1',
'CP3','C5','FC3','PO3','PO7','PO4','PO8','AFz',
'Fz','Cz','CPz','Pz','POz','Oz']
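# Note: 29 channels are listed above; len(channelList) is used below to name the
# output folder (road_trip_29_channels with these defaults).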
#channelList = None
import eegPipelineFunctions
raw_dir = 'D:\\NING - spindle\\training set\\'
# get EEG files that have corresponding annotations
raw_files = []
for file in [f for f in os.listdir(raw_dir) if ('txt' in f)]:
sub = int(file.split('_')[0][3:])
if sub < 11:
day = file.split('_')[1][1]
day_for_load = file.split('_')[1][:2]
else:
day = file.split('_')[2][-1]
day_for_load = file.split('_')[2]
raw_file = [f for f in os.listdir(raw_dir) if (file.split('_')[0] in f) and (day_for_load in f) and ('fif' in f)]
if len(raw_file) != 0:
raw_files.append([raw_dir + raw_file[0],raw_dir + file])
# directory for storing all the feature files
raw_dir = 'D:\\NING - spindle\\training set\\road_trip_%d_channels\\' % len(channelList)
if not os.path.exists(raw_dir):
os.makedirs(raw_dir)
# initialize the range of the parameters we want to compute based on
epoch_lengths = np.arange(1.,5.,0.2) # 1.0 to 4.8 seconds with 0.2 stepsize
plv_thresholds = np.arange(0.6, 0.85, 0.05) # 0.6 to 0.8 with .05
pli_thresholds = np.arange(0.05,0.30, 0.05) # 0.05 to 0.25 with 0.05
cc_thresholds = np.arange(0.7, 0.95,0.05) # 0.7 to 0.9 with 0.05
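# Note: as written, only epoch_lengths drives the active code path below; the
# three threshold grids feed the thresholding loop that is currently commented
# out at the end of this script.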
# make sub-directories based on epoch length
first_level_directory = []
for epoch_length in epoch_lengths:
directory_1 = raw_dir + 'epoch_length '+str(epoch_length)+'\\'
if not os.path.exists(directory_1):
os.makedirs(directory_1)
first_level_directory.append(directory_1)
os.chdir(directory_1)
#print(os.getcwd())
for files in raw_files:
raw_file, annotation_file = files
temp_anno = annotation_file.split('\\')[-1]
sub = int(temp_anno.split('_')[0][3:])
if sub < 11:
day = temp_anno.split('_')[1][1]
day_for_load = temp_anno.split('_')[1][:2]
else:
day = temp_anno.split('_')[2][-1]
day_for_load = temp_anno.split('_')[2]
directory_2 = directory_1 + 'sub' + str(sub) + 'day' + day + '\\'
if not os.path.exists(directory_2):
#print(directory_2)
os.makedirs(directory_2)
os.chdir(directory_2)
# epoch the data
ssssss = time()
epochs,label,my_features,_ = eegPipelineFunctions.get_data_ready(raw_file,channelList,
annotation_file,
epoch_length=epoch_length)
print('epoch_length '+str(epoch_length),Counter(label))
# extract signal features
print('extracting signal features ......')
epochFeature = eegPipelineFunctions.featureExtraction(epochs,)
epochFeature = pd.DataFrame(epochFeature)
epochFeature['label']=label
epochFeature.to_csv('sub'+str(sub)+'day'+day+'_'+str(epoch_length)+'_'+'epoch_features.csv',index=False)
my_features = pd.DataFrame(my_features)
my_features['label']=label
my_features.to_csv('sub'+str(sub)+'day'+day+'_'+str(epoch_length)+'_'+'my_features.csv',index=False)
        # compute adjacency matrices based on epochs
connectivity = eegPipelineFunctions.connectivity(epochs)
connectivity = np.array(connectivity)
plv, pli, cc = connectivity[0,:,:,:],connectivity[1,:,:,:],connectivity[2,:,:,:]
# pre-thresholding graph features
print('extracting graph features of plv ........')
plv_pre_threshold = eegPipelineFunctions.extractGraphFeatures(plv)
plv_pre_threshold['label']=label
print('extracting graph features of pli ........')
pli_pre_threshold = eegPipelineFunctions.extractGraphFeatures(pli)
pli_pre_threshold['label']=label
print('extracting graph features of cc .........')
cc_pre_threshold = eegPipelineFunctions.extractGraphFeatures(cc )
cc_pre_threshold['label']=label
plv_pre_threshold.to_csv('sub'+str(sub)+'day'+day+'plv_features.csv',index=False)
pli_pre_threshold.to_csv('sub'+str(sub)+'day'+day+'pli_features.csv',index=False)
cc_pre_threshold.to_csv('sub'+str(sub)+'day'+day+'cc_features.csv',index=False)
eeeeee = time()
print('done signal, plv, pli, and cc, cost time: %d s'%(eeeeee - ssssss))
# print('start thresholding')
# # extract graph features
# for t_plv,t_pli,t_cc in zip(plv_thresholds,pli_thresholds,cc_thresholds):
# # convert adjasency matrices to binary adjasency matrices
# adj_plv = eegPipelineFunctions.thresholding(t_plv,plv)
# adj_pli = eegPipelineFunctions.thresholding(t_pli,pli)
# adj_cc = eegPipelineFunctions.thresholding(t_cc, cc )
# # this is how we extract graph features
# graphFeature_plv = eegPipelineFunctions.extractGraphFeatures(adj_plv)
# graphFeature_pli = eegPipelineFunctions.extractGraphFeatures(adj_pli)
# graphFeature_cc = eegPipelineFunctions.extractGraphFeatures(adj_cc )
# # prepare the sub-directories for storing feature files
# plv_dir = directory_2 + 'plv_' + str(t_plv) + '\\'
# pli_dir = directory_2 + 'pli_' + str(t_pli) + '\\'
# cc_dir = directory_2 + 'cc_' + str(t_cc ) + '\\'
# if not os.path.exists(plv_dir):
# os.makedirs(plv_dir)
# if not os.path.exists(pli_dir):
# os.makedirs(pli_dir)
# if not os.path.exists(cc_dir):
# os.makedirs(cc_dir)
# # saving csvs
# pd.concat([epochFeature,graphFeature_plv],axis=1).to_csv(plv_dir + 'plv_' + str(t_plv) + '.csv',index=False)
# pd.concat([epochFeature,graphFeature_pli],axis=1).to_csv(pli_dir + 'pli_' + str(t_pli) + '.csv',index=False)
# pd.concat([epochFeature,graphFeature_cc ],axis=1).to_csv(cc_dir + 'cc_' + str(t_cc ) + '.csv',index=False)
| mit |
ajm/pulp | explore/management/commands/linrel.py | 2 | 2428 | # This file is part of PULP.
#
# PULP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PULP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PULP. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand, CommandError
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import normalize
from explore.models import Article
from explore.utils import *
from explore.arxiv import ArxivCleaner
import numpy as np
class Command(BaseCommand) :
args = 'no arguments'
help = 'write LinRel related files'
def handle(self, *args, **options) :
#v = CountVectorizer(min_df=2, stop_words=get_stop_words(), dtype=np.float64)
v = TfidfVectorizer(min_df=10, stop_words=get_stop_words(), dtype=np.float64, norm='l2')
self.stdout.write("Building matrix from %d articles...\n" % Article.objects.count(), ending='')
self.stdout.flush()
#m = v.fit_transform(build_corpus())
arxiv = ArxivCleaner()
m = v.fit_transform(arxiv.build_corpus(Article.objects.all(), stem=True))
self.stdout.write("done!\n")
# self.stdout.write("Normalising... ", ending='')
# self.stdout.flush()
#
# if not scipy.sparse.isspmatrix_csr(m) :
# m = m.tocsr()
#
# normalize(m, norm='l2', copy=False)
# self.stdout.write("done!\n")
self.stdout.write("Writing LinRel file...", ending='')
self.stdout.flush()
save_sparse_linrel(m)
save_features_linrel(dict([ (y,x) for x,y in enumerate(v.get_feature_names()) ]))
# self.stdout.write("Writing out %d keyword features..." % len(v.get_feature_names()), ending='')
# self.stdout.flush()
# with open('keywords.txt', 'w') as f :
# for index,feature in enumerate(v.get_feature_names()) :
# print >> f, index, ''.join([i for i in feature if ord(i) < 128])
#
self.stdout.write("done!\n")
| gpl-3.0 |
SGenheden/Scripts | Projects/Gpcr/gpcr_plot_fractalpies.py | 1 | 8528 | # Author: Samuel Genheden samuel.genheden@gmail.com
"""
Program to draw roughness (fractal) pies on structures
No arguments are necessary, all structures are taken from standard locations
"""
import argparse
import os
import sys
import numpy as np
import matplotlib
if not "DISPLAY" in os.environ or os.environ["DISPLAY"] == "" :
matplotlib.use('Agg')
import matplotlib.pylab as plt
import matplotlib.colors as colors
import matplotlib.patches as patches
import gpcr_lib
# Import the calc_surf program
thispath = os.path.dirname(os.path.abspath(__file__))
oneup = os.path.split(thispath)[0]
sys.path.insert(0,oneup)
import calc_surf
class StructurePartition :
"""
Class to store a structure partition on pies
to perform fractal calculations
Attributes
----------
aastruct : PDBFile
the atomistic structure used in the surface calculations
edges : numpy array
the radial edges of the partition
fractal_low : numpy array
the average fractal for each partition in the lower leaflet
is None unless calc_fractal() has been called
fractal_upp : numpy array
the average fractal for each partition in the upper leaflet
is None unless calc_fractal() has been called
lowsel : numpy array
indicates if residue is on lower leaflet
minval : float
the lowest fractal
maxval : float
the highest fractal
    npies : int
the number of radial partitions, i.e. pies
probes : numpy array
        the probe radii
        is None if calc_surf() has not been called
    selections : list of Selection objects
        a Selection object for each helix
xray : XrayDensity object
        the X-ray structure
xyzrnames : list of string
filename for temporary structure names
"""
def __init__(self, xray, aastruct, npies) :
self.xray = xray
self.npies = npies
self.aastruct = aastruct
        # Determine which pie each residue falls in
allcent = xray.pdbfile.xyz.mean(axis=0)
centres = np.array([res.collect("centerofmass")-allcent for res in xray.pdbfile.residues])
self.edges = np.linspace(-180,180,self.npies+1,endpoint=True)
ang = np.arctan2(centres[:,0],centres[:,1])*180.0/np.pi
self.partition = np.digitize(ang,self.edges)
# Setup a calc_surf.Selection for each helix
self.xyzrnames = [None]*len(xray.template.rhelices)
self.selections = [None]*len(xray.template.rhelices)
radii = np.asarray([calc_surf.bornradii[atom.element().upper()] for atom in aastruct.atoms])
for i,h in enumerate(xray.template.rhelices) :
aidx1 = aastruct.residues[h[0]-1].atoms[0].idx
aidx2 = aastruct.residues[h[1]-1].atoms[-1].idx
self.xyzrnames[i] = calc_surf.write_xyzr(aastruct.xyz[aidx1:aidx2+1,:],radii[aidx1:aidx2+1])
self.selections[i] = calc_surf.Selection(aastruct.residues[h[0]:h[1]+1],self.xyzrnames[i])
        # This selects residues in the lower leaflet
self.lowsel = centres[:,2]+allcent[2] < xray.box[2] / 2.0
# Initialise arrays to None
self.probes = None
self.fractal_low = None
self.fractal_upp = None
def clean_up(self) :
"""
Removes the temporary structure files from disc
"""
for xyzrname in self.xyzrnames :
os.remove(xyzrname)
def calc_fractal(self) :
"""
        Calculates the fractal for each partition by averaging over the helix selections
"""
if self.probes is None : return
self.fractal_low = np.zeros(self.npies)
self.fractal_upp = np.zeros(self.npies)
ncount_low = np.zeros(self.npies)
ncount_upp = np.zeros(self.npies)
for sel in self.selections :
fsel = sel.fractal()
for f in fsel.T :
res = int(f[0])
part = self.partition[res] - 1
if self.lowsel[res] :
self.fractal_low[part] += f[1]
ncount_low[part] += 1.0
else :
self.fractal_upp[part] += f[1]
ncount_upp[part] += 1.0
self.fractal_low = self.fractal_low / ncount_low
self.fractal_upp = self.fractal_upp / ncount_upp
self.minval = min(self.fractal_low.min(),self.fractal_upp.min())
self.maxval = max(self.fractal_low.max(),self.fractal_upp.max())
def calc_surf(self,probes) :
"""
Calculates the surface of the helix selections for different probe radii
"""
self.probes = probes
for probe in probes :
for sel in self.selections :
sel.calc_surf(probe)
def plot(self, axes, labels, restocolor) :
"""
Plot the fractal on a pie chart
        Parameters
        ----------
axes : tuple of Axis object
the axis to draw the lower and upper leaflet pie chart
labels : tuple of strings
the labels to draw next to each axis
restocolor : list of int
residues to color
"""
def draw_pies(axis,fractal,cent,rad,reverseY) :
c = plt.Circle(cent.T,rad,ec='k',fc=None,fill=False)
axis.add_patch(c)
fractal2 = (fractal - self.minval) / (self.maxval - self.minval)
for val,e1,e2 in zip(fractal2,self.edges[:-1],self.edges[1:]) :
w = patches.Wedge(cent.T,rad,e1,e2,ec='k',fc='k',alpha=0.1)
axis.add_patch(w)
if reverseY :
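                    # Mirror the wedge angles about the vertical axis (reverseY
                    # is set for the upper-leaflet view, which is presumably
                    # plotted mirrored relative to the lower one)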
x = -rad*np.cos(e1*np.pi/180.0)
y = rad*np.sin(e1*np.pi/180.0)
ee2 = 180.0*np.arctan2(y,x)/np.pi
x = -rad*np.cos(e2*np.pi/180.0)
y = rad*np.sin(e2*np.pi/180.0)
ee1 = 180.0*np.arctan2(y,x)/np.pi
w = patches.Wedge(cent.T,rad,ee1,ee2,ec=plt.cm.RdYlBu_r(val),fc=plt.cm.RdYlBu_r(val),width=5)
else :
w = patches.Wedge(cent.T,rad,e1,e2,ec=plt.cm.RdYlBu_r(val),fc=plt.cm.RdYlBu_r(val),width=5)
axis.add_patch(w)
if self.fractal_low is None : return
gpcr_lib.plot_density_xray(axes[0],0,"",0,0,self.xray,"low","Intra.",number=None,plotn=False,drawchol=False, specialres=restocolor)
gpcr_lib.plot_density_xray(axes[1],0,"",0,0,self.xray,"upp","Extra.",number=None,plotn=False,drawchol=False, specialres=restocolor)
rad = 30.0
cent = np.array([0.0,0.0])
draw_pies(axes[0],self.fractal_low,cent,rad,False)
draw_pies(axes[1],self.fractal_upp,cent,rad,True)
for a,l in zip(axes,labels) :
a.text(-40,38,l)
a.set_xticklabels([])
a.set_yticklabels([])
def print_helixroughness(self) :
all = []
for i, sel in enumerate(self.selections,1):
fsel = sel.fractal()
av = fsel[1,:].mean()
print "\tH%d\t%.5f\t%.5f"%(i, av, fsel[1,:].std())
all.append(av)
all = np.asarray(all)
print "\tOverall\t%.5f\t%.5f"%(all.mean(),all.std()/np.sqrt(all.shape[0]))
if __name__ == '__main__' :
# Command-line input
parser = argparse.ArgumentParser(description="Plotting fractal pies")
parser.add_argument('-f','--folder', help="the folder with the residue contacts")
parser.add_argument('-n','--npies',type=int,help="the number of pies",default=12)
parser.add_argument('-p','--probes',nargs="+",type=float,help="the probe sizes",default=[1.4,1.8,2.2,2.6,3.0])
args = parser.parse_args()
mols = "b2 b2_a a2a a2a_a".split()
numbers = "A) B) C) D) E) F) G) H)".split()
fig = plt.figure(1,figsize=(8,12))
# Setup and calculate the partition for each molecule
parts = [None]*len(mols)
for i,mol in enumerate(mols) :
xray, aastruct = gpcr_lib.load_xray(mol, loadsigma=True, loadaa=True)
parts[i] = StructurePartition(xray, aastruct, args.npies)
parts[i].calc_surf(args.probes)
parts[i].calc_fractal()
parts[i].clean_up()
    # Find the lowest and maximum fractal among the different molecules
    # (the lower bound is capped at 2.0)
    minval = min(2.0, min(p.minval for p in parts))
maxval = np.around(max([p.maxval for p in parts]),1)
# Plot each of the pie charts
for i, (part, mol) in enumerate(zip(parts,mols)) :
part.minval = minval
part.maxval = maxval
a1 = fig.add_subplot(len(mols),2,i*2+1)
a2 = fig.add_subplot(len(mols),2,i*2+2)
restocolor = gpcr_lib.read_rescontacts(args.folder, mol)
part.plot([a1,a2], numbers[(i*2):(i+1)*2], restocolor)
# This adds a colormap
if i == 0 :
fig_dummy = plt.figure(2)
im = np.outer(np.arange(part.minval,part.maxval,0.01),np.ones(10))
a = fig_dummy.add_subplot(1,1,1,aspect="equal")
im = a.imshow(im,aspect=0.1,cmap=plt.cm.RdYlBu_r,origin='lower',extent=(0,1,part.minval,part.maxval))
a.get_xaxis().set_visible(False)
gpcr_lib.draw_colormap(fig,im,text=" Fractal", unittxt="")
fig.savefig("roughness_anal.png",format="png")
# Print average helix roughness
for part, mol in zip(parts, mols):
print "Average helix roughness for %s"%mol
part.print_helixroughness()
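# Usage sketch (all options are optional, per the docstring at the top):
#   python gpcr_plot_fractalpies.py -f <residue-contacts folder> -n 12 -p 1.4 1.8 2.2 2.6 3.0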
| mit |
chenyyx/scikit-learn-doc-zh | examples/en/cluster/plot_dbscan.py | 39 | 2534 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
# #############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
# #############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
# #############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = [plt.cm.Spectral(each)
for each in np.linspace(0, 1, len(unique_labels))]
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = [0, 0, 0, 1]
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| gpl-3.0 |