repo_name | path | copies | size | content | license
---|---|---|---|---|---|
justincely/rolodex | setup.py | 1 | 2102 | from setuptools import setup, find_packages
setup(
name = 'cos_monitoring',
version = '0.0.1',
description = 'Provide utilities and monitoring of COS data',
author = 'Justin Ely',
author_email = 'ely@stsci.edu',
keywords = ['astronomy'],
classifiers = ['Programming Language :: Python',
'Programming Language :: Python :: 3',
'Development Status :: 1 - Planning',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Astronomy',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Software Development :: Libraries :: Python Modules'],
packages = find_packages(),
requires = ['numpy', 'scipy', 'astropy', 'matplotlib'],
entry_points = {'console_scripts': ['clean_slate=cos_monitoring.database:clean_slate',
'cm_ingest=cos_monitoring.database:ingest_all',
'cm_monitors=cos_monitoring.database:run_all_monitors',
'create_master_csv=scripts.create_master_csv:main',
'cosmo_retrieval=cos_monitoring.retrieval:run_cosmo_retrieval',
'cm_reports=cos_monitoring.database.report:query_all',
'cm_delete=cos_monitoring.database.database:cm_delete',
'cm_describe=cos_monitoring.database.database:cm_describe',
'cm_tot_gain=cos_monitoring.cci.gainmap:make_all_gainmaps_entry'],
},
install_requires = ['setuptools',
'numpy>=1.11.1',
'astropy>=1.0.1',
'sqlalchemy>=1.0.12',
'pymysql',
'matplotlib',
'scipy',
'fitsio',
'psutil',
'beautifulsoup4',
'pyfastcopy']
)
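# Usage sketch (assumes the package has been installed, e.g. with `pip install .`):
# each console_scripts entry above maps a shell command to a "package.module:function"
# target, so the following commands would be expected to become available:
#
#   $ cm_ingest       # runs cos_monitoring.database:ingest_all()
#   $ cm_monitors     # runs cos_monitoring.database:run_all_monitors()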
| bsd-3-clause |
pycroscopy/pycroscopy | pycroscopy/processing/svd_utils.py | 1 | 20291 | # -*- coding: utf-8 -*-
"""
USID utilities for performing randomized singular value decomposition and reconstructing results
Created on Mon Mar 28 09:45:08 2016
@author: Suhas Somnath, Chris Smith
"""
from __future__ import division, print_function, absolute_import
import time
from multiprocessing import cpu_count
import numpy as np
from sklearn.utils import gen_batches
from sklearn.utils.extmath import randomized_svd
from sidpy.hdf.reg_ref import get_indices_for_region_ref, create_region_reference
from sidpy.hdf.hdf_utils import get_attr, write_simple_attrs, copy_attributes
from sidpy.proc.comp_utils import get_available_memory
from sidpy.base.string_utils import format_time
from sidpy.hdf.dtype_utils import check_dtype, stack_real_to_target_dtype
from pyUSID.processing.process import Process
from .proc_utils import get_component_slice
from pyUSID.io.hdf_utils import find_results_groups, \
reshape_to_n_dims, write_main_dataset, create_results_group, \
create_indexed_group, find_dataset
from pyUSID import Dimension
from pyUSID.io.anc_build_utils import calc_chunks
from pyUSID import USIDataset
import h5py
from matplotlib import pyplot as plt
from pyUSID.viz import plot_utils
class SVD(Process):
"""
This class provides a file-wrapper around the :meth:`sklearn.utils.extmath.randomized_svd` function.
In other words, it extracts and then reformats the data present in the provided :class:`pyUSID.USIDataset` object,
performs the randomized SVD operation and writes the results back to the USID HDF5 file after
formatting the results in a USID-compliant manner.
"""
def __init__(self, h5_main, num_components=None, **kwargs):
"""
Perform the SVD decomposition on the selected dataset and write the results to h5 file.
Parameters
----------
h5_main : :class:`pyUSID.USIDataset` object
USID Main HDF5 dataset that will be decomposed
num_components : int, optional
Number of components to decompose h5_main into. Default None.
h5_target_group : h5py.Group, optional. Default = None
Location where to look for existing results and to place newly
computed results. Use this kwarg if the results need to be written
to a different HDF5 file. By default, this value is set to the
parent group containing `h5_main`
kwargs
Arguments to be sent to Process
"""
super(SVD, self).__init__(h5_main, 'SVD', **kwargs)
'''
Calculate the size of the main data in memory and compare to max_mem
We use the minimum of the actual dtype's itemsize and float32 since we
don't want to read it in yet and do the proper type conversions.
'''
n_samples, n_features = h5_main.shape
self.data_transform_func, is_complex, is_compound, n_features, type_mult = check_dtype(h5_main)
if num_components is None:
num_components = min(n_samples, n_features)
else:
num_components = min(n_samples, n_features, num_components)
self.num_components = num_components
# Check that we can actually compute the SVD with the selected number of components
self._check_available_mem()
self.parms_dict = {'num_components': num_components}
self.duplicate_h5_groups, self.partial_h5_groups = self._check_for_duplicates()
# supercharge h5_main!
self.h5_main = USIDataset(self.h5_main)
self.__u = None
self.__v = None
self.__s = None
def test(self, override=False):
"""
Applies randomised SVD to the dataset. This function does NOT write results to the hdf5 file. Call compute() to
write to the file. Handles complex, compound datasets such that the V matrix is of the same data-type as the
input matrix.
Parameters
----------
override : bool, optional. default = False
Set to true to recompute results if prior results are available. Else, returns existing results
Returns
-------
U : :class:`numpy.ndarray`
Abundance matrix
S : :class:`numpy.ndarray`
variance vector
V : :class:`numpy.ndarray`
eigenvector matrix
"""
'''
Check if a number of components has been set and ensure that the number is less than
the minimum axis length of the data. If both conditions are met, use fsvd. If not
use the regular svd.
C.Smith -- We might need to put a lower limit on num_comps in the future. I don't
know enough about svd to be sure.
'''
if not override:
if isinstance(self.duplicate_h5_groups, list) and len(self.duplicate_h5_groups) > 0:
self.h5_results_grp = self.duplicate_h5_groups[-1]
print('Returning previously computed results from: {}'.format(self.h5_results_grp.name))
print('set the "override" flag to True to recompute results')
return reshape_to_n_dims(self.h5_results_grp['U'])[0], self.h5_results_grp['S'][()], \
reshape_to_n_dims(self.h5_results_grp['V'])[0]
self.h5_results_grp = None
t1 = time.time()
self.__u, self.__s, self.__v = randomized_svd(self.data_transform_func(self.h5_main), self.num_components,
n_iter=3)
self.__v = stack_real_to_target_dtype(self.__v, self.h5_main.dtype)
print('Took {} to compute randomized SVD'.format(format_time(time.time() - t1)))
u_mat, success = reshape_to_n_dims(self.__u, h5_pos=self.h5_main.h5_pos_inds,
h5_spec=np.expand_dims(np.arange(self.__u.shape[1]), axis=0))
if not success:
raise ValueError('Could not reshape U to N-Dimensional dataset! Error:' + str(success))
# When the source dataset has a singular valued spectroscopic dimension
# stack_real_to_target causes V to lose all its dimensions
if self.__v.ndim == 0:
# However, we want V to be 2D:
self.__v = np.atleast_2d(self.__v)
v_mat, success = reshape_to_n_dims(self.__v, h5_pos=np.expand_dims(np.arange(self.__u.shape[1]), axis=1),
h5_spec=self.h5_main.h5_spec_inds)
if not success:
raise ValueError('Could not reshape V to N-Dimensional dataset! Error:' + str(success))
return u_mat, self.__s, v_mat
def compute(self, override=False):
"""
Computes SVD (by calling test() if it has not already been called) and writes results to file.
Consider calling test() to check results before writing to file. Results are deleted from memory
upon writing to the HDF5 file
Parameters
----------
override : bool, optional. default = False
Set to true to recompute results if prior results are available. Else, returns existing results
Returns
-------
h5_results_grp : :class:`h5py.Group` object
HDF5 Group containing all the results
"""
if self.__u is None and self.__v is None and self.__s is None:
self.test(override=override)
if self.h5_results_grp is None:
self._write_results_chunk()
self.delete_results()
h5_group = self.h5_results_grp
return h5_group
def delete_results(self):
"""
Deletes results from memory.
"""
del self.__u, self.__s, self.__v
self.__u = None
self.__v = None
self.__s = None
def _write_results_chunk(self):
"""
Writes the provided SVD results to file
Parameters
----------
"""
comp_dim = Dimension('Principal Component', 'a. u.', len(self.__s))
h5_svd_group = create_results_group(self.h5_main, self.process_name,
h5_parent_group=self._h5_target_group)
self.h5_results_grp = h5_svd_group
self._write_source_dset_provenance()
write_simple_attrs(h5_svd_group, self.parms_dict)
write_simple_attrs(h5_svd_group, {'svd_method': 'sklearn-randomized'})
h5_u = write_main_dataset(h5_svd_group, np.float32(self.__u), 'U', 'Abundance', 'a.u.', None, comp_dim,
h5_pos_inds=self.h5_main.h5_pos_inds, h5_pos_vals=self.h5_main.h5_pos_vals,
dtype=np.float32, chunks=calc_chunks(self.__u.shape, np.float32(0).itemsize))
# print(get_attr(self.h5_main, 'quantity')[0])
h5_v = write_main_dataset(h5_svd_group, self.__v, 'V', get_attr(self.h5_main, 'quantity')[0],
'a.u.', comp_dim, None, h5_spec_inds=self.h5_main.h5_spec_inds,
h5_spec_vals=self.h5_main.h5_spec_vals,
chunks=calc_chunks(self.__v.shape, self.h5_main.dtype.itemsize))
# No point making this 1D dataset a main dataset
h5_s = h5_svd_group.create_dataset('S', data=np.float32(self.__s))
'''
Check h5_main for plot group references.
Copy them into V if they exist
'''
for key in self.h5_main.attrs.keys():
if '_Plot_Group' not in key:
continue
ref_inds = get_indices_for_region_ref(self.h5_main, self.h5_main.attrs[key], return_method='corners')
ref_inds = ref_inds.reshape([-1, 2, 2])
ref_inds[:, 1, 0] = h5_v.shape[0] - 1
svd_ref = create_region_reference(h5_v, ref_inds)
h5_v.attrs[key] = svd_ref
# Marking completion:
self._status_dset_name = 'completed_positions'
self._h5_status_dset = h5_svd_group.create_dataset(self._status_dset_name,
data=np.ones(self.h5_main.shape[0], dtype=np.uint8))
# keeping legacy option:
h5_svd_group.attrs['last_pixel'] = self.h5_main.shape[0]
def _check_available_mem(self):
"""
Check that there is enough memory to perform the SVD decomposition.
Returns
-------
sufficient_mem : bool
True if enough memory was found, False otherwise.
"""
if self.verbose:
print('Checking memory availability.')
n_samples, n_features = self.h5_main.shape
s_mem_per_comp = np.float32(0).itemsize
u_mem_per_comp = np.float32(0).itemsize * n_samples
v_mem_per_comp = self.h5_main.dtype.itemsize * n_features
mem_per_comp = s_mem_per_comp + u_mem_per_comp + v_mem_per_comp
max_mem = get_available_memory()
avail_mem = 0.75 * max_mem
free_mem = avail_mem - self.h5_main.__sizeof__()
if free_mem <= 0:
error_message = 'Cannot load main dataset into memory.\n' + \
'Available memory is {}. Dataset needs {}.'.format(avail_mem,
self.h5_main.__sizeof__())
raise MemoryError(error_message)
if self.verbose:
print('Memory available for SVD is {}.'.format(free_mem))
print('Memory needed per component is {}.'.format(mem_per_comp))
cant_svd = (free_mem - self.num_components * mem_per_comp) <= 0
if cant_svd:
max_comps = int(np.floor(free_mem / mem_per_pix))
error_message = 'Not enough free memory for performing SVD with requested number of parameters.\n' + \
'Maximum possible parameters is {}.'.format(max_comps)
raise MemoryError(error_message)
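# Minimal usage sketch for the SVD class above (assumes an existing USID-formatted
# HDF5 file at 'data.h5' with a Main dataset at 'Measurement_000/Channel_000/Raw_Data';
# both names are placeholders, not taken from this module):
#
# with h5py.File('data.h5', mode='r+') as h5_file:
#     h5_main = USIDataset(h5_file['Measurement_000/Channel_000/Raw_Data'])
#     svd_proc = SVD(h5_main, num_components=32)
#     u_mat, s_vec, v_mat = svd_proc.test()   # in-memory preview, nothing written
#     h5_results_grp = svd_proc.compute()     # writes U, S, V back to the file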
###############################################################################
def simplified_kpca(kpca, source_data):
"""
Performs kernel PCA on the provided dataset and returns the familiar
eigenvector, eigenvalue, and scree matrices.
Note that the positions in the eigenvalues may need to be transposed
Parameters
----------
kpca : KernelPCA object
configured Kernel PCA object ready to perform analysis
source_data : 2D numpy array
Data arranged as [iteration, features] example - [position, time]
Returns
-------
eigenvalues : 2D numpy array
Eigenvalues in the original space arranged as [component,iteration]
scree : 1D numpy array
S component
eigenvector : 2D numpy array
Eigenvectors in the original space arranged as [component,features]
"""
X_kpca = kpca.fit(source_data.T)
eigenvectors = X_kpca.alphas_.T
eigenvalues = X_kpca.fit_transform(source_data)
# kpca_explained_variance = np.var(kpca.fit_transform(source_data), axis=0)
# information_content = kpca_explained_variance / np.sum(kpca_explained_variance)
scree = kpca.lambdas_
return eigenvalues, scree, eigenvectors
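# Usage sketch for simplified_kpca (assumes an older scikit-learn that still exposes
# KernelPCA.alphas_ and KernelPCA.lambdas_, which this helper relies on; `data_2d`
# is a placeholder [position, time] array):
#
# from sklearn.decomposition import KernelPCA
# kpca = KernelPCA(n_components=16, kernel='rbf')
# eigenvalues, scree, eigenvectors = simplified_kpca(kpca, data_2d)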
def rebuild_svd(h5_main, components=None, cores=None, max_RAM_mb=1024):
"""
Rebuild the Image from the SVD results on the windows
Optionally, only use components less than n_comp.
Parameters
----------
h5_main : hdf5 Dataset
dataset which SVD was performed on
components : {int, iterable of int, slice} optional
Defines which components to keep
Default - None, all components kept
Input Types
integer : Components less than the input will be kept
length 2 iterable of integers : Integers define start and stop of component slice to retain
other iterable of integers or slice : Selection of component indices to retain
cores : int, optional
How many cores should be used to rebuild
Default - None, all but 2 cores will be used, min 1
max_RAM_mb : int, optional
Maximum amount of memory to use when rebuilding, in Mb.
Default - 1024Mb
Returns
-------
rebuilt_data : HDF5 Dataset
the rebuilt dataset
"""
comp_slice, num_comps = get_component_slice(components, total_components=h5_main.shape[1])
if isinstance(comp_slice, np.ndarray):
comp_slice = list(comp_slice)
dset_name = h5_main.name.split('/')[-1]
# Ensuring that at least one core is available for use / 2 cores are available for other use
max_cores = max(1, cpu_count() - 2)
# print('max_cores',max_cores)
if cores is not None:
cores = min(round(abs(cores)), max_cores)
else:
cores = max_cores
max_memory = min(max_RAM_mb * 1024 ** 2, 0.75 * get_available_memory())
if cores != 1:
max_memory = int(max_memory / 2)
'''
Get the handles for the SVD results
'''
try:
h5_svd_group = find_results_groups(h5_main, 'SVD')[-1]
h5_S = h5_svd_group['S']
h5_U = h5_svd_group['U']
h5_V = h5_svd_group['V']
except KeyError:
raise KeyError('SVD Results for {dset} were not found.'.format(dset=dset_name))
except:
raise
func, is_complex, is_compound, n_features, type_mult = check_dtype(h5_V)
'''
Calculate the size of a single batch that will fit in the available memory
'''
n_comps = h5_S[comp_slice].size
mem_per_pix = (h5_U.dtype.itemsize + h5_V.dtype.itemsize * h5_V.shape[1]) * n_comps
fixed_mem = h5_main.size * h5_main.dtype.itemsize
if cores is None:
free_mem = max_memory - fixed_mem
else:
free_mem = max_memory * 2 - fixed_mem
batch_size = int(round(float(free_mem) / mem_per_pix))
batch_slices = gen_batches(h5_U.shape[0], batch_size)
print('Reconstructing in batches of {} positions.'.format(batch_size))
print('Batches should be {} Mb each.'.format(mem_per_pix * batch_size / 1024.0 ** 2))
'''
Loop over all batches.
'''
ds_V = np.dot(np.diag(h5_S[comp_slice]), func(h5_V[comp_slice, :]))
rebuild = np.zeros((h5_main.shape[0], ds_V.shape[1]))
for ibatch, batch in enumerate(batch_slices):
rebuild[batch, :] += np.dot(h5_U[batch, comp_slice], ds_V)
rebuild = stack_real_to_target_dtype(rebuild, h5_V.dtype)
print('Completed reconstruction of data from SVD results. Writing to file.')
'''
Create the Group and dataset to hold the rebuild data
'''
rebuilt_grp = create_indexed_group(h5_svd_group, 'Rebuilt_Data')
h5_rebuilt = write_main_dataset(rebuilt_grp, rebuild, 'Rebuilt_Data',
get_attr(h5_main, 'quantity'), get_attr(h5_main, 'units'),
None, None,
h5_pos_inds=h5_main.h5_pos_inds, h5_pos_vals=h5_main.h5_pos_vals,
h5_spec_inds=h5_main.h5_spec_inds, h5_spec_vals=h5_main.h5_spec_vals,
chunks=h5_main.chunks, compression=h5_main.compression)
if isinstance(comp_slice, slice):
rebuilt_grp.attrs['components_used'] = '{}-{}'.format(comp_slice.start, comp_slice.stop)
else:
rebuilt_grp.attrs['components_used'] = components
copy_attributes(h5_main, h5_rebuilt, skip_refs=False)
h5_main.file.flush()
print('Done writing reconstructed data to file.')
return h5_rebuilt
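# Usage sketch: rebuilding the dataset from its first 16 SVD components (assumes SVD
# results already exist for `h5_main` in the same file; the argument values are
# placeholders):
#
# h5_rebuilt = rebuild_svd(h5_main, components=16, cores=1, max_RAM_mb=512)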
def plot_svd(h5_main, savefig=False, num_plots = 16, **kwargs):
'''
Replots the SVD showing the scree, abundance maps, and eigenvectors.
If h5_main is a Dataset, it will default to the most recent SVD group from that
Dataset.
If h5_main is the results group, then it will plot the values for that group.
Parameters
----------
h5_main : USIDataset or h5py Dataset or h5py Group
savefig : bool, optional
Saves the figures to disk with some default names
num_plots : int
Default number of eigenvectors and abundance plots to show
kwargs : dict, optional
keyword arguments for svd filtering
Returns
-------
None
'''
if isinstance(h5_main, h5py.Group):
_U = find_dataset(h5_main, 'U')[-1]
_V = find_dataset(h5_main, 'V')[-1]
units = 'arbitrary (a.u.)'
h5_spec_vals = np.arange(_V.shape[1])
h5_svd_group = _U.parent
else:
h5_svd_group = find_results_groups(h5_main, 'SVD')[-1]
units = h5_main.attrs['quantity']
h5_spec_vals = h5_main.get_spec_values('Time')
h5_U = h5_svd_group['U']
h5_V = h5_svd_group['V']
h5_S = h5_svd_group['S']
_U = USIDataset(h5_U)
[num_rows, num_cols] = _U.pos_dim_sizes
abun_maps = np.reshape(h5_U[:,:16], (num_rows, num_cols,-1))
eigen_vecs = h5_V[:16, :]
skree_sum = np.zeros(h5_S.shape)
for i in range(h5_S.shape[0]):
skree_sum[i] = np.sum(h5_S[:i])/np.sum(h5_S)
plt.figure()
plt.plot(skree_sum, 'bo')
plt.title('Cumulative Variance')
plt.xlabel('Total Components')
plt.ylabel('Total variance ratio (a.u.)')
if savefig:
plt.savefig('Cumulative_variance_plot.png')
fig_skree, axes = plot_utils.plot_scree(h5_S, title='Scree plot')
fig_skree.tight_layout()
if savefig:
plt.savefig('Scree_plot.png')
fig_abun, axes = plot_utils.plot_map_stack(abun_maps, num_comps=num_plots, title='SVD Abundance Maps',
color_bar_mode='single', cmap='inferno', reverse_dims=True,
fig_mult=(3.5,3.5), facecolor='white', **kwargs)
fig_abun.tight_layout()
if savefig:
plt.savefig('Abundance_maps.png')
fig_eigvec, axes = plot_utils.plot_curves(h5_spec_vals*1e3, eigen_vecs, use_rainbow_plots=False,
x_label='Time (ms)', y_label=units,
num_plots=num_plots, subtitle_prefix='Component',
title='SVD Eigenvectors', evenly_spaced=False,
**kwargs)
fig_eigvec.tight_layout()
if savefig:
plt.savefig('Eigenvectors.png')
return | mit |
juliojsb/sarviewer | plotters/matplotlib/swap.py | 1 | 2062 | #!/usr/bin/env python2
"""
Author :Julio Sanz
Website :www.elarraydejota.com
Email :juliojosesb@gmail.com
Description :Script to create a graph about swap usage
Dependencies :Python 2.x, matplotlib
Usage :python swap.py
License :GPLv3
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import csv
from datetime import datetime
import matplotlib.dates
# ======================
# VARIABLES
# ======================
# Aesthetic parameters
plt.rcParams.update({'font.size': 8})
plt.rcParams['lines.linewidth'] = 1.5
time_format = matplotlib.dates.DateFormatter('%H:%M:%S')
plt.gca().xaxis.set_major_formatter(time_format)
plt.gcf().autofmt_xdate()
# Time (column 0)
x = []
# Data arrays
swap_free = []
swap_used = []
# ======================
# FUNCTIONS
# ======================
def generate_graph():
with open('../../data/swap.dat', 'r') as csvfile:
data_source = csv.reader(csvfile, delimiter=' ', skipinitialspace=True)
for row in data_source:
# [0] column is a time column
# Convert to datetime data type
a = datetime.strptime((row[0]),'%H:%M:%S')
x.append((a))
# The remaining columns contain data
swap_free.append(str(int(row[1])/1024))
swap_used.append(str(int(row[2])/1024))
# Plot lines
plt.plot(x,swap_used, label='Used', color='r', antialiased=True)
plt.plot(x,swap_free, label='Free', color='g', antialiased=True)
# Graph properties
plt.xlabel('Time',fontstyle='italic')
plt.ylabel('SWAP (MB)',fontstyle='italic')
plt.title('SWAP usage graph')
plt.grid(linewidth=0.4, antialiased=True)
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), ncol=2, fancybox=True, shadow=True)
plt.autoscale(True)
# Graph saved to PNG file
plt.savefig('../../graphs/swap.png', bbox_inches='tight')
#plt.show()
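# Assumed layout of ../../data/swap.dat (one sample per line, space separated, values
# in KB as collected by the sarviewer data scripts -- the exact columns and units are
# an assumption inferred from the parsing above: time, free swap, used swap):
#
#   09:00:01 2097148 0
#   09:00:02 2096892 256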
# ======================
# MAIN
# ======================
if __name__ == '__main__':
generate_graph() | gpl-3.0 |
deepmind/grid-cells | utils.py | 1 | 5720 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for creating the training graph and plotting.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import ensembles # pylint: disable=g-bad-import-order
np.seterr(invalid="ignore")
def get_place_cell_ensembles(
env_size, neurons_seed, targets_type, lstm_init_type, n_pc, pc_scale):
"""Create the ensembles for the Place cells."""
place_cell_ensembles = [
ensembles.PlaceCellEnsemble(
n,
stdev=s,
pos_min=-env_size / 2.0,
pos_max=env_size / 2.0,
seed=neurons_seed,
soft_targets=targets_type,
soft_init=lstm_init_type)
for n, s in zip(n_pc, pc_scale)
]
return place_cell_ensembles
def get_head_direction_ensembles(
neurons_seed, targets_type, lstm_init_type, n_hdc, hdc_concentration):
"""Create the ensembles for the Head direction cells."""
head_direction_ensembles = [
ensembles.HeadDirectionCellEnsemble(
n,
concentration=con,
seed=neurons_seed,
soft_targets=targets_type,
soft_init=lstm_init_type)
for n, con in zip(n_hdc, hdc_concentration)
]
return head_direction_ensembles
def encode_initial_conditions(init_pos, init_hd, place_cell_ensembles,
head_direction_ensembles):
initial_conds = []
for ens in place_cell_ensembles:
initial_conds.append(
tf.squeeze(ens.get_init(init_pos[:, tf.newaxis, :]), axis=1))
for ens in head_direction_ensembles:
initial_conds.append(
tf.squeeze(ens.get_init(init_hd[:, tf.newaxis, :]), axis=1))
return initial_conds
def encode_targets(target_pos, target_hd, place_cell_ensembles,
head_direction_ensembles):
ensembles_targets = []
for ens in place_cell_ensembles:
ensembles_targets.append(ens.get_targets(target_pos))
for ens in head_direction_ensembles:
ensembles_targets.append(ens.get_targets(target_hd))
return ensembles_targets
def clip_all_gradients(g, var, limit):
# print(var.name)
return (tf.clip_by_value(g, -limit, limit), var)
def clip_bottleneck_gradient(g, var, limit):
if ("bottleneck" in var.name or "pc_logits" in var.name):
return (tf.clip_by_value(g, -limit, limit), var)
else:
return (g, var)
def no_clipping(g, var):
return (g, var)
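# Sketch of how the clipping helpers above are typically applied to the
# (gradient, variable) pairs of a TF1 optimizer (`loss` and the learning rate
# are placeholders, not defined in this module):
#
# optimizer = tf.train.RMSPropOptimizer(learning_rate=1e-3)
# grads_and_vars = optimizer.compute_gradients(loss)
# clipped = [clip_bottleneck_gradient(g, v, limit=1e-5) for g, v in grads_and_vars]
# train_op = optimizer.apply_gradients(clipped)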
def concat_dict(acc, new_data):
"""Dictionary concatenation function."""
def to_array(kk):
if isinstance(kk, np.ndarray):
return kk
else:
return np.asarray([kk])
for k, v in new_data.iteritems():
if isinstance(v, dict):
if k in acc:
acc[k] = concat_dict(acc[k], v)
else:
acc[k] = concat_dict(dict(), v)
else:
v = to_array(v)
if k in acc:
acc[k] = np.concatenate([acc[k], v])
else:
acc[k] = np.copy(v)
return acc
def get_scores_and_plot(scorer,
data_abs_xy,
activations,
directory,
filename,
plot_graphs=True, # pylint: disable=unused-argument
nbins=20, # pylint: disable=unused-argument
cm="jet",
sort_by_score_60=True):
"""Plotting function."""
# Concatenate all trajectories
xy = data_abs_xy.reshape(-1, data_abs_xy.shape[-1])
act = activations.reshape(-1, activations.shape[-1])
n_units = act.shape[1]
# Get the rate-map for each unit
s = [
scorer.calculate_ratemap(xy[:, 0], xy[:, 1], act[:, i])
for i in xrange(n_units)
]
# Get the scores
score_60, score_90, max_60_mask, max_90_mask, sac = zip(
*[scorer.get_scores(rate_map) for rate_map in s])
# Separations
# separations = map(np.mean, max_60_mask)
# Sort by score if desired
if sort_by_score_60:
ordering = np.argsort(-np.array(score_60))
else:
ordering = range(n_units)
# Plot
cols = 16
rows = int(np.ceil(n_units / cols))
fig = plt.figure(figsize=(24, rows * 4))
for i in xrange(n_units):
rf = plt.subplot(rows * 2, cols, i + 1)
acr = plt.subplot(rows * 2, cols, n_units + i + 1)
if i < n_units:
index = ordering[i]
title = "%d (%.2f)" % (index, score_60[index])
# Plot the activation maps
scorer.plot_ratemap(s[index], ax=rf, title=title, cmap=cm)
# Plot the autocorrelation of the activation maps
scorer.plot_sac(
sac[index],
mask_params=max_60_mask[index],
ax=acr,
title=title,
cmap=cm)
# Save
if not os.path.exists(directory):
os.makedirs(directory)
with PdfPages(os.path.join(directory, filename), "w") as f:
plt.savefig(f, format="pdf")
plt.close(fig)
return (np.asarray(score_60), np.asarray(score_90),
np.asarray(map(np.mean, max_60_mask)),
np.asarray(map(np.mean, max_90_mask)))
| apache-2.0 |
looooo/paraBEM | examples/plots/lifting_line.py | 1 | 1404 | from __future__ import division
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import paraBEM
from paraBEM.liftingline import LiftingLine
from paraBEM.utils import check_path
# WingGeometry
spw = 2
numpos = 50
z_fac_1 = -0.3
z_fac_2 = -0.7
y = np.sin(np.linspace(0, np.pi/2, numpos)) * spw/2
x = [0. for _ in y]
z = [i**2 * z_fac_1 + i**6 * z_fac_2 for i in y]
mirror = lambda xyz: [xyz[0], -xyz[1], xyz[2]]
wing = list(zip(x, y, z))
wing = list(map(mirror, wing))[::-1] + list(wing)[1:]
wing = [paraBEM.Vector3(*i) for i in wing]
# LiftingLine
lifting_line = LiftingLine(wing)
lifting_line.v_inf = paraBEM.Vector3(1, 0, 0)
lifting_line.solve_for_best_gamma(1)
gamma = [i.best_gamma for i in lifting_line.segments]
gamma_max = max(gamma)
# Plot
gamma_el = lambda y: gamma_max * (1 - (y / spw * 2)**2)**(1 / 2)
mids = [[i.mids.x, i.mids.y, i.mids.z] for i in lifting_line.segments]
x, y, z = zip(*mids)
fig = plt.figure()
ax1 = fig.add_subplot(3, 1, 1)
ax1.plot(y, z)
ax2 = fig.add_subplot(3, 1, 2)
ax2.plot(y, x, marker="x")
ax3 = fig.add_subplot(3, 1, 3)
y_el = np.linspace(-1, 1, 400)
ax3.plot([-spw/2] + list(y) + [spw/2], [0] + gamma + [0], marker="x")
ax3.plot(y_el, list(map(gamma_el, y_el)))
plt.savefig(check_path("results/2d/liftingline.png"))
total = 0
for i in lifting_line.segments:
total += i.lift_factor * i.best_gamma
print(total)
| gpl-3.0 |
tdhopper/scikit-learn | examples/svm/plot_svm_scale_c.py | 223 | 5375 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different amount of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the amount of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different amount of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C1`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
# Jaques Grobler <jaques.grobler@inria.fr>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features / 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features / 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(n=n_samples, train_size=train_size,
n_iter=250, random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
DTUWindEnergy/Python4WindEnergy | lesson 3/results/ebra.py | 1 | 8402 | # -*- coding: utf-8 -*- <nbformat>3.0</nbformat>
# <headingcell level=1>
# Plotting with Matplotlib
# <headingcell level=2>
# Prepare for action
# <codecell>
import numpy as np
import scipy as sp
import sympy
# Pylab combines the pyplot functionality (for plotting) with the numpy
# functionality (for mathematics and for working with arrays) in a single namespace
# and aims to provide a closer MATLAB feel (the easy way). Note that this approach
# should only be used when doing some interactive quick and dirty data inspection.
# DO NOT USE THIS FOR SCRIPTS
#from pylab import *
# the convienient Matplotib plotting interface pyplot (the tidy/right way)
# use this for building scripts. The examples here will all use pyplot.
import matplotlib.pyplot as plt
# for using the matplotlib API directly (the hard and verbose way)
# use this when building applications, and/or backends
import matplotlib as mpl
# <markdowncell>
# How would you like the IPython notebook show your plots? In order to use the
# matplotlib IPython magic youre IPython notebook should be launched as
#
# ipython notebook --matplotlib=inline
#
# Make plots appear as a pop up window, chose the backend: 'gtk', 'inline', 'osx', 'qt', 'qt4', 'tk', 'wx'
#
# %matplotlib qt
#
# or inline the notebook (no panning, zooming through the plot). Not working in IPython 0.x
#
# %matplotlib inline
#
# <codecell>
# activate pop up plots
#%matplotlib qt
# or change to inline plots
# %matplotlib inline
# <headingcell level=3>
# Matplotlib documentation
# <markdowncell>
# Finding your own way (aka RTFM). Hint: there is search box available!
#
# * http://matplotlib.org/contents.html
#
# The Matplotlib API docs:
#
# * http://matplotlib.org/api/index.html
#
# Pyplot, object oriented plotting:
#
# * http://matplotlib.org/api/pyplot_api.html
# * http://matplotlib.org/api/pyplot_summary.html
#
# Extensive gallery with examples:
#
# * http://matplotlib.org/gallery.html
# <headingcell level=3>
# Tutorials for those who want to start playing
# <markdowncell>
# If reading manuals is too much for you, there is a very good tutorial available here:
#
# * http://nbviewer.ipython.org/github/jrjohansson/scientific-python-lectures/blob/master/Lecture-4-Matplotlib.ipynb
#
# Note that this tutorial uses
#
# from pylab import *
#
# which is usually not advised in more advanced script environments. When using
#
# import matplotlib.pyplot as plt
#
# you need to precede all plotting commands as used in the above tutorial with
#
# plt.
# <markdowncell>
# Give me more!
#
# [EuroScipy 2012 Matplotlib tutorial](http://www.loria.fr/~rougier/teaching/matplotlib/). Note that here the author uses ```from pylab import * ```. When using ```import matplotlib.pyplot as plt``` the plotting commands need to be preceded with ```plt.```
# <headingcell level=2>
# Plotting template starting point
# <codecell>
# some sample data
x = np.arange(-10,10,0.1)
# <markdowncell>
# To change the default plot configuration values.
# <codecell>
page_width_cm = 13
dpi = 200
inch = 2.54 # inch in cm
# setting global plot configuration using the RC configuration style
plt.rc('font', family='serif')
plt.rc('xtick', labelsize=12) # tick labels
plt.rc('ytick', labelsize=20) # tick labels
plt.rc('axes', labelsize=20) # axes labels
# If you don’t need LaTeX, don’t use it. It is slower to plot, and text
# looks just fine without. If you need it, e.g. for symbols, then use it.
#plt.rc('text', usetex=True) #<- P-E: Doesn't work on my Mac
# <codecell>
# create a figure instance, note that figure size is given in inches!
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8,6))
# set the big title (note aligment relative to figure)
fig.suptitle("suptitle 16, figure alignment", fontsize=16)
# actual plotting
ax.plot(x, x**2, label="label 12")
# set axes title (note aligment relative to axes)
ax.set_title("title 14, axes alignment", fontsize=14)
# axes labels
ax.set_xlabel('xlabel 12')
ax.set_ylabel(r'$y_{\alpha}$ 12', fontsize=8)
# legend
ax.legend(fontsize=12, loc="best")
# saving the figure in different formats
# fig.savefig('figure-%03i.png' % dpi, dpi=dpi)
# fig.savefig('figure.svg')
# fig.savefig('figure.eps')
# <codecell>
# following steps are only relevant when using figures as pop up windows (with %matplotlib qt)
# to update a figure with has been modified
fig.canvas.draw()
# show a figure
fig.show()
# <headingcell level=2>
# Exercise
# <markdowncell>
# The current section is about you trying to figure out how to do several plotting features. You should use the previously mentioned resources to find how to do that. In many cases, google is your friend!
# <markdowncell>
# * add a grid to the plot
# <codecell>
plt.plot(x,x**2)
plt.grid('on')
# <markdowncell>
# * change the location of the legend to different places
# <codecell>
plt.plot(x,x**2, label="label 12")
plt.legend(fontsize=12, loc="upper right")
# <markdowncell>
# * find a way to control the line type and color, marker type and color, control the frequency of the marks (`markevery`). See plot options at: http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot
# <codecell>
stride = max( int(len(x) / 20), 1)
plt.plot(x,x**2, 'ko-',color='forestgreen', markevery=stride,label="label 12")
plt.legend(fontsize=12, loc="upper center")
# <markdowncell>
# * add different sub-plots
# <codecell>
fig, axes = plt.subplots(nrows=2, ncols=1,sharex=True)
axes[0].plot(x,x**2)
axes[1].plot(x,-x**2)
# <markdowncell>
# * size the figure such that when included on an A4 page the fonts are given in their true size
# <codecell>
# matplotlib.rcParams.update({'font.size': 22})
fig, axes = plt.subplots(nrows=2, ncols=1,sharex=True)
axes[0].plot(x,x**2)
axes[1].plot(x,-x**2)
fig.set_size_inches(8.2,3) # using A4 width in inches?
fig.set_dpi(100)
for ax in axes:
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(12)
# ax[0].set('xtick', labelsize=12) # tick labels
# .rc('ytick', labelsize=20) # tick labels
# .rc('axes', labelsize=20) # axes labels
# fig.savefig('figure.pdf')
# <markdowncell>
# * make a contour plot
# <codecell>
X, Y = np.meshgrid(x,x)
plt.figure()
plt.contourf(X,Y,X*Y,linewidth=0.3,cmap=plt.get_cmap('hsv'),levels=np.arange(-1,1,0.1))
plt.show
# im=ax.contourf(x,y,ui,levels=np.arange(Umean-5*Ustd,Umean+5*Ustd,Ustd/30),cmap=plt.get_cmap('hsv'),linewidth=0.1)
# <markdowncell>
# * use twinx() to create a second axis on the right for the second plot
# <codecell>
plt.figure()
ax=plt.gca()
ax.plot(x,x**2)
ax2 = ax.twinx()
ax2.plot(x,x**4, 'r')
# <markdowncell>
# * add horizontal and vertical lines using axvline(), axhline()
# <codecell>
plt.figure()
plt.plot(x,x**2)
plt.axvline(2)
plt.axhline(10)
# <markdowncell>
# * autoformat dates for nice printing on the x-axis using fig.autofmt_xdate()
# <codecell>
import datetime
dates = np.array([datetime.datetime.now() + datetime.timedelta(days=i) for i in xrange(24)])
fig, ax = plt.subplots(nrows=1, ncols=1)
ax.plot(dates,xrange(24))
fig.autofmt_xdate()
# <headingcell level=2>
# Advanced exercises
# <markdowncell>
# We are going to play a bit with regression
# <markdowncell>
# * Create a vector x of equally spaced number between $x \in [0, 5\pi]$ of 1000 points (keyword: linspace)
# <codecell>
n=1000
x=np.linspace(0,5*np.pi,n)
# <markdowncell>
# * create a vector y, so that y=sin(x) with some random noise
# <codecell>
y = np.sin(x) +np.random.rand(n)-0.5
yth = np.sin(x)
# <markdowncell>
# * plot it like this: ![test](files/plt1.png)
# <codecell>
fig=plt.figure()
ax=plt.gca()
ax.plot(x,y,'b.')
ax.plot(x,yth,'k--',label=r'$y=sin(x)$')
# <markdowncell>
# Try to do a polynomial fit on y(x) with different polynomial degree (Use numpy.polyfit to obtain coefficients)
#
# Plot it like this (use np.poly1d(coef)(x) to plot polynomials) ![test](files/plt2.png)
# <codecell>
for order in xrange(9):
coeff=np.polyfit(x,y,order)
ax.plot(x,np.poly1d(coeff)(x),label='deg %d'%order)
# shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
# <codecell>
| apache-2.0 |
B3AU/waveTree | sklearn/utils/testing.py | 4 | 12125 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# License: BSD 3 clause
import inspect
import pkgutil
import warnings
import scipy as sp
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import sklearn
from sklearn.base import BaseEstimator
from .fixes import savemat
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises", "raises",
"with_setup", "assert_true", "assert_false", "assert_almost_equal",
"assert_array_equal", "assert_array_almost_equal",
"assert_array_less"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
# To remove when we support numpy 1.7
def assert_warns(warning_class, func, *args, **kw):
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
if not w[0].category is warning_class:
raise AssertionError("First warning for %s is not a "
"%s( is %s)"
% (func.__name__, warning_class, w[0]))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exception, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions"""
try:
function(*args, **kwargs)
raise AssertionError("Should have raised %r" % exception(message))
except exception as e:
error_message = str(e)
assert_in(message, error_message)
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict: contains data as
columns_dict[column_name] = array of data
dataname: name of data set
matfile: file-like object or file name
ordering: list of column_names, determines the ordering in the data set
Note: this function transposes all arrays, while fetch_mldata only
transposes 'data'; take that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
meta_estimators = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
other = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV"]
def all_estimators(include_meta_estimators=False, include_other=False,
type_filter=None):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
Whether to include meta-estimators that are somehow special and can
not be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
type_filter : string or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
module = __import__(modname, fromlist="dummy")
if ".tests." in modname:
continue
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_other:
estimators = [c for c in estimators if not c[0] in other]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in meta_estimators]
if type_filter == 'classifier':
estimators = [est for est in estimators
if issubclass(est[1], ClassifierMixin)]
elif type_filter == 'regressor':
estimators = [est for est in estimators
if issubclass(est[1], RegressorMixin)]
elif type_filter == 'transformer':
estimators = [est for est in estimators
if issubclass(est[1], TransformerMixin)]
elif type_filter == 'cluster':
estimators = [est for est in estimators
if issubclass(est[1], ClusterMixin)]
elif type_filter is not None:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# We sort in order to have reproducible test failures
return sorted(estimators)
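# Usage sketch: iterating over all classifiers to run a common check
# (`random_state=0` is arbitrary; some estimators may need extra constructor
# arguments, which is why meta-estimators are excluded by default):
#
# for name, Estimator in all_estimators(type_filter='classifier'):
#     est = Estimator()
#     set_random_state(est, random_state=0)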
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
matplotlib.pylab.figure()
except:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
| bsd-3-clause |
ahye/FYS2140-Resources | examples/animation/func_animate_sin.py | 1 | 1284 | #!/usr/bin/env python
"""
Created on Mon 2 Dec 2013
Example script showing how a sine wave can be animated using
function animation (FuncAnimation).
@author Benedicte Emilie Braekken
"""
from numpy import *
from matplotlib.pyplot import *
from matplotlib import animation
def wave( x, t ):
'''
The function describes a sine wave at time t and position x.
'''
omega = 1 # Angular frequency
k = 1 # Wave number
return sin( k * x - omega * t )
T = 10
dt = 0.01
nx = 1e3
nt = int( T / dt ) # Number of time steps
t = 0
all_waves = [] # Empty list for storing the wave states
x = linspace( -pi, pi, nx )
while t < T:
# Append a new wave state on each pass through the loop
all_waves.append( wave( x, t ) )
t += dt
# Draw the initial state
fig = figure() # Keep a reference to the figure
line, = plot( x, all_waves[0] )
draw()
# Constants for the animation
FPS = 60 # Frames per second
inter = 1. / FPS # Time between frames
def init():
'''
Initialise the animation with an empty line.
'''
line.set_data( [], [] )
return line,
def get_frame( frame ):
'''
Set the line data to wave state number `frame`.
'''
line.set_data( x, all_waves[ frame ] )
return line,
anim = animation.FuncAnimation( fig, get_frame, init_func=init,
frames=nt, interval=inter, blit=True )
show()
| mit |
briandalessandro/courses | deeplearning1/nbs/utils/utils.py | 8 | 7644 | from __future__ import division,print_function
import math, os, json, sys, re
import cPickle as pickle
from glob import glob
import numpy as np
from matplotlib import pyplot as plt
from operator import itemgetter, attrgetter, methodcaller
from collections import OrderedDict
import itertools
from itertools import chain
import pandas as pd
import PIL
from PIL import Image
from numpy.random import random, permutation, randn, normal, uniform, choice
from numpy import newaxis
import scipy
from scipy import misc, ndimage
from scipy.ndimage.interpolation import zoom
from scipy.ndimage import imread
from sklearn.metrics import confusion_matrix
import bcolz
from sklearn.preprocessing import OneHotEncoder
from sklearn.manifold import TSNE
from IPython.lib.display import FileLink
import theano
from theano import shared, tensor as T
from theano.tensor.nnet import conv2d, nnet
from theano.tensor.signal import pool
import keras
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.utils import np_utils
from keras.utils.np_utils import to_categorical
from keras.models import Sequential, Model
from keras.layers import Input, Embedding, Reshape, merge, LSTM, Bidirectional
from keras.layers import TimeDistributed, Activation, SimpleRNN, GRU
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.regularizers import l2, activity_l2, l1, activity_l1
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD, RMSprop, Adam
from keras.utils.layer_utils import layer_from_config
from keras.metrics import categorical_crossentropy, categorical_accuracy
from keras.layers.convolutional import *
from keras.preprocessing import image, sequence
from keras.preprocessing.text import Tokenizer
from vgg16 import *
from vgg16bn import *
np.set_printoptions(precision=4, linewidth=100)
to_bw = np.array([0.299, 0.587, 0.114])
def gray(img):
return np.rollaxis(img,0,3).dot(to_bw)
def to_plot(img):
return np.rollaxis(img, 0, 3).astype(np.uint8)
def plot(img):
plt.imshow(to_plot(img))
def floor(x):
return int(math.floor(x))
def ceil(x):
return int(math.ceil(x))
def plots(ims, figsize=(12,6), rows=1, interp=False, titles=None):
if type(ims[0]) is np.ndarray:
ims = np.array(ims).astype(np.uint8)
if (ims.shape[-1] != 3):
ims = ims.transpose((0,2,3,1))
f = plt.figure(figsize=figsize)
for i in range(len(ims)):
sp = f.add_subplot(rows, len(ims)//rows, i+1)
if titles is not None:
sp.set_title(titles[i], fontsize=18)
plt.imshow(ims[i], interpolation=None if interp else 'none')
def do_clip(arr, mx):
clipped = np.clip(arr, (1-mx)/1, mx)
return clipped/clipped.sum(axis=1)[:, np.newaxis]
def get_batches(dirname, gen=image.ImageDataGenerator(), shuffle=True, batch_size=4, class_mode='categorical',
target_size=(224,224)):
return gen.flow_from_directory(dirname, target_size=target_size,
class_mode=class_mode, shuffle=shuffle, batch_size=batch_size)
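# Usage sketch (Keras 1.x API, matching the imports above): the path points at a
# directory with one sub-folder per class; 'data/dogscats/...' is a placeholder.
#
# trn_batches = get_batches('data/dogscats/train/', batch_size=64)
# val_batches = get_batches('data/dogscats/valid/', shuffle=False, batch_size=64)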
def onehot(x):
return to_categorical(x)
def wrap_config(layer):
return {'class_name': layer.__class__.__name__, 'config': layer.get_config()}
def copy_layer(layer): return layer_from_config(wrap_config(layer))
def copy_layers(layers): return [copy_layer(layer) for layer in layers]
def copy_weights(from_layers, to_layers):
for from_layer,to_layer in zip(from_layers, to_layers):
to_layer.set_weights(from_layer.get_weights())
def copy_model(m):
res = Sequential(copy_layers(m.layers))
copy_weights(m.layers, res.layers)
return res
def insert_layer(model, new_layer, index):
res = Sequential()
for i,layer in enumerate(model.layers):
if i==index: res.add(new_layer)
copied = layer_from_config(wrap_config(layer))
res.add(copied)
copied.set_weights(layer.get_weights())
return res
def adjust_dropout(weights, prev_p, new_p):
scal = (1-prev_p)/(1-new_p)
return [o*scal for o in weights]
def get_data(path, target_size=(224,224)):
batches = get_batches(path, shuffle=False, batch_size=1, class_mode=None, target_size=target_size)
return np.concatenate([batches.next() for i in range(batches.nb_sample)])
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
(This function is copied from the scikit docs.)
"""
plt.figure()
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j], horizontalalignment="center", color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def save_array(fname, arr):
c=bcolz.carray(arr, rootdir=fname, mode='w')
c.flush()
def load_array(fname):
return bcolz.open(fname)[:]
def mk_size(img, r2c):
r,c,_ = img.shape
curr_r2c = r/c
new_r, new_c = r,c
if r2c>curr_r2c:
new_r = floor(c*r2c)
else:
new_c = floor(r/r2c)
arr = np.zeros((new_r, new_c, 3), dtype=np.float32)
r2=(new_r-r)//2
c2=(new_c-c)//2
arr[floor(r2):floor(r2)+r,floor(c2):floor(c2)+c] = img
return arr
def mk_square(img):
x,y,_ = img.shape
maxs = max(img.shape[:2])
y2=(maxs-y)//2
x2=(maxs-x)//2
arr = np.zeros((maxs,maxs,3), dtype=np.float32)
arr[floor(x2):floor(x2)+x,floor(y2):floor(y2)+y] = img
return arr
def vgg_ft(out_dim):
vgg = Vgg16()
vgg.ft(out_dim)
model = vgg.model
return model
def vgg_ft_bn(out_dim):
vgg = Vgg16BN()
vgg.ft(out_dim)
model = vgg.model
return model
def get_classes(path):
batches = get_batches(path+'train', shuffle=False, batch_size=1)
val_batches = get_batches(path+'valid', shuffle=False, batch_size=1)
test_batches = get_batches(path+'test', shuffle=False, batch_size=1)
return (val_batches.classes, batches.classes, onehot(val_batches.classes), onehot(batches.classes),
val_batches.filenames, batches.filenames, test_batches.filenames)
def split_at(model, layer_type):
layers = model.layers
layer_idx = [index for index,layer in enumerate(layers)
if type(layer) is layer_type][-1]
return layers[:layer_idx+1], layers[layer_idx+1:]
class MixIterator(object):
def __init__(self, iters):
self.iters = iters
self.multi = type(iters) is list
if self.multi:
self.N = sum([it[0].N for it in self.iters])
else:
self.N = sum([it.N for it in self.iters])
def reset(self):
for it in self.iters: it.reset()
def __iter__(self):
return self
def next(self, *args, **kwargs):
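        # Draw one batch from every wrapped iterator and concatenate inputs and targets
        # into a single combined batch.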
        if self.multi:
            nexts = [[next(it) for it in o] for o in self.iters]
            n0s = np.concatenate([n[0] for o in nexts for n in o])
            n1s = np.concatenate([n[1] for o in nexts for n in o])
            return (n0s, n1s)
else:
nexts = [next(it) for it in self.iters]
n0 = np.concatenate([n[0] for n in nexts])
n1 = np.concatenate([n[1] for n in nexts])
return (n0, n1)
| apache-2.0 |
rnowling/pop-gen-models | single-pop/single_pop.py | 1 | 3379 | import sys
import numpy as np
import numpy.random as npr
from sklearn.neighbors.kde import KernelDensity
from scipy.special import gammaln
import matplotlib.pyplot as plt
from calculate_phist import read_counts
from calculate_phist import normalize_haplotypes
def log_factorial(n):
return gammaln(n+1)
def log_multinomial(xs, ps):
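    # Multinomial log-likelihood: log n! - sum_i log x_i! + sum_i x_i log p_i,
    # with a small constant added to avoid log(0).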
n = np.sum(xs)
log_prob = log_factorial(n) - np.sum(log_factorial(xs)) + np.sum(xs * np.log(ps + 0.0000000000001))
return log_prob
class KDE_MCMC_Sampler(object):
def __init__(self, observed_counts):
"""
Observed counts is 3D matrix of pop, locus, haplotype
"""
self.observed_counts = observed_counts
self.individual_counts = observed_counts.sum(axis=2)
self.observed_frequencies = normalize_haplotypes(observed_counts)
self.n_loci, self.n_pop, self.n_haplotypes = self.observed_counts.shape
# from bamova
self.DWEIGHT = 1.0
self.DADD = 0.00001
self.SMALL_NUM = 0.0000000000001
print "initializing frequencies"
self.freq = np.zeros((self.n_loci, self.n_haplotypes))
for l in xrange(self.n_loci):
self.freq[l, :] = self.sample_locus_freq(self.observed_frequencies[l, 0, :])
def sample_locus_freq(self, freq):
alphas = self.DWEIGHT * freq + self.DADD + self.SMALL_NUM
return npr.dirichlet(alphas)
def locus_prob(self, locus_obs_counts, locus_freq):
log_prob_sum = 0.0
for p in xrange(self.n_pop):
log_prob_sum += log_multinomial(locus_obs_counts[p], locus_freq)
return log_prob_sum
def step(self):
total_log_prob = 0.0
for l in xrange(self.n_loci):
locus_indiv_counts = self.individual_counts[l, :]
locus_obs_counts = self.observed_counts[l, :, :]
log_prob = self.locus_prob(locus_obs_counts, self.freq[l, :])
proposed_locus_freq = self.sample_locus_freq(self.freq[l, :])
proposed_log_prob = self.locus_prob(locus_obs_counts, proposed_locus_freq)
log_prob_ratio = proposed_log_prob - log_prob
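            # Metropolis acceptance: always keep a better proposal; otherwise accept
            # with probability exp(log_prob_ratio).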
log_r = np.log(npr.random())
if proposed_log_prob >= log_prob or log_r <= log_prob_ratio:
self.freq[l, :] = proposed_locus_freq
log_prob = proposed_log_prob
total_log_prob += log_prob
locus_prob = []
for l in xrange(self.n_loci):
            log_prob = self.locus_prob(self.observed_counts[l, :, :], self.freq[l, :])
locus_prob.append(log_prob)
return self.freq, total_log_prob, locus_prob
def plot_log_prob(flname, log_probs):
plt.clf()
plt.hold(True)
plt.hist(log_probs, bins=30)
plt.xlabel("Log Probability", fontsize=16)
plt.xlim([min(log_probs), 0.0])
plt.ylabel("Occurrences (Loci)", fontsize=16)
    plt.savefig(flname, dpi=200)
def simulate(occur_fl, n_steps, plot_flname, prob_flname):
print "reading occurrences"
observed_counts = read_counts(occur_fl)
individual_counts = observed_counts.sum(axis=2)
observed_frequencies = normalize_haplotypes(observed_counts)
sampler = KDE_MCMC_Sampler(observed_counts)
fl = open(prob_flname, "w")
locus_log_prob = []
for i in xrange(n_steps):
freq, log_prob, locus_log_prob = sampler.step()
print "step", i, "log prob", log_prob
if i % 100 == 0:
for j, prob in enumerate(locus_log_prob):
fl.write("%s %s %s\n" % (i, j, prob))
fl.close()
plot_log_prob(plot_flname, locus_log_prob)
if __name__ == "__main__":
occur_fl = sys.argv[1]
n_steps = int(sys.argv[2])
plot_flname = sys.argv[3]
prob_flname = sys.argv[4]
simulate(occur_fl, n_steps, plot_flname, prob_flname)
| apache-2.0 |
elvandy/nltools | nltools/data/adjacency.py | 1 | 34227 | from __future__ import division
'''
This data class is for working with similarity/dissimilarity matrices
'''
__author__ = ["Luke Chang"]
__license__ = "MIT"
import os
import pandas as pd
import numpy as np
import six
from copy import deepcopy
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.manifold import MDS
from sklearn.utils import check_random_state
from scipy.spatial.distance import squareform
from scipy.stats import ttest_1samp
import seaborn as sns
import matplotlib.pyplot as plt
from nltools.stats import (correlation_permutation,
one_sample_permutation,
two_sample_permutation,
summarize_bootstrap,
matrix_permutation)
from nltools.stats import regress as regression
from nltools.plotting import (plot_stacked_adjacency,
plot_silhouette)
from nltools.utils import (all_same,
attempt_to_import,
concatenate,
_bootstrap_apply_func)
from .design_matrix import Design_Matrix
from joblib import Parallel, delayed
# Optional dependencies
nx = attempt_to_import('networkx', 'nx')
MAX_INT = np.iinfo(np.int32).max
class Adjacency(object):
'''
Adjacency is a class to represent Adjacency matrices as a vector rather
than a 2-dimensional matrix. This makes it easier to perform data
manipulation and analyses.
Args:
data: pandas data instance or list of files
matrix_type: (str) type of matrix. Possible values include:
['distance','similarity','directed','distance_flat',
'similarity_flat','directed_flat']
Y: Pandas DataFrame of training labels
**kwargs: Additional keyword arguments
'''
def __init__(self, data=None, Y=None, matrix_type=None, labels=None,
**kwargs):
if matrix_type is not None:
if matrix_type.lower() not in ['distance','similarity','directed',
'distance_flat','similarity_flat',
'directed_flat']:
raise ValueError("matrix_type must be [None,'distance', "
"'similarity','directed','distance_flat', "
"'similarity_flat','directed_flat']")
if data is None:
self.data = np.array([])
self.matrix_type = 'empty'
self.is_single_matrix = np.nan
self.issymmetric = np.nan
elif isinstance(data, list):
if isinstance(data[0], Adjacency):
tmp = concatenate(data)
for item in ['data', 'matrix_type', 'Y','issymmetric']:
setattr(self, item, getattr(tmp,item))
else:
d_all = []; symmetric_all = []; matrix_type_all = []
for d in data:
data_tmp, issymmetric_tmp, matrix_type_tmp, _ = self._import_single_data(d, matrix_type=matrix_type)
d_all.append(data_tmp)
symmetric_all.append(issymmetric_tmp)
matrix_type_all.append(matrix_type_tmp)
if not all_same(symmetric_all):
raise ValueError('Not all matrices are of the same '
'symmetric type.')
if not all_same(matrix_type_all):
raise ValueError('Not all matrices are of the same matrix '
'type.')
self.data = np.array(d_all)
self.issymmetric = symmetric_all[0]
self.matrix_type = matrix_type_all[0]
self.is_single_matrix = False
else:
self.data, self.issymmetric, self.matrix_type, self.is_single_matrix = self._import_single_data(data, matrix_type=matrix_type)
if Y is not None:
if isinstance(Y, six.string_types):
if os.path.isfile(Y):
Y = pd.read_csv(Y, header=None, index_col=None)
if isinstance(Y, pd.DataFrame):
if self.data.shape[0] != len(Y):
raise ValueError("Y does not match the correct size of "
"data")
self.Y = Y
else:
raise ValueError("Make sure Y is a pandas data frame.")
else:
self.Y = pd.DataFrame()
if labels is not None:
if not isinstance(labels, (list, np.ndarray)):
raise ValueError( "Make sure labels is a list or numpy array.")
if self.is_single_matrix:
if len(labels) != self.square_shape()[0]:
raise ValueError('Make sure the length of labels matches the shape of data.')
self.labels = deepcopy(labels)
else:
if len(labels) != len(self):
if len(labels) != self.square_shape()[0]:
raise ValueError('Make sure length of labels either '
'matches the number of Adjacency '
'matrices or the size of a single '
'matrix.')
else:
self.labels = list(labels) * len(self)
else:
if np.all(np.array([len(x) for x in labels]) !=self.square_shape()[0]):
raise ValueError("All lists of labels must be same length as shape of data.")
self.labels = deepcopy(labels)
else:
self.labels = None
def __repr__(self):
return ("%s.%s(shape=%s, square_shape=%s, Y=%s, is_symmetric=%s,"
"matrix_type=%s)") % (
self.__class__.__module__,
self.__class__.__name__,
self.shape(),
self.square_shape(),
len(self.Y),
self.issymmetric,
self.matrix_type)
def __getitem__(self,index):
new = self.copy()
if isinstance(index, int):
new.data = np.array(self.data[index, :]).flatten()
new.is_single_matrix = True
else:
new.data = np.array(self.data[index, :])
if not self.Y.empty:
new.Y = self.Y.iloc[index]
return new
def __len__(self):
if self.is_single_matrix:
return 1
else:
return self.data.shape[0]
def __iter__(self):
for x in range(len(self)):
yield self[x]
def __add__(self, y):
new = deepcopy(self)
if isinstance(y, (int, float)):
new.data = new.data + y
if isinstance(y, Adjacency):
if self.shape() != y.shape():
raise ValueError('Both Adjacency() instances need to be the '
'same shape.')
new.data = new.data + y.data
return new
def __sub__(self, y):
new = deepcopy(self)
if isinstance(y, (int, float)):
new.data = new.data - y
if isinstance(y, Adjacency):
if self.shape() != y.shape():
raise ValueError('Both Adjacency() instances need to be the '
'same shape.')
new.data = new.data - y.data
return new
def __mul__(self, y):
new = deepcopy(self)
if isinstance(y, (int, float)):
new.data = new.data * y
if isinstance(y, Adjacency):
if self.shape() != y.shape():
raise ValueError('Both Adjacency() instances need to be the '
'same shape.')
new.data = np.multiply(new.data, y.data)
return new
def _import_single_data(self, data, matrix_type=None):
''' Helper function to import single data matrix.'''
if isinstance(data, six.string_types):
if os.path.isfile(data):
data = pd.read_csv(data)
else:
raise ValueError('Make sure you have specified a valid file '
'path.')
def test_is_single_matrix(data):
if len(data.shape) == 1:
return True
else:
return False
if matrix_type is not None:
if matrix_type.lower() == 'distance_flat':
matrix_type = 'distance'
data = np.array(data)
issymmetric = True
is_single_matrix = test_is_single_matrix(data)
elif matrix_type.lower() == 'similarity_flat':
matrix_type = 'similarity'
data = np.array(data)
issymmetric = True
is_single_matrix = test_is_single_matrix(data)
elif matrix_type.lower() == 'directed_flat':
matrix_type = 'directed'
data = np.array(data).flatten()
issymmetric = False
is_single_matrix = test_is_single_matrix(data)
elif matrix_type.lower() in ['distance', 'similarity', 'directed']:
if data.shape[0] != data.shape[1]:
raise ValueError('Data matrix must be square')
data = np.array(data)
matrix_type = matrix_type.lower()
if matrix_type in ['distance', 'similarity']:
issymmetric = True
data = data[np.triu_indices(data.shape[0], k=1)]
else:
issymmetric = False
if isinstance(data, pd.DataFrame):
data = data.values.flatten()
elif isinstance(data, np.ndarray):
data = data.flatten()
is_single_matrix = True
else:
if len(data.shape) == 1: # Single Vector
try:
data = squareform(data)
except ValueError:
print('Data is not flattened upper triangle from '
'similarity/distance matrix or flattened directed '
'matrix.')
is_single_matrix = True
elif data.shape[0] == data.shape[1]: # Square Matrix
is_single_matrix = True
else: # Rectangular Matrix
data_all = deepcopy(data)
try:
data = squareform(data_all[0, :])
except ValueError:
print('Data is not flattened upper triangle from multiple '
'similarity/distance matrices or flattened directed '
'matrices.')
is_single_matrix = False
# Test if matrix is symmetrical
if np.all(data[np.triu_indices(data.shape[0], k=1)] == data.T[np.triu_indices(data.shape[0], k=1)]):
issymmetric = True
else:
issymmetric = False
# Determine matrix type
if issymmetric:
if np.sum(np.diag(data)) == 0:
matrix_type = 'distance'
elif np.sum(np.diag(data)) == data.shape[0]:
matrix_type = 'similarity'
data = data[np.triu_indices(data.shape[0], k=1)]
else:
matrix_type = 'directed'
data = data.flatten()
if not is_single_matrix:
data = data_all
return (data, issymmetric, matrix_type, is_single_matrix)
def isempty(self):
'''Check if Adjacency object is empty'''
        return bool(self.matrix_type == 'empty')
def squareform(self):
'''Convert adjacency back to squareform'''
if self.issymmetric:
if self.is_single_matrix:
return squareform(self.data)
else:
return [squareform(x.data) for x in self]
else:
if self.is_single_matrix:
return self.data.reshape(int(np.sqrt(self.data.shape[0])),
int(np.sqrt(self.data.shape[0])))
else:
return [x.data.reshape(int(np.sqrt(x.data.shape[0])),
int(np.sqrt(x.data.shape[0]))) for x in self]
def plot(self, limit=3, *args, **kwargs):
''' Create Heatmap of Adjacency Matrix'''
if self.is_single_matrix:
f, a = plt.subplots(nrows=1, figsize=(7, 5))
if self.labels is None:
sns.heatmap(self.squareform(), square=True, ax=a,
*args, **kwargs)
else:
sns.heatmap(self.squareform(), square=True, ax=a,
xticklabels=self.labels,
yticklabels=self.labels,
*args, **kwargs)
else:
n_subs = np.minimum(len(self), limit)
f, a = plt.subplots(nrows=n_subs, figsize=(7, len(self)*5))
if self.labels is None:
for i in range(n_subs):
sns.heatmap(self[i].squareform(), square=True, ax=a[i],
*args, **kwargs)
else:
for i in range(n_subs):
sns.heatmap(self[i].squareform(), square=True,
xticklabels=self.labels[i],
yticklabels=self.labels[i],
ax=a[i], *args, **kwargs)
return f
def mean(self, axis=0):
''' Calculate mean of Adjacency
Args:
axis: calculate mean over features (0) or data (1).
For data it will be on upper triangle.
Returns:
mean: float if single, adjacency if axis=0, np.array if axis=1
and multiple
'''
if self.is_single_matrix:
return np.mean(self.data)
else:
if axis == 0:
return Adjacency(data=np.mean(self.data, axis=axis),
matrix_type=self.matrix_type + '_flat')
elif axis == 1:
return np.mean(self.data, axis=axis)
def std(self, axis=0):
''' Calculate standard deviation of Adjacency
Args:
axis: calculate std over features (0) or data (1).
For data it will be on upper triangle.
Returns:
std: float if single, adjacency if axis=0, np.array if axis=1 and
multiple
'''
if self.is_single_matrix:
return np.std(self.data)
else:
if axis == 0:
return Adjacency(data=np.std(self.data, axis=axis),
matrix_type=self.matrix_type + '_flat')
elif axis == 1:
return np.std(self.data, axis=axis)
def shape(self):
''' Calculate shape of data. '''
return self.data.shape
def square_shape(self):
''' Calculate shape of squareform data. '''
        if self.matrix_type == 'empty':
return np.array([])
else:
if self.is_single_matrix:
return self.squareform().shape
else:
return self[0].squareform().shape
def copy(self):
''' Create a copy of Adjacency object.'''
return deepcopy(self)
def append(self, data):
''' Append data to Adjacency instance
Args:
data: Adjacency instance to append
Returns:
out: new appended Adjacency instance
'''
if not isinstance(data, Adjacency):
raise ValueError('Make sure data is a Adjacency instance.')
if self.isempty():
out = data.copy()
else:
out = self.copy()
if self.square_shape() != data.square_shape():
raise ValueError('Data is not the same shape as Adjacency '
'instance.')
out.data = np.vstack([self.data, data.data])
out.is_single_matrix = False
if out.Y.size:
out.Y = self.Y.append(data.Y)
return out
def write(self, file_name, method='long'):
''' Write out Adjacency object to csv file.
Args:
file_name (str): name of file name to write
method (str): method to write out data ['long','square']
'''
if method not in ['long', 'square']:
raise ValueError('Make sure method is ["long","square"].')
if self.is_single_matrix:
            if method == 'long':
out = pd.DataFrame(self.data).to_csv(file_name, index=None)
            elif method == 'square':
out = pd.DataFrame(self.squareform()).to_csv(file_name,
index=None)
else:
            if method == 'long':
out = pd.DataFrame(self.data).to_csv(file_name, index=None)
            elif method == 'square':
raise NotImplementedError('Need to decide how we should write '
'out multiple matrices. As separate '
'files?')
def similarity(self, data, plot=False, perm_type='2d', n_permute=5000, metric='spearman', **kwargs):
''' Calculate similarity between two Adjacency matrices.
Default is to use spearman correlation and permutation test.
Args:
data: Adjacency data, or 1-d array same size as self.data
perm_type: '1d','2d', or None
metric: 'spearman','pearson','kendall'
'''
if not isinstance(data, Adjacency):
data2 = Adjacency(data)
else:
data2 = data.copy()
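        # Dispatch on permutation type: None skips permutation (n_permute=0), '1d' permutes the
        # flattened values (correlation_permutation), '2d' permutes the square matrix (matrix_permutation).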
if perm_type is None:
n_permute=0
similarity_func = correlation_permutation
elif perm_type == '1d':
similarity_func = correlation_permutation
elif perm_type == '2d':
similarity_func = matrix_permutation
if self.is_single_matrix:
if plot:
plot_stacked_adjacency(self, data)
return similarity_func(self.data, data2.data, metric=metric, n_permute=n_permute, **kwargs)
else:
if plot:
_, a = plt.subplots(len(self))
for i in a:
plot_stacked_adjacency(self, data, ax=i)
return [similarity_func(x.data, data2.data, metric=metric, n_permute=n_permute, **kwargs) for x in self]
def distance(self, method='correlation', **kwargs):
''' Calculate distance between images within an Adjacency() instance.
Args:
method: type of distance metric (can use any scikit learn or
sciypy metric)
Returns:
dist: Outputs a 2D distance matrix.
'''
return Adjacency(pairwise_distances(self.data, metric=method, **kwargs),
matrix_type='distance')
def threshold(self, upper=None, lower=None, binarize=False):
'''Threshold Adjacency instance. Provide upper and lower values or
percentages to perform two-sided thresholding. Binarize will return
a mask image respecting thresholds if provided, otherwise respecting
every non-zero value.
Args:
upper: (float or str) Upper cutoff for thresholding. If string
will interpret as percentile; can be None for one-sided
thresholding.
lower: (float or str) Lower cutoff for thresholding. If string
will interpret as percentile; can be None for one-sided
thresholding.
binarize (bool): return binarized image respecting thresholds if
provided, otherwise binarize on every non-zero value;
default False
Returns:
Adjacency: thresholded Adjacency instance
'''
b = self.copy()
if isinstance(upper, six.string_types):
            if upper[-1] == '%':
upper = np.percentile(b.data, float(upper[:-1]))
if isinstance(lower, six.string_types):
            if lower[-1] == '%':
lower = np.percentile(b.data, float(lower[:-1]))
if upper and lower:
b.data[(b.data < upper) & (b.data > lower)] = 0
elif upper and not lower:
b.data[b.data < upper] = 0
elif lower and not upper:
b.data[b.data > lower] = 0
if binarize:
b.data[b.data != 0] = 1
return b
def to_graph(self):
''' Convert Adjacency into networkx graph. only works on
single_matrix for now.'''
if self.is_single_matrix:
if self.matrix_type == 'directed':
G = nx.DiGraph(self.squareform())
else:
G = nx.Graph(self.squareform())
if self.labels is not None:
labels = {x:y for x,y in zip(G.nodes,self.labels)}
nx.relabel_nodes(G, labels, copy=False)
return G
else:
raise NotImplementedError('This function currently only works on '
'single matrices.')
def ttest(self, permutation=False, **kwargs):
''' Calculate ttest across samples.
Args:
permutation: (bool) Run ttest as permutation. Note this can be very slow.
Returns:
out: (dict) contains Adjacency instances of t values (or mean if
running permutation) and Adjacency instance of p values.
'''
if self.is_single_matrix:
raise ValueError('t-test cannot be run on single matrices.')
if permutation:
t = []; p = []
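            # Run a one-sample permutation test separately on every element of the
            # flattened matrices (this is why the permutation option can be very slow).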
for i in range(self.data.shape[1]):
stats = one_sample_permutation(self.data[:, i], **kwargs)
t.append(stats['mean'])
p.append(stats['p'])
t = Adjacency(np.array(t))
p = Adjacency(np.array(p))
else:
t = self.mean().copy()
p = deepcopy(t)
t.data, p.data = ttest_1samp(self.data, 0, 0)
return {'t': t, 'p':p}
def plot_label_distance(self, labels=None, ax=None):
''' Create a violin plot indicating within and between label distance
Args:
labels (np.array): numpy array of labels to plot
Returns:
violin plot handles
'''
if not self.is_single_matrix:
raise ValueError('This function only works on single adjacency '
'matrices.')
distance = pd.DataFrame(self.squareform())
if labels is None:
labels = np.array(deepcopy(self.labels))
else:
if len(labels) != distance.shape[0]:
raise ValueError('Labels must be same length as distance matrix')
out = pd.DataFrame(columns=['Distance', 'Group', 'Type'], index=None)
for i in np.unique(labels):
tmp_w = pd.DataFrame(columns=out.columns, index=None)
tmp_w['Distance'] = distance.loc[labels == i, labels == i].values[np.triu_indices(sum(labels == i), k=1)]
tmp_w['Type'] = 'Within'
tmp_w['Group'] = i
tmp_b = pd.DataFrame(columns=out.columns, index=None)
tmp_b['Distance'] = distance.loc[labels != i, labels != i].values[np.triu_indices(sum(labels == i), k=1)]
tmp_b['Type'] = 'Between'
tmp_b['Group'] = i
out = out.append(tmp_w).append(tmp_b)
f = sns.violinplot(x="Group", y="Distance", hue="Type", data=out, split=True, inner='quartile',
palette={"Within": "lightskyblue", "Between": "red"}, ax=ax)
f.set_ylabel('Average Distance')
f.set_title('Average Group Distance')
return f
def stats_label_distance(self, labels=None, n_permute=5000, n_jobs=-1):
''' Calculate permutation tests on within and between label distance.
Args:
labels (np.array): numpy array of labels to plot
n_permute (int): number of permutations to run (default=5000)
Returns:
dict: dictionary of within and between group differences
and p-values
'''
if not self.is_single_matrix:
raise ValueError('This function only works on single adjacency '
'matrices.')
distance = pd.DataFrame(self.squareform())
        if labels is None:
            labels = np.array(deepcopy(self.labels))
else:
if len(labels) != distance.shape[0]:
raise ValueError('Labels must be same length as distance matrix')
out = pd.DataFrame(columns=['Distance', 'Group', 'Type'], index=None)
for i in np.unique(labels):
tmp_w = pd.DataFrame(columns=out.columns, index=None)
tmp_w['Distance'] = distance.loc[labels == i, labels == i].values[np.triu_indices(sum(labels == i), k=1)]
tmp_w['Type'] = 'Within'
tmp_w['Group'] = i
tmp_b = pd.DataFrame(columns=out.columns, index=None)
tmp_b['Distance'] = distance.loc[labels == i, labels != i].values.flatten()
tmp_b['Type'] = 'Between'
tmp_b['Group'] = i
out = out.append(tmp_w).append(tmp_b)
stats = dict()
for i in np.unique(labels):
# Within group test
tmp1 = out.loc[(out['Group'] == i) & (out['Type'] == 'Within'), 'Distance']
tmp2 = out.loc[(out['Group'] == i) & (out['Type'] == 'Between'), 'Distance']
stats[str(i)] = two_sample_permutation(tmp1, tmp2,
n_permute=n_permute, n_jobs=n_jobs)
return stats
def plot_silhouette(self, labels=None, ax=None, permutation_test=True,
n_permute=5000, **kwargs):
'''Create a silhouette plot'''
distance = pd.DataFrame(self.squareform())
if labels is None:
labels = np.array(deepcopy(self.labels))
else:
if len(labels) != distance.shape[0]:
raise ValueError('Labels must be same length as distance matrix')
        (f, outAll) = plot_silhouette(distance, labels, ax=ax,
                                      permutation_test=permutation_test,
                                      n_permute=n_permute, **kwargs)
return (f,outAll)
def bootstrap(self, function, n_samples=5000, save_weights=False,
n_jobs=-1, random_state=None, *args, **kwargs):
'''Bootstrap an Adjacency method.
Example Useage:
b = dat.bootstrap('mean', n_samples=5000)
b = dat.bootstrap('predict', n_samples=5000, algorithm='ridge')
b = dat.bootstrap('predict', n_samples=5000, save_weights=True)
Args:
function: (str) method to apply to data for each bootstrap
n_samples: (int) number of samples to bootstrap with replacement
save_weights: (bool) Save each bootstrap iteration
(useful for aggregating many bootstraps on a cluster)
n_jobs: (int) The number of CPUs to use to do the computation.
-1 means all CPUs.Returns:
output: summarized studentized bootstrap output
'''
random_state = check_random_state(random_state)
seeds = random_state.randint(MAX_INT, size=n_samples)
bootstrapped = Parallel(n_jobs=n_jobs)(
delayed(_bootstrap_apply_func)(self,
function, random_state=seeds[i], *args, **kwargs)
for i in range(n_samples))
bootstrapped = Adjacency(bootstrapped)
return summarize_bootstrap(bootstrapped, save_weights=save_weights)
def plot_mds(self, n_components=2, metric=True, labels_color=None,
cmap=plt.cm.hot_r, n_jobs=-1, view=(30, 20),
figsize = [12,8], ax = None, *args, **kwargs):
''' Plot Multidimensional Scaling
Args:
n_components: (int) Number of dimensions to project (can be 2 or 3)
metric: (bool) Perform metric or non-metric dimensional scaling; default
labels_color: (str) list of colors for labels, if len(1) then make all same color
n_jobs: (int) Number of parallel jobs
view: (tuple) view for 3-Dimensional plot; default (30,20)
Returns:
fig: returns matplotlib figure
'''
if self.matrix_type != 'distance':
raise ValueError("MDS only works on distance matrices.")
if not self.is_single_matrix:
raise ValueError("MDS only works on single matrices.")
if n_components not in [2,3]:
raise ValueError('Cannot plot {0}-d image'.format(n_components))
if labels_color is not None:
if self.labels is None:
raise ValueError("Make sure that Adjacency object has labels specified.")
if len(self.labels) != len(labels_color):
raise ValueError("Length of labels_color must match self.labels.")
# Run MDS
mds = MDS(n_components=n_components, metric=metric, n_jobs=n_jobs,
dissimilarity="precomputed", *args, **kwargs)
proj = mds.fit_transform(self.squareform())
# Create Plot
        returnFig = False
        if ax is None:  # Create axis
returnFig = True
fig = plt.figure(figsize=figsize)
if n_components == 3:
ax = fig.add_subplot(111, projection='3d')
ax.view_init(*view)
elif n_components == 2:
ax = fig.add_subplot(111)
# Plot dots
if n_components == 3:
ax.scatter(proj[:, 0], proj[:, 1], proj[:, 2], s=1, c='k')
elif n_components == 2:
ax.scatter(proj[:, 0], proj[:, 1], s=1, c='k')
# Plot labels
if labels_color is None:
labels_color = ['black'] * len(self.labels)
if n_components == 3:
for ((x, y, z), label, color) in zip(proj, self.labels, labels_color):
ax.text(x, y, z, label, color='white', #color,
bbox=dict(facecolor=color, alpha=1, boxstyle="round,pad=0.3"))
else:
for ((x, y), label, color) in zip(proj, self.labels, labels_color):
ax.text(x, y, label, color='white', #color,
bbox=dict(facecolor=color, alpha=1, boxstyle="round,pad=0.3"))
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
if returnFig:
return fig
def distance_to_similarity(self, beta=1):
'''Convert distance matrix to similarity matrix
Args:
beta: parameter to scale exponential function (default: 1)
Returns:
Adjacency object
'''
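        # Map distances to similarities with a decaying exponential: exp(-beta * D / std(D)).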
if self.matrix_type == 'distance':
return Adjacency(np.exp(-beta*self.squareform()/self.squareform().std()),
labels=self.labels, matrix_type='similarity')
else:
raise ValueError('Matrix is not a distance matrix.')
def similarity_to_distance(self):
'''Convert similarity matrix to distance matrix'''
if self.matrix_type == 'similarity':
return Adjacency(1-self.squareform(),
labels=self.labels, matrix_type='distance')
else:
raise ValueError('Matrix is not a similarity matrix.')
def within_cluster_mean(self, clusters = None):
''' This function calculates mean within cluster labels
Args:
clusters: list of cluster labels
Returns:
dict: within cluster means
'''
distance=pd.DataFrame(self.squareform())
clusters = np.array(clusters)
if len(clusters) != distance.shape[0]:
raise ValueError('Cluster labels must be same length as distance matrix')
        out = {}
for i in list(set(clusters)):
out[i] = np.mean(distance.loc[clusters==i,clusters==i].values[np.triu_indices(sum(clusters==i),k=1)])
return out
def regress(self, X, mode='ols', **kwargs):
''' Run a regression on an adjacency instance.
You can decompose an adjacency instance with another adjacency instance.
You can also decompose each pixel by passing a design_matrix instance.
Args:
X: Design matrix can be an Adjacency or Design_Matrix instance
method: type of regression (default: ols)
        Returns:
            stats: dict of regression results ('beta', 't', 'p', 'residual')
        '''
stats = {}
if isinstance(X, Adjacency):
if X.square_shape()[0] != self.square_shape()[0]:
raise ValueError('Adjacency instances must be the same size.')
b,t,p,_,res = regression(X.data.T, self.data, mode=mode, **kwargs)
stats['beta'],stats['t'],stats['p'],stats['residual'] = (b,t,p,res)
elif isinstance(X, Design_Matrix):
if X.shape[0] != len(self):
raise ValueError('Design matrix must have same number of observations as Adjacency')
b,t,p,df,res = regression(X, self.data, mode=mode, **kwargs)
mode = 'ols'
stats['beta'], stats['t'], stats['p'] = [x for x in self[:3]]
stats['beta'].data, stats['t'].data, stats['p'].data = b.squeeze(), t.squeeze(), p.squeeze()
stats['residual'] = self.copy()
stats['residual'].data = res
else:
raise ValueError('X must be a Design_Matrix or Adjacency Instance.')
return stats
| mit |
tmhm/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without sample weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
siutanwong/scikit-learn | examples/cluster/plot_mini_batch_kmeans.py | 265 | 4081 | """
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
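# A point is counted as 'different' when KMeans and MiniBatchKMeans (after matching
# cluster order) assign it to non-corresponding clusters.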
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
BiaDarkia/scikit-learn | examples/tree/plot_iris.py | 30 | 2062 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "ryb"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdYlBu)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.RdYlBu, edgecolor='black', s=15)
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend(loc='lower right', borderpad=0, handletextpad=0)
plt.axis("tight")
plt.show()
| bsd-3-clause |
erh3cq/hyperspy | hyperspy/_signals/signal1d.py | 2 | 61717 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
import math
import matplotlib.pyplot as plt
import numpy as np
import dask.array as da
import scipy.interpolate
import scipy as sp
from scipy.signal import savgol_filter
from scipy.ndimage.filters import gaussian_filter1d
from hyperspy.signal import BaseSignal
from hyperspy._signals.common_signal1d import CommonSignal1D
from hyperspy.signal_tools import SpikesRemoval, SpikesRemovalInteractive
from hyperspy.models.model1d import Model1D
from hyperspy.misc.lowess_smooth import lowess
from hyperspy.defaults_parser import preferences
from hyperspy.signal_tools import (
Signal1DCalibration,
SmoothingSavitzkyGolay,
SmoothingLowess,
SmoothingTV,
ButterworthFilter)
from hyperspy.ui_registry import DISPLAY_DT, TOOLKIT_DT
from hyperspy.misc.tv_denoise import _tv_denoise_1d
from hyperspy.signal_tools import BackgroundRemoval
from hyperspy.decorators import interactive_range_selector
from hyperspy.signal_tools import IntegrateArea, _get_background_estimator
from hyperspy._signals.lazy import LazySignal
from hyperspy.docstrings.signal1d import CROP_PARAMETER_DOC, SPIKES_REMOVAL_TOOL_DOCSTRING
from hyperspy.docstrings.signal import (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG,
SIGNAL_MASK_ARG, NAVIGATION_MASK_ARG)
from hyperspy.docstrings.plot import (
BASE_PLOT_DOCSTRING, BASE_PLOT_DOCSTRING_PARAMETERS, PLOT1D_DOCSTRING)
_logger = logging.getLogger(__name__)
def find_peaks_ohaver(y, x=None, slope_thresh=0., amp_thresh=None,
medfilt_radius=5, maxpeakn=30000, peakgroup=10,
subchannel=True,):
"""Find peaks along a 1D line.
Function to locate the positive peaks in a noisy x-y data set.
Detects peaks by looking for downward zero-crossings in the first
derivative that exceed 'slope_thresh'.
Returns an array containing position, height, and width of each peak.
Sorted by position.
'slope_thresh' and 'amp_thresh', control sensitivity: higher values
will neglect wider peaks (slope) and smaller features (amp),
respectively.
Parameters
----------
y : array
1D input array, e.g. a spectrum
x : array (optional)
1D array describing the calibration of y (must have same shape as y)
slope_thresh : float (optional)
1st derivative threshold to count the peak;
higher values will neglect broader features;
default is set to 0.
amp_thresh : float (optional)
intensity threshold below which peaks are ignored;
higher values will neglect smaller features;
default is set to 10% of max(y).
medfilt_radius : int (optional)
median filter window to apply to smooth the data
(see scipy.signal.medfilt);
if 0, no filter will be applied;
default is set to 5.
peakgroup : int (optional)
number of points around the "top part" of the peak that
are taken to estimate the peak height; for spikes or
very narrow peaks, keep PeakGroup=1 or 2; for broad or
noisy peaks, make PeakGroup larger to reduce the effect
of noise;
default is set to 10.
maxpeakn : int (optional)
number of maximum detectable peaks;
default is set to 30000.
subchannel : bool (optional)
default is set to True.
Returns
-------
P : structured array of shape (npeaks)
contains fields: 'position', 'width', and 'height' for each peak.
Examples
--------
>>> x = np.arange(0,50,0.01)
>>> y = np.cos(x)
>>> peaks = find_peaks_ohaver(y, x, 0, 0)
Notes
-----
Original code from T. C. O'Haver, 1995.
Version 2 Last revised Oct 27, 2006 Converted to Python by
Michael Sarahan, Feb 2011.
Revised to handle edges better. MCS, Mar 2011
"""
if x is None:
x = np.arange(len(y), dtype=np.int64)
if not amp_thresh:
amp_thresh = 0.1 * y.max()
peakgroup = np.round(peakgroup)
if medfilt_radius:
d = np.gradient(scipy.signal.medfilt(y, medfilt_radius))
else:
d = np.gradient(y)
n = np.round(peakgroup / 2 + 1)
peak_dt = np.dtype([('position', np.float),
('height', np.float),
('width', np.float)])
P = np.array([], dtype=peak_dt)
peak = 0
for j in range(len(y) - 4):
if np.sign(d[j]) > np.sign(d[j + 1]): # Detects zero-crossing
if np.sign(d[j + 1]) == 0:
continue
# if slope of derivative is larger than slope_thresh
if d[j] - d[j + 1] > slope_thresh:
# if height of peak is larger than amp_thresh
if y[j] > amp_thresh:
# the next section is very slow, and actually messes
# things up for images (discrete pixels),
# so by default, don't do subchannel precision in the
# 1D peakfind step.
if subchannel:
xx = np.zeros(peakgroup)
yy = np.zeros(peakgroup)
s = 0
for k in range(peakgroup):
groupindex = int(j + k - n + 1)
if groupindex < 1:
xx = xx[1:]
yy = yy[1:]
s += 1
continue
elif groupindex > y.shape[0] - 1:
xx = xx[:groupindex - 1]
yy = yy[:groupindex - 1]
break
xx[k - s] = x[groupindex]
yy[k - s] = y[groupindex]
avg = np.average(xx)
stdev = np.std(xx)
xxf = (xx - avg) / stdev
# Fit parabola to log10 of sub-group with
# centering and scaling
yynz = yy != 0
coef = np.polyfit(
xxf[yynz], np.log10(np.abs(yy[yynz])), 2)
c1 = coef[2]
c2 = coef[1]
c3 = coef[0]
with np.errstate(invalid='ignore'):
width = np.linalg.norm(stdev * 2.35703 /
(np.sqrt(2) * np.sqrt(-1 *
c3)))
# if the peak is too narrow for least-squares
# technique to work well, just use the max value
# of y in the sub-group of points near peak.
if peakgroup < 7:
height = np.max(yy)
position = xx[np.argmin(np.abs(yy - height))]
else:
position = - ((stdev * c2 / (2 * c3)) - avg)
height = np.exp(c1 - c3 * (c2 / (2 * c3)) ** 2)
# Fill results array P. One row for each peak
# detected, containing the
# peak position (x-value) and peak height (y-value).
else:
position = x[j]
height = y[j]
# no way to know peak width without
# the above measurements.
width = 0
if (not np.isnan(position) and 0 < position < x[-1]):
P = np.hstack((P,
np.array([(position, height, width)],
dtype=peak_dt)))
peak += 1
# return only the part of the array that contains peaks
# (not the whole maxpeakn x 3 array)
if len(P) > maxpeakn:
minh = np.sort(P['height'])[-maxpeakn]
P = P[P['height'] >= minh]
# Sorts the values as a function of position
P.sort(0)
return P
def interpolate1D(number_of_interpolation_points, data):
ip = number_of_interpolation_points
ch = len(data)
old_ax = np.linspace(0, 100, ch)
new_ax = np.linspace(0, 100, ch * ip - (ip - 1))
interpolator = scipy.interpolate.interp1d(old_ax, data)
return interpolator(new_ax)
def _estimate_shift1D(data, **kwargs):
mask = kwargs.get('mask', None)
ref = kwargs.get('ref', None)
interpolate = kwargs.get('interpolate', True)
ip = kwargs.get('ip', 5)
data_slice = kwargs.get('data_slice', slice(None))
if bool(mask):
        # asarray is required for consistency as argmax
# returns a numpy scalar array
return np.asarray(np.nan)
data = data[data_slice]
if interpolate is True:
data = interpolate1D(ip, data)
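    # The offset of the full cross-correlation maximum gives the shift of `data`
    # relative to `ref`, in (interpolated) index units.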
return np.argmax(np.correlate(ref, data, 'full')) - len(ref) + 1
def _shift1D(data, **kwargs):
shift = kwargs.get('shift', 0.)
original_axis = kwargs.get('original_axis', None)
fill_value = kwargs.get('fill_value', np.nan)
kind = kwargs.get('kind', 'linear')
offset = kwargs.get('offset', 0.)
scale = kwargs.get('scale', 1.)
size = kwargs.get('size', 2)
if np.isnan(shift) or shift == 0:
return data
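    # Interpolate the spectrum on its original axis and resample it on an axis translated
    # by `shift`; points falling outside the original range take `fill_value`.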
axis = np.linspace(offset, offset + scale * (size - 1), size)
si = sp.interpolate.interp1d(original_axis,
data,
bounds_error=False,
fill_value=fill_value,
kind=kind)
offset = float(offset - shift)
axis = np.linspace(offset, offset + scale * (size - 1), size)
return si(axis)
class Signal1D(BaseSignal, CommonSignal1D):
"""
"""
_signal_dimension = 1
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.axes_manager.signal_dimension != 1:
self.axes_manager.set_signal_dimension(1)
def _get_spikes_diagnosis_histogram_data(self, signal_mask=None,
navigation_mask=None,
**kwargs):
self._check_signal_dimension_equals_one()
dc = self.data
if signal_mask is not None:
dc = dc[..., ~signal_mask]
if navigation_mask is not None:
dc = dc[~navigation_mask, :]
der = np.abs(np.diff(dc, 1, -1))
n = ((~navigation_mask).sum() if navigation_mask else
self.axes_manager.navigation_size)
# arbitrary cutoff for number of spectra necessary before histogram
# data is compressed by finding maxima of each spectrum
tmp = BaseSignal(der) if n < 2000 else BaseSignal(
np.ravel(der.max(-1)))
# get histogram signal using smart binning and plot
return tmp.get_histogram(**kwargs)
def spikes_diagnosis(self, signal_mask=None,
navigation_mask=None,
**kwargs):
"""Plots a histogram to help in choosing the threshold for
spikes removal.
Parameters
----------
%s
%s
**kwargs : dict
Keyword arguments pass to
:py:meth:`~hyperspy.signal.signal.BaseSignal.get_histogram`
See also
--------
spikes_removal_tool
"""
tmph = self._get_spikes_diagnosis_histogram_data(signal_mask,
navigation_mask,
**kwargs)
tmph.plot()
# Customize plot appearance
plt.gca().set_title('')
plt.gca().fill_between(tmph.axes_manager[0].axis,
tmph.data,
facecolor='#fddbc7',
interpolate=True,
color='none')
ax = tmph._plot.signal_plot.ax
axl = tmph._plot.signal_plot.ax_lines[0]
axl.set_line_properties(color='#b2182b')
plt.xlabel('Derivative magnitude')
plt.ylabel('Log(Counts)')
ax.set_yscale('log')
ax.set_ylim(10 ** -1, plt.ylim()[1])
ax.set_xlim(plt.xlim()[0], 1.1 * plt.xlim()[1])
plt.draw()
spikes_diagnosis.__doc__ %= (SIGNAL_MASK_ARG, NAVIGATION_MASK_ARG)
def spikes_removal_tool(self, signal_mask=None, navigation_mask=None,
threshold='auto', interactive=True,
display=True, toolkit=None):
self._check_signal_dimension_equals_one()
if interactive:
sr = SpikesRemovalInteractive(self,
signal_mask=signal_mask,
navigation_mask=navigation_mask,
threshold=threshold)
return sr.gui(display=display, toolkit=toolkit)
else:
SpikesRemoval(self,
signal_mask=signal_mask,
navigation_mask=navigation_mask,
threshold=threshold)
spikes_removal_tool.__doc__ = SPIKES_REMOVAL_TOOL_DOCSTRING % (
SIGNAL_MASK_ARG, NAVIGATION_MASK_ARG, "", DISPLAY_DT, TOOLKIT_DT)
def create_model(self, dictionary=None):
"""Create a model for the current data.
Returns
-------
model : `Model1D` instance.
"""
model = Model1D(self, dictionary=dictionary)
return model
def shift1D(
self,
shift_array,
interpolation_method='linear',
crop=True,
expand=False,
fill_value=np.nan,
parallel=None,
show_progressbar=None,
max_workers=None,
):
"""Shift the data in place over the signal axis by the amount specified
by an array.
Parameters
----------
shift_array : numpy array
An array containing the shifting amount. It must have
`axes_manager._navigation_shape_in_array` shape.
interpolation_method : str or int
Specifies the kind of interpolation as a string ('linear',
            'nearest', 'zero', 'slinear', 'quadratic', 'cubic') or as an
integer specifying the order of the spline interpolator to
use.
%s
expand : bool
If True, the data will be expanded to fit all data after alignment.
Overrides `crop`.
fill_value : float
If crop is False fill the data outside of the original
interval with the given value where needed.
%s
%s
%s
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
if not np.any(shift_array):
            # Nothing to do, the shift array is filled with zeros
return
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
axis = self.axes_manager.signal_axes[0]
# Figure out min/max shifts, and translate to shifts in index as well
minimum, maximum = np.nanmin(shift_array), np.nanmax(shift_array)
if minimum < 0:
ihigh = 1 + axis.value2index(
axis.high_value + minimum,
rounding=math.floor)
else:
ihigh = axis.high_index + 1
if maximum > 0:
ilow = axis.value2index(axis.offset + maximum,
rounding=math.ceil)
else:
ilow = axis.low_index
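        # [ilow, ihigh) is the index range still covered by every spectrum after shifting;
        # it is used below either to crop or, with expand=True, to size the padding.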
if expand:
if self._lazy:
ind = axis.index_in_array
pre_shape = list(self.data.shape)
post_shape = list(self.data.shape)
pre_chunks = list(self.data.chunks)
post_chunks = list(self.data.chunks)
pre_shape[ind] = axis.high_index - ihigh + 1
post_shape[ind] = ilow - axis.low_index
for chunks, shape in zip((pre_chunks, post_chunks),
(pre_shape, post_shape)):
maxsize = min(np.max(chunks[ind]), shape[ind])
num = np.ceil(shape[ind] / maxsize)
chunks[ind] = tuple(len(ar) for ar in
np.array_split(np.arange(shape[ind]),
num))
pre_array = da.full(tuple(pre_shape),
fill_value,
chunks=tuple(pre_chunks))
post_array = da.full(tuple(post_shape),
fill_value,
chunks=tuple(post_chunks))
self.data = da.concatenate((pre_array, self.data, post_array),
axis=ind)
else:
padding = []
for i in range(self.data.ndim):
if i == axis.index_in_array:
padding.append((axis.high_index - ihigh + 1,
ilow - axis.low_index))
else:
padding.append((0, 0))
self.data = np.pad(self.data, padding, mode='constant',
constant_values=(fill_value,))
axis.offset += minimum
axis.size += axis.high_index - ihigh + 1 + ilow - axis.low_index
self._map_iterate(_shift1D, (('shift', shift_array.ravel()),),
original_axis=axis.axis,
fill_value=fill_value,
kind=interpolation_method,
offset=axis.offset,
scale=axis.scale,
size=axis.size,
show_progressbar=show_progressbar,
parallel=parallel,
max_workers=max_workers,
ragged=False)
if crop and not expand:
_logger.debug("Cropping %s from index %i to %i"
% (self, ilow, ihigh))
self.crop(axis.index_in_axes_manager,
ilow,
ihigh)
self.events.data_changed.trigger(obj=self)
shift1D.__doc__ %= (CROP_PARAMETER_DOC, SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
def interpolate_in_between(
self,
start,
end,
delta=3,
show_progressbar=None,
parallel=None,
max_workers=None,
**kwargs,
):
"""Replace the data in a given range by interpolation.
The operation is performed in place.
Parameters
----------
start, end : int or float
The limits of the interval. If int they are taken as the
axis index. If float they are taken as the axis value.
delta : int or float
The windows around the (start, end) to use for interpolation
%s
%s
%s
**kwargs :
All extra keyword arguments are passed to
:py:func:`scipy.interpolate.interp1d`. See the function documentation
for details.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
axis = self.axes_manager.signal_axes[0]
i1 = axis._get_index(start)
i2 = axis._get_index(end)
if isinstance(delta, float):
delta = int(delta / axis.scale)
i0 = int(np.clip(i1 - delta, 0, np.inf))
i3 = int(np.clip(i2 + delta, 0, axis.size))
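        # Fit the interpolator to the `delta`-wide windows on both sides of [i1, i2)
        # and replace the values inside the interval with its predictions.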
def interpolating_function(dat):
dat_int = sp.interpolate.interp1d(
list(range(i0, i1)) + list(range(i2, i3)),
dat[i0:i1].tolist() + dat[i2:i3].tolist(),
**kwargs)
dat[i1:i2] = dat_int(list(range(i1, i2)))
return dat
self._map_iterate(interpolating_function,
ragged=False,
parallel=parallel,
show_progressbar=show_progressbar,
max_workers=max_workers)
self.events.data_changed.trigger(obj=self)
interpolate_in_between.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
def estimate_shift1D(
self,
start=None,
end=None,
reference_indices=None,
max_shift=None,
interpolate=True,
number_of_interpolation_points=5,
mask=None,
show_progressbar=None,
parallel=None,
max_workers=None,
):
"""Estimate the shifts in the current signal axis using
cross-correlation.
This method can only estimate the shift by comparing
unidimensional features that should not change the position in
the signal axis. To decrease the memory usage, the time of
computation and the accuracy of the results it is convenient to
select the feature of interest providing sensible values for
`start` and `end`. By default interpolation is used to obtain
subpixel precision.
Parameters
----------
start, end : int, float or None
The limits of the interval. If int they are taken as the
axis index. If float they are taken as the axis value.
reference_indices : tuple of ints or None
Defines the coordinates of the spectrum that will be used
            as reference. If None the spectrum at the current
coordinates is used for this purpose.
max_shift : int
"Saturation limit" for the shift.
interpolate : bool
If True, interpolation is used to provide sub-pixel
accuracy.
number_of_interpolation_points : int
Number of interpolation points. Warning: making this number
too big can saturate the memory
mask : `BaseSignal` of bool.
It must have signal_dimension = 0 and navigation_shape equal to the
current signal. Where mask is True the shift is not computed
and set to nan.
%s
%s
%s
Returns
-------
An array with the result of the estimation in the axis units.
Although the computation is performed in batches if the signal is
lazy, the result is computed in memory because it depends on the
current state of the axes that could change later on in the workflow.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
ip = number_of_interpolation_points + 1
axis = self.axes_manager.signal_axes[0]
self._check_navigation_mask(mask)
# we compute for now
if isinstance(start, da.Array):
start = start.compute()
if isinstance(end, da.Array):
end = end.compute()
i1, i2 = axis._get_index(start), axis._get_index(end)
if reference_indices is None:
reference_indices = self.axes_manager.indices
ref = self.inav[reference_indices].data[i1:i2]
if interpolate is True:
ref = interpolate1D(ip, ref)
iterating_kwargs = ()
if mask is not None:
iterating_kwargs += (('mask', mask),)
shift_signal = self._map_iterate(
_estimate_shift1D,
iterating_kwargs=iterating_kwargs,
data_slice=slice(i1, i2),
ref=ref,
ip=ip,
interpolate=interpolate,
ragged=False,
parallel=parallel,
inplace=False,
show_progressbar=show_progressbar,
max_workers=max_workers,
)
shift_array = shift_signal.data
if max_shift is not None:
if interpolate is True:
max_shift *= ip
            shift_array = shift_array.clip(-max_shift, max_shift)
if interpolate is True:
shift_array = shift_array / ip
shift_array *= axis.scale
if self._lazy:
# We must compute right now because otherwise any changes to the
# axes_manager of the signal later in the workflow may result in
# a wrong shift_array
shift_array = shift_array.compute()
return shift_array
estimate_shift1D.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
def align1D(self,
start=None,
end=None,
reference_indices=None,
max_shift=None,
interpolate=True,
number_of_interpolation_points=5,
interpolation_method='linear',
crop=True,
expand=False,
fill_value=np.nan,
also_align=None,
mask=None,
show_progressbar=None):
"""Estimate the shifts in the signal axis using
cross-correlation and use the estimation to align the data in place.
This method can only estimate the shift by comparing
unidimensional
features that should not change the position.
To decrease memory usage, time of computation and improve
accuracy it is convenient to select the feature of interest
setting the `start` and `end` keywords. By default interpolation is
used to obtain subpixel precision.
Parameters
----------
start, end : int, float or None
The limits of the interval. If int they are taken as the
axis index. If float they are taken as the axis value.
reference_indices : tuple of ints or None
Defines the coordinates of the spectrum that will be used
            as reference. If None the spectrum at the current
coordinates is used for this purpose.
max_shift : int
"Saturation limit" for the shift.
interpolate : bool
If True, interpolation is used to provide sub-pixel
accuracy.
number_of_interpolation_points : int
Number of interpolation points. Warning: making this number
too big can saturate the memory
interpolation_method : str or int
Specifies the kind of interpolation as a string ('linear',
'nearest', 'zero', 'slinear', 'quadratic', 'cubic') or as an
integer specifying the order of the spline interpolator to
use.
%s
expand : bool
If True, the data will be expanded to fit all data after alignment.
Overrides `crop`.
fill_value : float
If crop is False fill the data outside of the original
interval with the given value where needed.
also_align : list of signals, None
A list of BaseSignal instances that has exactly the same
dimensions as this one and that will be aligned using the shift map
estimated using this signal.
mask : `BaseSignal` of bool data type.
It must have signal_dimension = 0 and navigation_shape equal to the
current signal. Where mask is True the shift is not computed
and set to nan.
%s
Returns
-------
An array with the result of the estimation.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
See also
--------
estimate_shift1D
"""
if also_align is None:
also_align = []
self._check_signal_dimension_equals_one()
if self._lazy:
_logger.warning('In order to properly expand, the lazy '
'reference signal will be read twice (once to '
'estimate shifts, and second time to shift '
'appropriately), which might take a long time. '
'Use expand=False to only pass through the data '
'once.')
shift_array = self.estimate_shift1D(
start=start,
end=end,
reference_indices=reference_indices,
max_shift=max_shift,
interpolate=interpolate,
number_of_interpolation_points=number_of_interpolation_points,
mask=mask,
show_progressbar=show_progressbar)
signals_to_shift = [self] + also_align
for signal in signals_to_shift:
signal.shift1D(shift_array=shift_array,
interpolation_method=interpolation_method,
crop=crop,
fill_value=fill_value,
expand=expand,
show_progressbar=show_progressbar)
align1D.__doc__ %= (CROP_PARAMETER_DOC, SHOW_PROGRESSBAR_ARG)
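# Minimal usage sketch (same illustrative assumptions as above; the data is
# shifted in place):
#
#     >>> s = hs.signals.Signal1D(np.random.random((4, 100)))
#     >>> s.align1D(start=20, end=80)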
def integrate_in_range(self, signal_range='interactive',
display=True, toolkit=None):
"""Sums the spectrum over an energy range, giving the integrated
area.
The energy range can either be selected through a GUI or the command
line.
Parameters
----------
signal_range : a tuple of this form (l, r) or "interactive"
l and r are the left and right limits of the range. They can be
numbers or None, where None indicates the extremes of the interval.
If l and r are floats the `signal_range` will be in axis units (for
example eV). If l and r are integers the `signal_range` will be in
index units. When `signal_range` is "interactive" (default) the
range is selected using a GUI. Note that ROIs can be used
in place of a tuple.
Returns
--------
integrated_spectrum : `BaseSignal` subclass
See Also
--------
integrate_simpson
Examples
--------
Using the GUI
>>> s = hs.signals.Signal1D(range(1000))
>>> s.integrate_in_range() #doctest: +SKIP
Using the CLI
>>> s_int = s.integrate_in_range(signal_range=(560,None))
Selecting a range in the axis units, by specifying the
signal range with floats.
>>> s_int = s.integrate_in_range(signal_range=(560.,590.))
Selecting a range using the index, by specifying the
signal range with integers.
>>> s_int = s.integrate_in_range(signal_range=(100,120))
"""
from hyperspy.misc.utils import deprecation_warning
msg = (
"The `Signal1D.integrate_in_range` method is deprecated and will "
"be removed in v2.0. Use a `roi.SpanRoi` followed by `integrate1D` "
"instead.")
deprecation_warning(msg)
if signal_range == 'interactive':
self_copy = self.deepcopy()
ia = IntegrateArea(self_copy, signal_range)
ia.gui(display=display, toolkit=toolkit)
integrated_signal1D = self_copy
else:
integrated_signal1D = self._integrate_in_range_commandline(
signal_range)
return integrated_signal1D
def _integrate_in_range_commandline(self, signal_range):
e1 = signal_range[0]
e2 = signal_range[1]
integrated_signal1D = self.isig[e1:e2].integrate1D(-1)
return integrated_signal1D
def calibrate(self, display=True, toolkit=None):
"""
Calibrate the spectral dimension using a gui.
It displays a window where the new calibration can be set by:
* setting the values of offset, units and scale directly
* or selecting a range by dragging the mouse on the spectrum figure
and setting the new values for the given range limits
Parameters
----------
%s
%s
Notes
-----
For this method to work the output_dimension must be 1.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
calibration = Signal1DCalibration(self)
return calibration.gui(display=display, toolkit=toolkit)
calibrate.__doc__ %= (DISPLAY_DT, TOOLKIT_DT)
def smooth_savitzky_golay(
self,
polynomial_order=None,
window_length=None,
differential_order=0,
parallel=None,
max_workers=None,
display=True,
toolkit=None,
):
"""
Apply a Savitzky-Golay filter to the data in place.
If `polynomial_order` or `window_length` or `differential_order` are
None the method is run in interactive mode.
Parameters
----------
polynomial_order : int, optional
The order of the polynomial used to fit the samples.
`polyorder` must be less than `window_length`.
window_length : int, optional
The length of the filter window (i.e. the number of coefficients).
`window_length` must be a positive odd integer.
differential_order: int, optional
The order of the derivative to compute. This must be a
nonnegative integer. The default is 0, which means to filter
the data without differentiating.
%s
%s
%s
%s
Notes
-----
More information about the filter in `scipy.signal.savgol_filter`.
"""
self._check_signal_dimension_equals_one()
if (polynomial_order is not None and
window_length is not None):
axis = self.axes_manager.signal_axes[0]
self.map(savgol_filter, window_length=window_length,
polyorder=polynomial_order, deriv=differential_order,
delta=axis.scale, ragged=False, parallel=parallel, max_workers=max_workers)
else:
# Interactive mode
smoother = SmoothingSavitzkyGolay(self)
smoother.differential_order = differential_order
if polynomial_order is not None:
smoother.polynomial_order = polynomial_order
if window_length is not None:
smoother.window_length = window_length
return smoother.gui(display=display, toolkit=toolkit)
smooth_savitzky_golay.__doc__ %= (PARALLEL_ARG, MAX_WORKERS_ARG, DISPLAY_DT, TOOLKIT_DT)
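# The non-interactive branch above delegates to ``scipy.signal.savgol_filter``;
# a stand-alone equivalent for a single spectrum looks like this (illustrative
# sketch, SciPy assumed available):
#
#     >>> import numpy as np
#     >>> from scipy.signal import savgol_filter
#     >>> y = np.random.random(100)
#     >>> y_smooth = savgol_filter(y, window_length=11, polyorder=3, deriv=0)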
def smooth_lowess(
self,
smoothing_parameter=None,
number_of_iterations=None,
show_progressbar=None,
parallel=None,
max_workers=None,
display=True,
toolkit=None,
):
"""
Lowess data smoothing in place.
If `smoothing_parameter` or `number_of_iterations` are None the method
is run in interactive mode.
Parameters
----------
smoothing_parameter: float or None
Between 0 and 1. The fraction of the data used
when estimating each y-value.
number_of_iterations: int or None
The number of residual-based reweightings
to perform.
%s
%s
%s
%s
%s
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
if smoothing_parameter is None or number_of_iterations is None:
smoother = SmoothingLowess(self)
if smoothing_parameter is not None:
smoother.smoothing_parameter = smoothing_parameter
if number_of_iterations is not None:
smoother.number_of_iterations = number_of_iterations
return smoother.gui(display=display, toolkit=toolkit)
else:
self.map(lowess,
x=self.axes_manager[-1].axis,
f=smoothing_parameter,
n_iter=number_of_iterations,
show_progressbar=show_progressbar,
ragged=False,
parallel=parallel,
max_workers=max_workers)
smooth_lowess.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG, DISPLAY_DT, TOOLKIT_DT)
def smooth_tv(
self,
smoothing_parameter=None,
show_progressbar=None,
parallel=None,
max_workers=None,
display=True,
toolkit=None,
):
"""
Total variation data smoothing in place.
Parameters
----------
smoothing_parameter: float or None
Denoising weight relative to L2 minimization. If None the method
is run in interactive mode.
%s
%s
%s
%s
%s
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
if smoothing_parameter is None:
smoother = SmoothingTV(self)
return smoother.gui(display=display, toolkit=toolkit)
else:
self.map(_tv_denoise_1d, weight=smoothing_parameter,
ragged=False,
show_progressbar=show_progressbar,
parallel=parallel,
max_workers=max_workers)
smooth_tv.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG, DISPLAY_DT, TOOLKIT_DT)
def filter_butterworth(self,
cutoff_frequency_ratio=None,
type='low',
order=2, display=True, toolkit=None):
"""
Butterworth filter in place.
Parameters
----------
%s
%s
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
smoother = ButterworthFilter(self)
if cutoff_frequency_ratio is not None:
smoother.cutoff_frequency_ratio = cutoff_frequency_ratio
smoother.type = type
smoother.order = order
smoother.apply()
else:
return smoother.gui(display=display, toolkit=toolkit)
filter_butterworth.__doc__ %= (DISPLAY_DT, TOOLKIT_DT)
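# Minimal usage sketch (parameter values are illustrative); applies a
# second-order low-pass Butterworth filter in place:
#
#     >>> s.filter_butterworth(cutoff_frequency_ratio=0.2, type='low', order=2)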
def _remove_background_cli(
self, signal_range, background_estimator, fast=True,
zero_fill=False, show_progressbar=None, model=None,
return_model=False):
""" See :py:meth:`~hyperspy._signal1d.signal1D.remove_background`. """
if model is None:
from hyperspy.models.model1d import Model1D
model = Model1D(self)
if background_estimator not in model:
model.append(background_estimator)
background_estimator.estimate_parameters(
self,
signal_range[0],
signal_range[1],
only_current=False)
if not fast:
model.set_signal_range(signal_range[0], signal_range[1])
model.multifit(show_progressbar=show_progressbar,
iterpath='serpentine')
model.reset_signal_range()
if self._lazy:
result = self - model.as_signal(show_progressbar=show_progressbar)
else:
try:
axis = self.axes_manager.signal_axes[0]
scale_factor = axis.scale if self.metadata.Signal.binned else 1
bkg = background_estimator.function_nd(axis.axis) * scale_factor
result = self - bkg
except MemoryError:
result = self - model.as_signal(
show_progressbar=show_progressbar)
if zero_fill:
if self._lazy:
low_idx = result.axes_manager[-1].value2index(signal_range[0])
z = da.zeros(low_idx, chunks=(low_idx,))
cropped_da = result.data[low_idx:]
result.data = da.concatenate([z, cropped_da])
else:
result.isig[:signal_range[0]] = 0
if return_model:
if fast:
# Calculate the variance for each navigation position only when
# using fast, otherwise the chisq is already calculated when
# doing the multifit
d = result.data[..., np.where(model.channel_switches)[0]]
variance = model._get_variance(only_current=False)
d *= d / (1. * variance) # d = difference^2 / variance.
model.chisq.data = d.sum(-1)
result = (result, model)
return result
def remove_background(
self,
signal_range='interactive',
background_type='Power law',
polynomial_order=2,
fast=True,
zero_fill=False,
plot_remainder=True,
show_progressbar=None,
return_model=False,
display=True,
toolkit=None):
"""
Remove the background, either in place using a GUI or returned as a new
spectrum using the command line. The fast option is not accurate for
most background types - except Gaussian, Offset and
Power law - but it is useful to estimate the initial fitting parameters
before performing a full fit.
Parameters
----------
signal_range : "interactive", tuple of ints or floats, optional
If this argument is not specified, the signal range has to be
selected using a GUI, and the original spectrum will be replaced.
If a tuple is given, a spectrum will be returned.
background_type : str
The type of component which should be used to fit the background.
Possible components: Doniach, Gaussian, Lorentzian, Offset,
Polynomial, PowerLaw, Exponential, SkewNormal, SplitVoigt, Voigt.
If Polynomial is used, the polynomial order can be specified
polynomial_order : int, default 2
Specify the polynomial order if a Polynomial background is used.
fast : bool
If True, perform an approximative estimation of the parameters.
If False, the signal is fitted using non-linear least squares
afterwards. This is slower compared to the estimation but
often more accurate.
zero_fill : bool
If True, all spectral channels lower than the lower bound of the
fitting range will be set to zero (this is the default behavior
of Gatan's DigitalMicrograph). Setting this value to False
allows for inspection of the quality of background fit throughout
the pre-fitting region.
plot_remainder : bool
If True, add a (green) line previewing the remainder signal after
background removal. This preview is obtained from a fast calculation,
so the result may be different if an NLLS calculation is finally
performed.
return_model : bool
If True, the background model is returned. The chi² can be obtained
from this model using
:py:meth:`~hyperspy.models.model1d.Model1D.chisq`.
%s
%s
%s
Returns
-------
{None, signal, background_model or (signal, background_model)}
If signal_range is not 'interactive', the signal with background
subtracted is returned. If return_model is True, returns the
background model, otherwise, the GUI widget dictionary is returned
if `display=False` - see the display parameter documentation.
Examples
--------
Using GUI, replaces spectrum s
>>> s = hs.signals.Signal1D(range(1000))
>>> s.remove_background() #doctest: +SKIP
Using command line, returns a Signal1D:
>>> s.remove_background(signal_range=(400,450),
background_type='PowerLaw')
<Signal1D, title: , dimensions: (|1000)>
Using a full model to fit the background:
>>> s.remove_background(signal_range=(400,450), fast=False)
<Signal1D, title: , dimensions: (|1000)>
Returns the background-subtracted signal and the model:
>>> s.remove_background(signal_range=(400,450),
fast=False,
return_model=True)
(<Signal1D, title: , dimensions: (|1000)>, <Model1D>)
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
# Create model here, so that we can return it
from hyperspy.models.model1d import Model1D
model = Model1D(self)
if signal_range == 'interactive':
br = BackgroundRemoval(self, background_type=background_type,
polynomial_order=polynomial_order,
fast=fast,
plot_remainder=plot_remainder,
show_progressbar=show_progressbar,
zero_fill=zero_fill,
model=model)
gui_dict = br.gui(display=display, toolkit=toolkit)
if return_model:
return model
else:
# for testing purposes
return gui_dict
else:
background_estimator = _get_background_estimator(
background_type, polynomial_order)[0]
result = self._remove_background_cli(
signal_range=signal_range,
background_estimator=background_estimator,
fast=fast,
zero_fill=zero_fill,
show_progressbar=show_progressbar,
model=model,
return_model=return_model)
return result
remove_background.__doc__ %= (SHOW_PROGRESSBAR_ARG, DISPLAY_DT, TOOLKIT_DT)
@interactive_range_selector
def crop_signal1D(self, left_value=None, right_value=None,):
"""Crop in place the spectral dimension.
Parameters
----------
left_value, right_value : int, float or None
If int the values are taken as indices. If float they are
converted to indices using the spectral axis calibration.
If left_value is None crops from the beginning of the axis.
If right_value is None crops up to the end of the axis. If both
are None the interactive cropping interface is activated,
enabling cropping the spectrum using a span selector in the
signal plot.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
try:
left_value, right_value = left_value
except TypeError:
# It was not a ROI, we carry on
pass
self.crop(axis=self.axes_manager.signal_axes[0].index_in_axes_manager,
start=left_value, end=right_value)
def gaussian_filter(self, FWHM):
"""Applies a Gaussian filter in the spectral dimension in place.
Parameters
----------
FWHM : float
The Full Width at Half Maximum of the gaussian in the
spectral axis units
Raises
------
ValueError
If FWHM is equal or less than zero.
SignalDimensionError
If the signal dimension is not 1.
"""
self._check_signal_dimension_equals_one()
if FWHM <= 0:
raise ValueError(
"FWHM must be greater than zero")
axis = self.axes_manager.signal_axes[0]
FWHM *= 1 / axis.scale
self.map(gaussian_filter1d, sigma=FWHM / 2.35482, ragged=False)
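# The 2.35482 factor converts the FWHM to the Gaussian standard deviation,
# since FWHM = 2 * sqrt(2 * ln(2)) * sigma. Quick numerical check (illustrative):
#
#     >>> import numpy as np
#     >>> 2 * np.sqrt(2 * np.log(2))
#     2.3548200450309493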
def hanning_taper(self, side='both', channels=None, offset=0):
"""Apply a hanning taper to the data in place.
Parameters
----------
side : 'left', 'right' or 'both'
Specify which side to use.
channels : None or int
The number of channels to taper. If None, 2% of the total
number of channels (but at least 20) are tapered.
offset : int
Number of channels at the edge of the axis that are left out of
the taper and set to zero.
Returns
-------
channels
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
if not np.issubdtype(self.data.dtype, np.floating):
raise TypeError("The data dtype should be `float`. It can be "
"changed by using the `change_dtype('float')` "
"method of the signal.")
# TODO: generalize it
self._check_signal_dimension_equals_one()
if channels is None:
channels = int(round(len(self()) * 0.02))
if channels < 20:
channels = 20
dc = self._data_aligned_with_axes
if self._lazy and offset != 0:
shp = dc.shape
if len(shp) == 1:
nav_shape = ()
nav_chunks = ()
else:
nav_shape = shp[:-1]
nav_chunks = dc.chunks[:-1]
zeros = da.zeros(nav_shape + (offset,),
chunks=nav_chunks + ((offset,),))
if side == 'left' or side == 'both':
if self._lazy:
tapered = dc[..., offset:channels + offset]
tapered *= np.hanning(2 * channels)[:channels]
therest = dc[..., channels + offset:]
thelist = [] if offset == 0 else [zeros]
thelist.extend([tapered, therest])
dc = da.concatenate(thelist, axis=-1)
else:
dc[..., offset:channels + offset] *= (
np.hanning(2 * channels)[:channels])
dc[..., :offset] *= 0.
if side == 'right' or side == 'both':
rl = None if offset == 0 else -offset
if self._lazy:
therest = dc[..., :-channels - offset]
tapered = dc[..., -channels - offset:rl]
tapered *= np.hanning(2 * channels)[-channels:]
thelist = [therest, tapered]
if offset != 0:
thelist.append(zeros)
dc = da.concatenate(thelist, axis=-1)
else:
dc[..., -channels - offset:rl] *= (
np.hanning(2 * channels)[-channels:])
if offset != 0:
dc[..., -offset:] *= 0.
if self._lazy:
self.data = dc
self.events.data_changed.trigger(obj=self)
return channels
def find_peaks1D_ohaver(self, xdim=None,
slope_thresh=0,
amp_thresh=None,
subchannel=True,
medfilt_radius=5,
maxpeakn=30000,
peakgroup=10,
parallel=None,
max_workers=None):
"""Find positive peaks along a 1D Signal. It detects peaks by looking
for downward zero-crossings in the first derivative that exceed
'slope_thresh'.
'slope_thresh' and 'amp_thresh' control sensitivity: higher
values will neglect broad peaks (slope) and smaller features (amp),
respectively.
`peakgroup` is the number of points around the top of the peak
that are taken to estimate the peak height. For spikes or very
narrow peaks, set `peakgroup` to 1 or 2; for broad or noisy peaks,
make `peakgroup` larger to reduce the effect of noise.
Parameters
----------
slope_thresh : float, optional
1st derivative threshold to count the peak;
higher values will neglect broader features;
default is set to 0.
amp_thresh : float, optional
intensity threshold below which peaks are ignored;
higher values will neglect smaller features;
default is set to 10%% of max(y).
medfilt_radius : int, optional
median filter window to apply to smooth the data
(see :py:func:`scipy.signal.medfilt`);
if 0, no filter will be applied;
default is set to 5.
peakgroup : int, optional
number of points around the "top part" of the peak
that are taken to estimate the peak height;
default is set to 10
maxpeakn : int, optional
maximum number of detectable peaks;
default is set to 30000.
subchannel : bool, optional
if True, peak positions are refined to sub-channel precision;
default is set to True.
%s
%s
Returns
-------
structured array of shape (npeaks) containing fields: 'position',
'width', and 'height' for each peak.
Raises
------
SignalDimensionError
If the signal dimension is not 1.
"""
# TODO: add scipy.signal.find_peaks_cwt
self._check_signal_dimension_equals_one()
axis = self.axes_manager.signal_axes[0].axis
peaks = self.map(find_peaks_ohaver,
x=axis,
slope_thresh=slope_thresh,
amp_thresh=amp_thresh,
medfilt_radius=medfilt_radius,
maxpeakn=maxpeakn,
peakgroup=peakgroup,
subchannel=subchannel,
ragged=True,
parallel=parallel,
max_workers=max_workers,
inplace=False)
return peaks.data
find_peaks1D_ohaver.__doc__ %= (PARALLEL_ARG, MAX_WORKERS_ARG)
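# Minimal usage sketch (assumes ``hs``/``np`` as above; threshold values are
# illustrative):
#
#     >>> s = hs.signals.Signal1D(np.random.random((2, 100)))
#     >>> peaks = s.find_peaks1D_ohaver(slope_thresh=0, amp_thresh=0.5)
#     >>> peaks[0]['position']  # fields: 'position', 'width', 'height'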
def estimate_peak_width(
self,
factor=0.5,
window=None,
return_interval=False,
parallel=None,
show_progressbar=None,
max_workers=None,
):
"""Estimate the width of the highest intensity of peak
of the spectra at a given fraction of its maximum.
It can be used with asymmetric peaks. For accurate results any
background must be previously substracted.
The estimation is performed by interpolation using cubic splines.
Parameters
----------
factor : 0 < float < 1
The default, 0.5, estimates the FWHM.
window : None or float
The size of the window centred at the peak maximum
used to perform the estimation.
The window size must be chosen with care: if it is narrower
than the width of the peak at some positions or if it is
so wide that it includes other more intense peaks this
method cannot compute the width and a NaN is stored instead.
return_interval: bool
If True, returns 2 extra signals with the positions of the
desired height fraction at the left and right of the
peak.
%s
%s
%s
Returns
-------
width or [width, left, right], depending on the value of
`return_interval`.
Notes
-----
Parallel operation of this function is not supported
on Windows platforms.
"""
if show_progressbar is None:
show_progressbar = preferences.General.show_progressbar
self._check_signal_dimension_equals_one()
if not 0 < factor < 1:
raise ValueError("factor must be between 0 and 1.")
if parallel != False and os.name in ["nt", "dos"]: # pragma: no cover
# Due to a scipy bug where scipy.interpolate.UnivariateSpline
# appears to not be thread-safe on Windows, we raise a warning
# here. See https://github.com/hyperspy/hyperspy/issues/2320
# Until/if the scipy bug is fixed, we should do this.
_logger.warning(
"Parallel operation is not supported on Windows. "
"Setting `parallel=False`"
)
parallel = False
axis = self.axes_manager.signal_axes[0]
# x = axis.axis
maxval = self.axes_manager.navigation_size
show_progressbar = show_progressbar and maxval > 0
def estimating_function(spectrum,
window=None,
factor=0.5,
axis=None):
x = axis.axis
if window is not None:
vmax = axis.index2value(spectrum.argmax())
slices = axis._get_array_slices(
slice(vmax - window * 0.5, vmax + window * 0.5))
spectrum = spectrum[slices]
x = x[slices]
spline = scipy.interpolate.UnivariateSpline(
x,
spectrum - factor * spectrum.max(),
s=0)
roots = spline.roots()
if len(roots) == 2:
return np.array(roots)
else:
return np.full((2,), np.nan)
both = self._map_iterate(estimating_function,
window=window,
factor=factor,
axis=axis,
ragged=False,
inplace=False,
parallel=parallel,
show_progressbar=show_progressbar,
max_workers=max_workers)
left, right = both.T.split()
width = right - left
if factor == 0.5:
width.metadata.General.title = (
self.metadata.General.title + " FWHM")
left.metadata.General.title = (
self.metadata.General.title + " FWHM left position")
right.metadata.General.title = (
self.metadata.General.title + " FWHM right position")
else:
width.metadata.General.title = (
self.metadata.General.title +
" full-width at %.1f maximum" % factor)
left.metadata.General.title = (
self.metadata.General.title +
" full-width at %.1f maximum left position" % factor)
right.metadata.General.title = (
self.metadata.General.title +
" full-width at %.1f maximum right position" % factor)
for signal in (left, width, right):
signal.axes_manager.set_signal_dimension(0)
signal.set_signal_type("")
if return_interval is True:
return [width, left, right]
else:
return width
estimate_peak_width.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG)
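# The width comes from the roots of a cubic spline through
# ``spectrum - factor * max``; the same trick in isolation (illustrative sketch,
# SciPy assumed available):
#
#     >>> import numpy as np, scipy.interpolate
#     >>> x = np.linspace(-5, 5, 201)
#     >>> y = np.exp(-x ** 2 / 2)  # unit Gaussian
#     >>> spline = scipy.interpolate.UnivariateSpline(x, y - 0.5 * y.max(), s=0)
#     >>> left, right = spline.roots()
#     >>> width = right - left  # ~2.3548, the FWHM of a unit Gaussian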
def plot(self,
navigator="auto",
plot_markers=True,
autoscale='v',
norm="auto",
axes_manager=None,
navigator_kwds={},
**kwargs):
"""%s
%s
%s
"""
for c in autoscale:
if c not in ['x', 'v']:
raise ValueError("`autoscale` only accepts 'x', 'v' as "
"valid characters.")
super().plot(navigator=navigator,
plot_markers=plot_markers,
autoscale=autoscale,
norm=norm,
axes_manager=axes_manager,
navigator_kwds=navigator_kwds,
**kwargs)
plot.__doc__ %= (BASE_PLOT_DOCSTRING, BASE_PLOT_DOCSTRING_PARAMETERS,
PLOT1D_DOCSTRING)
class LazySignal1D(LazySignal, Signal1D):
"""
"""
_lazy = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.axes_manager.set_signal_dimension(1)
| gpl-3.0 |
Obus/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 247 | 2432 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate a ring with an inner circle
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
rexshihaoren/scikit-learn | doc/sphinxext/gen_rst.py | 142 | 40026 | """
Example generation for the scikit learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from sklearn.externals import six
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename, encoding='utf-8') as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redict streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://")')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
# replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
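# Minimal usage sketch (names and URL are illustrative placeholders; building
# the resolver downloads ``searchindex.js``, so a remote ``doc_url`` needs
# network access):
#
#     >>> resolver = SphinxDocLinkResolver('http://scikit-learn.org/stable')
#     >>> cobj = {'name': 'fit', 'module': 'sklearn.svm',
#     ...         'module_short': 'sklearn.svm'}
#     >>> link = resolver.resolve(cobj, this_url='auto_examples/plot_foo.html')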
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
if six.PY2:
lines = open(filename).readlines()
else:
lines = open(filename, encoding='utf-8').readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery.\n"
"Please check the layout of your"
" example file:\n {}\n and make sure"
" it's correct".format(filename))
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
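# Minimal usage sketch (``plot_example.py`` is a placeholder file name):
#
#     >>> docstring, first_par, end_row = extract_docstring('plot_example.py')
#     >>> # docstring: full module docstring, first_par: its first paragraph,
#     >>> # end_row: first line number after the docstring.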
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
'examples'))
generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'modules', 'generated'))
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
# we create an index.rst with all examples
fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
# Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
display: none;
}
</style>
.. _examples-index:
Examples
========
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
seen_backrefs = set()
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
for directory in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, directory)):
generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
if six.PY2:
lines = open(example_file).readlines()
else:
lines = open(example_file, encoding='utf-8').readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif (tok_type == 'STRING') and check_docstring:
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = [x for x in file_list if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer" tooltip="{}">
""".format(snippet))
out.append('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
</div>
""" % (ref_name))
return ''.join(out)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not directory == '.':
target_dir = os.path.join(root_dir, directory)
src_dir = os.path.join(example_dir, directory)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' %
src_dir)
fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
os.makedirs(os.path.join(directory, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, snippet, _ = extract_docstring(new_fname, True)
fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (directory, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
# heading
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)),
file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', directory)
ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet))
seen_backrefs.add(backref)
fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
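# Minimal usage sketch (file names are placeholders; 400 x 280 is the gallery
# thumbnail size used further below; requires PIL/Pillow):
#
#     >>> make_thumbnail('plot_example_001.png',
#     ...                'images/thumb/plot_example.png', 400, 280)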
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
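# Example of the shortening behaviour (assumes scikit-learn is installed and,
# as in older releases, defines LinearRegression in ``sklearn.linear_model.base``):
#
#     >>> get_short_module_name('sklearn.linear_model.base', 'LinearRegression')
#     'sklearn.linear_model'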
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code
Only retains names from imported modules.
"""
def __init__(self):
super(NameFinder, self).__init__()
self.imported_names = {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name
def identify_names(code):
"""Builds a codeobj summary by identifying and resovles used names
>>> code = '''
... from a.b import c
... import d as e
... print(c)
... e.HelloWorld().f.g
... '''
>>> for name, o in sorted(identify_names(code).items()):
... print(name, o['name'], o['module'], o['module_short'])
c c a.b a.b
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
finder.visit(ast.parse(code))
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
module, attribute = full_name.rsplit('.', 1)
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
example_code_obj[name] = cobj
return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
Returns the set of sklearn functions/classes imported in the example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
# created by the example get created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
# don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip().expandtabs()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_mngr.num)
kwargs = {}
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr):
kwargs[attr] = fig_attr
fig.savefig(image_path % fig_mngr.num, **kwargs)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path.replace("%03d",
'[0-9][0-9][0-9]'))]
figure_list.sort()
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
# Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
# which is within `auto_examples/../images/thumbs` depending on the example.
# Because the carousel has different dimensions than those of the examples gallery,
# I did not simply reuse them all as some contained whitespace due to their default gallery
# thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
# just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
# The special carousel thumbnails are written directly to _build/html/stable/_images/,
# as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
# auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
# have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
# copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
time_m, time_s = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
f.write(this_template % locals())
f.flush()
# save variables so we can later add links to the documentation
if six.PY2:
example_code_obj = identify_names(open(example_file).read())
else:
example_code_obj = \
identify_names(open(example_file, encoding='utf-8').read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set('{module_short}.{name}'.format(**entry)
for entry in example_code_obj.values()
if entry['module'].startswith('sklearn'))
return backrefs
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
if exception is not None:
return
print('Embedding documentation hyperlinks in examples..')
if app.builder.name == 'latex':
# Don't embed hyperlinks when a latex builder is used.
return
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
resolver_urls = {
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
}
for this_module, url in resolver_urls.items():
try:
doc_resolvers[this_module] = SphinxDocLinkResolver(url)
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding `{0}` links due to a URL "
"Error:\n".format(this_module))
print(e.args)
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.items():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
try:
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
except (HTTPError, URLError) as e:
print("The following error has occurred:\n")
print(repr(e))
continue
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.)\b' + # don't follow . or word
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
print('[done]')
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
def setup_module():
# HACK: Stop nosetests running setup() above
pass
| bsd-3-clause |
charanpald/wallhack | wallhack/viroscopy/ContactGrowthStatistics.py | 1 | 49412 | import logging
import sys
import gc
import numpy
import os.path
import matplotlib.pyplot as plt
from datetime import date
from sandbox.util.PathDefaults import PathDefaults
from sandbox.util.DateUtils import DateUtils
from sandbox.util.Latex import Latex
from sandbox.util.Util import Util
from apgl.graph import *
from apgl.viroscopy.HIVGraphReader import HIVGraphReader
from apgl.viroscopy.HIVGraphStatistics import HIVGraphStatistics
"""
This script computes some basic statistics on the growing graph. We currently
combine both infection and detection graphs and hence
look at the contact graph.
"""
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
numpy.set_printoptions(suppress=True, linewidth=150)
hivReader = HIVGraphReader()
graph = hivReader.readHIVGraph()
fInds = hivReader.getIndicatorFeatureIndices()
#The set of edges indexed by zeros is the contact graph
#The ones indexed by 1 is the infection graph
edgeTypeIndex1 = 0
edgeTypeIndex2 = 1
sGraphContact = graph.getSparseGraph(edgeTypeIndex1)
sGraphInfect = graph.getSparseGraph(edgeTypeIndex2)
sGraphContact = sGraphContact.union(sGraphInfect)
sGraph = sGraphContact
#sGraph = sGraph.subgraph(range(0, 200))
figureDir = PathDefaults.getOutputDir() + "viroscopy/figures/contact/"
resultsDir = PathDefaults.getOutputDir() + "viroscopy/"
graphStats = GraphStatistics()
statsArray = graphStats.scalarStatistics(sGraph, False)
slowStats = True
saveResults = False
logging.info(sGraph)
logging.info("Number of features: " + str(sGraph.getVertexList().getNumFeatures()))
logging.info("Largest component is " + str(statsArray[graphStats.maxComponentSizeIndex]))
logging.info("Number of components " + str(statsArray[graphStats.numComponentsIndex]))
#sGraph = sGraph.subgraph(components[componentIndex])
vertexArray = sGraph.getVertexList().getVertices()
logging.info("Size of graph we will use: " + str(sGraph.getNumVertices()))
#Some indices
dobIndex = fInds["birthDate"]
detectionIndex = fInds["detectDate"]
deathIndex = fInds["deathDate"]
genderIndex = fInds["gender"]
orientationIndex = fInds["orient"]
ages = vertexArray[:, detectionIndex] - vertexArray[:, dobIndex]
deaths = vertexArray[:, deathIndex] - vertexArray[:, detectionIndex]
detections = vertexArray[:, detectionIndex]
startYear = 1900
daysInYear = 365
daysInMonth = 30
monthStep = 3
#Effective diameter q
q = 0.9
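# Hedged note, not in the original script: with q = 0.9 the effective diameter
# is, informally, the 0.9-quantile of the pairwise geodesic distances rather
# than their maximum, which makes it robust to a handful of very long paths.
# Toy illustration with made-up distances:
_exampleGeodesics = numpy.array([1, 2, 2, 3, 9])
_exampleEffDiam = numpy.percentile(_exampleGeodesics, q * 100)  # ~6.6, versus a maximum of 9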
plotInd = 1
plotStyles = ['ko-', 'kx-', 'k+-', 'k.-', 'k*-']
plotStyles2 = ['k-', 'r-', 'g-', 'b-', 'c-', 'm-']
plotStyleBW = ['k-', 'k--', 'k-.', 'k:']
plotStyles4 = ['r-', 'r--', 'r-.', 'r:']
numConfigGraphs = 10
#Make sure we include all detections
dayList = list(range(int(numpy.min(detections)), int(numpy.max(detections)), daysInMonth*monthStep))
dayList.append(numpy.max(detections))
absDayList = [float(i-numpy.min(detections)) for i in dayList]
subgraphIndicesList = []
for i in dayList:
logging.info("Date: " + str(DateUtils.getDateStrFromDay(i, startYear)))
subgraphIndices = numpy.nonzero(detections <= i)[0]
subgraphIndicesList.append(subgraphIndices)
#Compute the indices list for the vector statistics
dayList2 = [DateUtils.getDayDelta(date(1989, 12, 31), startYear)]
dayList2.append(DateUtils.getDayDelta(date(1993, 12, 31), startYear))
dayList2.append(DateUtils.getDayDelta(date(1997, 12, 31), startYear))
dayList2.append(DateUtils.getDayDelta(date(2001, 12, 31), startYear))
dayList2.append(int(numpy.max(detections)))
subgraphIndicesList2 = []
for i in dayList2:
logging.info("Date: " + str(DateUtils.getDateStrFromDay(i, startYear)))
subgraphIndices = numpy.nonzero(detections <= i)[0]
subgraphIndicesList2.append(subgraphIndices)
#Locations and labels for years
locs = list(range(0, int(absDayList[-1]), daysInYear*2))
labels = numpy.arange(1986, 2006, 2)
#Some indices
contactIndex = fInds["contactTrace"]
donorIndex = fInds["donor"]
randomTestIndex = fInds["randomTest"]
stdIndex = fInds["STD"]
prisonerIndex = fInds["prisoner"]
doctorIndex = fInds["recommendVisit"]
#The most popular provinces
havanaIndex = fInds["CH"]
villaClaraIndex = fInds["VC"]
pinarIndex = fInds["PR"]
holguinIndex = fInds["HO"]
habanaIndex = fInds["LH"]
sanctiIndex = fInds["SS"]
santiagoIndex = fInds['SC']
camagueyIndex = fInds['CA']
def plotVertexStats():
#Calculate all vertex statistics
logging.info("Computing vertex stats")
#Indices
numContactsIndex = fInds["numContacts"]
numTestedIndex = fInds["numTested"]
numPositiveIndex = fInds["numPositive"]
#Properties of vertex values
detectionAges = []
deathAfterInfectAges = []
deathAges = []
homoMeans = []
maleSums = []
femaleSums = []
heteroSums = []
biSums = []
contactMaleSums = []
contactFemaleSums = []
contactHeteroSums = []
contactBiSums = []
doctorMaleSums = []
doctorFemaleSums = []
doctorHeteroSums = []
doctorBiSums = []
contactSums = []
nonContactSums = []
donorSums = []
randomTestSums = []
stdSums = []
prisonerSums = []
recommendSums = []
#This is: all detections - contact, donor, randomTest, STD, recommend
otherSums = []
havanaSums = []
villaClaraSums = []
pinarSums = []
holguinSums = []
habanaSums = []
sanctiSums = []
numContactSums = []
numTestedSums = []
numPositiveSums = []
#Total number of sexual contacts
numContactMaleSums = []
numContactFemaleSums = []
numContactHeteroSums = []
numContactBiSums = []
numTestedMaleSums = []
numTestedFemaleSums = []
numTestedHeteroSums = []
numTestedBiSums = []
numPositiveMaleSums = []
numPositiveFemaleSums = []
numPositiveHeteroSums = []
numPositiveBiSums = []
propPositiveMaleSums = []
propPositiveFemaleSums = []
propPositiveHeteroSums = []
propPositiveBiSums = []
numContactVertices = []
numContactEdges = []
numInfectEdges = []
#Mean proportion of degree at end of epidemic
meanPropDegree = []
finalDegreeSequence = numpy.array(sGraph.outDegreeSequence(), numpy.float)
degreeOneSums = []
degreeTwoSums = []
degreeThreePlusSums = []
numProvinces = 15
provinceArray = numpy.zeros((len(subgraphIndicesList), numProvinces))
m = 0
for subgraphIndices in subgraphIndicesList:
subgraph = sGraph.subgraph(subgraphIndices)
infectSubGraph = sGraphInfect.subgraph(subgraphIndices)
subgraphVertexArray = subgraph.getVertexList().getVertices(range(subgraph.getNumVertices()))
detectionAges.append(numpy.mean((subgraphVertexArray[:, detectionIndex] - subgraphVertexArray[:, dobIndex]))/daysInYear)
deathAfterInfectAges.append((numpy.mean(subgraphVertexArray[:, deathIndex] - subgraphVertexArray[:, detectionIndex]))/daysInYear)
deathAges.append(numpy.mean((subgraphVertexArray[:, deathIndex] - subgraphVertexArray[:, dobIndex]))/daysInYear)
homoMeans.append(numpy.mean(subgraphVertexArray[:, orientationIndex]))
nonContactSums.append(subgraphVertexArray.shape[0] - numpy.sum(subgraphVertexArray[:, contactIndex]))
contactSums.append(numpy.sum(subgraphVertexArray[:, contactIndex]))
donorSums.append(numpy.sum(subgraphVertexArray[:, donorIndex]))
randomTestSums.append(numpy.sum(subgraphVertexArray[:, randomTestIndex]))
stdSums.append(numpy.sum(subgraphVertexArray[:, stdIndex]))
prisonerSums.append(numpy.sum(subgraphVertexArray[:, prisonerIndex]))
recommendSums.append(numpy.sum(subgraphVertexArray[:, doctorIndex]))
otherSums.append(subgraphVertexArray.shape[0] - numpy.sum(subgraphVertexArray[:, [contactIndex, donorIndex, randomTestIndex, stdIndex, doctorIndex]]))
heteroSums.append(numpy.sum(subgraphVertexArray[:, orientationIndex]==0))
biSums.append(numpy.sum(subgraphVertexArray[:, orientationIndex]==1))
femaleSums.append(numpy.sum(subgraphVertexArray[:, genderIndex]==1))
maleSums.append(numpy.sum(subgraphVertexArray[:, genderIndex]==0))
contactHeteroSums.append(numpy.sum(numpy.logical_and(subgraphVertexArray[:, orientationIndex]==0, subgraphVertexArray[:, contactIndex])))
contactBiSums.append(numpy.sum(numpy.logical_and(subgraphVertexArray[:, orientationIndex]==1, subgraphVertexArray[:, contactIndex])))
contactFemaleSums.append(numpy.sum(numpy.logical_and(subgraphVertexArray[:, genderIndex]==1, subgraphVertexArray[:, contactIndex])))
contactMaleSums.append(numpy.sum(numpy.logical_and(subgraphVertexArray[:, genderIndex]==0, subgraphVertexArray[:, contactIndex])))
doctorHeteroSums.append(numpy.sum(numpy.logical_and(subgraphVertexArray[:, orientationIndex]==0, subgraphVertexArray[:, doctorIndex])))
doctorBiSums.append(numpy.sum(numpy.logical_and(subgraphVertexArray[:, orientationIndex]==1, subgraphVertexArray[:, doctorIndex])))
doctorFemaleSums.append(numpy.sum(numpy.logical_and(subgraphVertexArray[:, genderIndex]==1, subgraphVertexArray[:, doctorIndex])))
doctorMaleSums.append(numpy.sum(numpy.logical_and(subgraphVertexArray[:, genderIndex]==0, subgraphVertexArray[:, doctorIndex])))
havanaSums.append(numpy.sum(subgraphVertexArray[:, havanaIndex]==1))
villaClaraSums.append(numpy.sum(subgraphVertexArray[:, villaClaraIndex]==1))
pinarSums.append(numpy.sum(subgraphVertexArray[:, pinarIndex]==1))
holguinSums.append(numpy.sum(subgraphVertexArray[:, holguinIndex]==1))
habanaSums.append(numpy.sum(subgraphVertexArray[:, habanaIndex]==1))
sanctiSums.append(numpy.sum(subgraphVertexArray[:, sanctiIndex]==1))
numContactSums.append(numpy.mean(subgraphVertexArray[:, numContactsIndex]))
numTestedSums.append(numpy.mean(subgraphVertexArray[:, numTestedIndex]))
numPositiveSums.append(numpy.mean(subgraphVertexArray[:, numPositiveIndex]))
numContactMaleSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, genderIndex]==0, numContactsIndex]))
numContactFemaleSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, genderIndex]==1, numContactsIndex]))
numContactHeteroSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, orientationIndex]==0, numContactsIndex]))
numContactBiSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, orientationIndex]==1, numContactsIndex]))
numTestedMaleSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, genderIndex]==0, numTestedIndex]))
numTestedFemaleSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, genderIndex]==1, numTestedIndex]))
numTestedHeteroSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, orientationIndex]==0, numTestedIndex]))
numTestedBiSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, orientationIndex]==1, numTestedIndex]))
numPositiveMaleSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, genderIndex]==0, numPositiveIndex]))
numPositiveFemaleSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, genderIndex]==1, numPositiveIndex]))
numPositiveHeteroSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, orientationIndex]==0, numPositiveIndex]))
numPositiveBiSums.append(numpy.mean(subgraphVertexArray[subgraphVertexArray[:, orientationIndex]==1, numPositiveIndex]))
propPositiveMaleSums.append(numPositiveMaleSums[m]/float(numTestedMaleSums[m]))
propPositiveFemaleSums.append(numPositiveFemaleSums[m]/float(numTestedFemaleSums[m]))
propPositiveHeteroSums.append(numPositiveHeteroSums[m]/float(numTestedHeteroSums[m]))
propPositiveBiSums.append(numPositiveBiSums[m]/float(numTestedBiSums[m]))
numContactVertices.append(subgraph.getNumVertices())
numContactEdges.append(subgraph.getNumEdges())
numInfectEdges.append(infectSubGraph.getNumEdges())
nonZeroInds = finalDegreeSequence[subgraphIndices]!=0
propDegrees = numpy.mean(subgraph.outDegreeSequence()[nonZeroInds]/finalDegreeSequence[subgraphIndices][nonZeroInds])
meanPropDegree.append(numpy.mean(propDegrees))
degreeOneSums.append(numpy.sum(subgraph.outDegreeSequence()==1))
degreeTwoSums.append(numpy.sum(subgraph.outDegreeSequence()==2))
degreeThreePlusSums.append(numpy.sum(subgraph.outDegreeSequence()>=3))
provinceArray[m, :] = numpy.sum(subgraphVertexArray[:, fInds["CA"]:fInds['VC']+1], 0)
m += 1
#Save some of the results for the ABC work
numStats = 2
vertexStatsArray = numpy.zeros((len(subgraphIndicesList), numStats))
vertexStatsArray[:, 0] = numpy.array(biSums)
vertexStatsArray[:, 1] = numpy.array(heteroSums)
resultsFileName = resultsDir + "ContactGrowthVertexStats.pkl"
Util.savePickle(vertexStatsArray, resultsFileName)
global plotInd
plt.figure(plotInd)
plt.plot(absDayList, detectionAges)
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Detection Age (years)")
plt.savefig(figureDir + "DetectionMeansGrowth.eps")
plotInd += 1
plt.figure(plotInd)
plt.plot(absDayList, heteroSums, 'k-', absDayList, biSums, 'k--', absDayList, femaleSums, 'k-.', absDayList, maleSums, 'k:')
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Detections")
plt.legend(("Heterosexual", "MSM", "Female", "Male"), loc="upper left")
plt.savefig(figureDir + "OrientationGenderGrowth.eps")
plotInd += 1
plt.figure(plotInd)
plt.plot(absDayList, contactHeteroSums, 'k-', absDayList, contactBiSums, 'k--', absDayList, contactFemaleSums, 'k-.', absDayList, contactMaleSums, 'k:')
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Contact tracing detections")
plt.legend(("Heterosexual", "MSM", "Female", "Male"), loc="upper left")
plt.savefig(figureDir + "OrientationGenderContact.eps")
plotInd += 1
plt.figure(plotInd)
plt.plot(absDayList, doctorHeteroSums, 'k-', absDayList, doctorBiSums, 'k--', absDayList, doctorFemaleSums, 'k-.', absDayList, doctorMaleSums, 'k:')
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Doctor recommendation detections")
plt.legend(("Heterosexual", "MSM", "Female", "Male"), loc="upper left")
plt.savefig(figureDir + "OrientationGenderDoctor.eps")
plotInd += 1
#Plot all the provinces
plt.figure(plotInd)
plt.hold(True)
for k in range(provinceArray.shape[1]):
plt.plot(absDayList, provinceArray[:, k], label=str(k))
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Detections")
plt.legend(loc="upper left")
plotInd += 1
#Plot of detection types
plt.figure(plotInd)
plt.plot(absDayList, contactSums, plotStyles2[0], absDayList, donorSums, plotStyles2[1], absDayList, randomTestSums, plotStyles2[2], absDayList, stdSums, plotStyles2[3], absDayList, otherSums, plotStyles2[4], absDayList, recommendSums, plotStyles2[5])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Detections")
plt.legend(("Contact tracing", "Blood donation", "Random test", "STD", "Other test", "Doctor recommendation"), loc="upper left")
plt.savefig(figureDir + "DetectionGrowth.eps")
plotInd += 1
plt.figure(plotInd)
plt.plot(absDayList, numContactSums, plotStyleBW[0], absDayList, numTestedSums, plotStyleBW[1], absDayList, numPositiveSums, plotStyleBW[2])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Contacts")
plt.legend(("No. contacts", "No. tested", "No. positive"), loc="center left")
plt.savefig(figureDir + "ContactsGrowth.eps")
plotInd += 1
plt.figure(plotInd)
plt.plot(absDayList, numContactHeteroSums, plotStyleBW[0], absDayList, numContactBiSums, plotStyleBW[1], absDayList, numContactFemaleSums, plotStyleBW[2], absDayList, numContactMaleSums, plotStyleBW[3])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Total contacts")
plt.legend(("Heterosexual", "MSM", "Female", "Male"), loc="upper right")
plt.savefig(figureDir + "ContactsGrowthOrientGen.eps")
plotInd += 1
plt.figure(plotInd)
plt.plot(absDayList, numTestedHeteroSums, plotStyleBW[0], absDayList, numTestedBiSums, plotStyleBW[1], absDayList, numTestedFemaleSums, plotStyleBW[2], absDayList, numTestedMaleSums, plotStyleBW[3])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Tested contacts")
plt.legend(("Heterosexual", "MSM", "Female", "Male"), loc="upper right")
plt.savefig(figureDir + "TestedGrowthOrientGen.eps")
plotInd += 1
plt.figure(plotInd)
plt.plot(absDayList, numPositiveHeteroSums, plotStyleBW[0], absDayList, numPositiveBiSums, plotStyleBW[1], absDayList, numPositiveFemaleSums, plotStyleBW[2], absDayList, numPositiveMaleSums, plotStyleBW[3])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Positive contacts")
plt.legend(("Heterosexual", "MSM", "Female", "Male"), loc="upper right")
plt.savefig(figureDir + "PositiveGrowthOrientGen.eps")
plotInd += 1
#Proportion positive versus tested
plt.figure(plotInd)
plt.plot(absDayList, propPositiveHeteroSums, plotStyleBW[0], absDayList, propPositiveBiSums, plotStyleBW[1], absDayList, propPositiveFemaleSums, plotStyleBW[2], absDayList, propPositiveMaleSums, plotStyleBW[3])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Proportion positive contacts")
plt.legend(("Heterosexual", "MSM", "Female", "Male"), loc="upper right")
plt.savefig(figureDir + "PercentPositiveGrowthOrientGen.eps")
plotInd += 1
plt.figure(plotInd)
plt.hold(True)
plt.plot(absDayList, havanaSums, plotStyles2[0])
plt.plot(absDayList, villaClaraSums, plotStyles2[1])
plt.plot(absDayList, pinarSums, plotStyles2[2])
plt.plot(absDayList, holguinSums, plotStyles2[3])
plt.plot(absDayList, habanaSums, plotStyles2[4])
plt.plot(absDayList, sanctiSums, plotStyles2[5])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Detections")
plt.legend(("Havana City", "Villa Clara", "Pinar del Rio", "Holguin", "La Habana", "Sancti Spiritus"), loc="upper left")
plt.savefig(figureDir + "ProvinceGrowth.eps")
plotInd += 1
plt.figure(plotInd)
plt.plot(absDayList, numContactVertices, plotStyleBW[0], absDayList, numContactEdges, plotStyleBW[1], absDayList, numInfectEdges, plotStyleBW[2])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Vertices/edges")
plt.legend(("Contact vertices", "Contact edges", "Infect edges"), loc="upper left")
plt.savefig(figureDir + "VerticesEdges.eps")
plotInd += 1
plt.figure(plotInd)
plt.plot(absDayList, meanPropDegree, plotStyleBW[0])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Proportion of final degree")
plt.savefig(figureDir + "MeanPropDegree.eps")
plotInd += 1
plt.figure(plotInd)
plt.plot(absDayList, degreeOneSums, plotStyleBW[0], absDayList, degreeTwoSums, plotStyleBW[1], absDayList, degreeThreePlusSums, plotStyleBW[2])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Detections")
plt.legend(("Degree = 1", "Degree = 2", "Degree >= 3"), loc="upper left")
plotInd += 1
#Print a table of interesting stats
results = numpy.array([havanaSums])
results = numpy.r_[results, numpy.array([villaClaraSums])]
results = numpy.r_[results, numpy.array([pinarSums])]
results = numpy.r_[results, numpy.array([holguinSums])]
results = numpy.r_[results, numpy.array([habanaSums])]
results = numpy.r_[results, numpy.array([sanctiSums])]
print(Latex.listToRow(["Havana City", "Villa Clara", "Pinar del Rio", "Holguin", "La Habana", "Sancti Spiritus"]))
print("\\hline")
for i in range(0, len(dayList), 4):
day = dayList[i]
print(str(DateUtils.getDateStrFromDay(day, startYear)) + " & " + Latex.array1DToRow(results[:, i].T) + "\\\\")
results = numpy.array([heteroSums])
results = numpy.r_[results, numpy.array([biSums])]
results = numpy.r_[results, numpy.array([femaleSums])]
results = numpy.r_[results, numpy.array([maleSums])]
print("\n\n")
print(Latex.listToRow(["Heterosexual", "MSM", "Female", "Male"]))
print("\\hline")
for i in range(0, len(dayList), 4):
day = dayList[i]
print(str(DateUtils.getDateStrFromDay(day, startYear)) + " & " + Latex.array1DToRow(results[:, i].T) + "\\\\")
def computeConfigScalarStats():
logging.info("Computing configuration model scalar stats")
graphFileNameBase = resultsDir + "ConfigGraph"
resultsFileNameBase = resultsDir + "ConfigGraphScalarStats"
#graphStats.useFloydWarshall = True
for j in range(numConfigGraphs):
resultsFileName = resultsFileNameBase + str(j)
if not os.path.isfile(resultsFileName):
configGraph = SparseGraph.load(graphFileNameBase + str(j))
statsArray = graphStats.sequenceScalarStats(configGraph, subgraphIndicesList, slowStats)
Util.savePickle(statsArray, resultsFileName, True)
gc.collect()
logging.info("All done")
def computeConfigVectorStats():
#Note: We can make this multithreaded
logging.info("Computing configuration model vector stats")
graphFileNameBase = resultsDir + "ConfigGraph"
resultsFileNameBase = resultsDir + "ConfigGraphVectorStats"
for j in range(numConfigGraphs):
resultsFileName = resultsFileNameBase + str(j)
if not os.path.isfile(resultsFileName):
configGraph = SparseGraph.load(graphFileNameBase + str(j))
statsDictList = graphStats.sequenceVectorStats(configGraph, subgraphIndicesList2, eigenStats=False)
Util.savePickle(statsDictList, resultsFileName, False)
gc.collect()
logging.info("All done")
def plotScalarStats():
logging.info("Computing scalar stats")
resultsFileName = resultsDir + "ContactGrowthScalarStats.pkl"
if saveResults:
statsArray = graphStats.sequenceScalarStats(sGraph, subgraphIndicesList, slowStats)
Util.savePickle(statsArray, resultsFileName, True)
#Now compute statistics on the configuration graphs
else:
statsArray = Util.loadPickle(resultsFileName)
#Take the mean of the results over the configuration model graphs
resultsFileNameBase = resultsDir + "ConfigGraphScalarStats"
numGraphs = len(subgraphIndicesList)
#configStatsArrays = numpy.zeros((numGraphs, graphStats.getNumStats(), numConfigGraphs))
configStatsArrays = numpy.zeros((numGraphs, graphStats.getNumStats()-2, numConfigGraphs))
for j in range(numConfigGraphs):
resultsFileName = resultsFileNameBase + str(j)
configStatsArrays[:, :, j] = Util.loadPickle(resultsFileName)
configStatsArray = numpy.mean(configStatsArrays, 2)
configStatsStd = numpy.std(configStatsArrays, 2)
global plotInd
def plotRealConfigError(index, styleReal, styleConfig, realLabel, configLabel):
plt.hold(True)
plt.plot(absDayList, statsArray[:, index], styleReal, label=realLabel)
#errors = numpy.c_[configStatsArray[:, index]-configStatsMinArray[:, index] , configStatsMaxArray[:, index]-configStatsArray[:, index]].T
errors = numpy.c_[configStatsStd[:, index], configStatsStd[:, index]].T
plt.plot(absDayList, configStatsArray[:, index], styleConfig, label=configLabel)
plt.errorbar(absDayList, configStatsArray[:, index], errors, linewidth=0, elinewidth=1, label="_nolegend_", ecolor="red")
xmin, xmax = plt.xlim()
plt.xlim((0, xmax))
ymin, ymax = plt.ylim()
plt.ylim((0, ymax))
#Output all the results into plots
plt.figure(plotInd)
plt.hold(True)
plotRealConfigError(graphStats.maxComponentSizeIndex, plotStyleBW[0], plotStyles4[0], "Max comp. vertices", "CM max comp. vertices")
plotRealConfigError(graphStats.maxComponentEdgesIndex, plotStyleBW[1], plotStyles4[1], "Max comp. edges", "CM max comp. edges")
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("No. vertices/edges")
plt.legend(loc="upper left")
plt.savefig(figureDir + "MaxComponentSizeGrowth.eps")
plotInd += 1
for k in range(len(dayList)):
day = dayList[k]
print(str(DateUtils.getDateStrFromDay(day, startYear)) + ": " + str(statsArray[k, graphStats.maxComponentEdgesIndex]))
#print(str(DateUtils.getDateStrFromDay(day, startYear)) + ": " + str(configStatsArray[k, graphStats.numComponentsIndex]))
plt.figure(plotInd)
plotRealConfigError(graphStats.numComponentsIndex, plotStyleBW[0], plotStyles4[0], "Size >= 1", "CM size >= 1")
plotRealConfigError(graphStats.numNonSingletonComponentsIndex, plotStyleBW[1], plotStyles4[1], "Size >= 2", "CM size >= 2")
plotRealConfigError(graphStats.numTriOrMoreComponentsIndex, plotStyleBW[2], plotStyles4[2], "Size >= 3", "CM size >= 3")
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("No. components")
plt.legend(loc="upper left")
plt.savefig(figureDir + "NumComponentsGrowth.eps")
plotInd += 1
plt.figure(plotInd)
plotRealConfigError(graphStats.meanComponentSizeIndex, plotStyleBW[0], plotStyles4[0], "Real graph", "CM")
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Mean component size")
plt.legend(loc="lower right")
plt.savefig(figureDir + "MeanComponentSizeGrowth.eps")
plotInd += 1
plt.figure(plotInd)
plotRealConfigError(graphStats.diameterIndex, plotStyleBW[0], plotStyles4[0], "Real graph", "CM")
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Max component diameter")
plt.legend(loc="lower right")
plt.savefig(figureDir + "MaxComponentDiameterGrowth.eps")
plotInd += 1
plt.figure(plotInd)
plotRealConfigError(graphStats.effectiveDiameterIndex, plotStyleBW[0], plotStyles4[0], "Real graph", "CM")
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Effective diameter")
plt.legend(loc="lower right")
plt.savefig(figureDir + "MaxComponentEffDiameterGrowth.eps")
plotInd += 1
plt.figure(plotInd)
plotRealConfigError(graphStats.meanDegreeIndex, plotStyleBW[0], plotStyles4[0], "All vertices", "CM all vertices")
plotRealConfigError(graphStats.maxCompMeanDegreeIndex, plotStyleBW[1], plotStyles4[1], "Max component", "CM max component")
#plt.plot(absDayList, statsArray[:, graphStats.meanDegreeIndex], plotStyleBW[0], absDayList, statsArray[:, graphStats.maxCompMeanDegreeIndex], plotStyleBW[1], absDayList, configStatsArray[:, graphStats.meanDegreeIndex], plotStyles4[0], absDayList, configStatsArray[:, graphStats.maxCompMeanDegreeIndex], plotStyles4[1])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Mean degree")
plt.legend(loc="lower right")
plt.savefig(figureDir + "MeanDegrees.eps")
plotInd += 1
plt.figure(plotInd)
plotRealConfigError(graphStats.densityIndex, plotStyleBW[0], plotStyles4[0], "Real Graph", "Config Model")
#plt.plot(absDayList, statsArray[:, graphStats.densityIndex], plotStyleBW[0], absDayList, configStatsArray[:, graphStats.densityIndex], plotStyles4[0])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Density")
plt.legend()
plt.savefig(figureDir + "DensityGrowth.eps")
plotInd += 1
plt.figure(plotInd)
plt.plot(absDayList, statsArray[:, graphStats.powerLawIndex], plotStyleBW[0])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Alpha")
plt.savefig(figureDir + "PowerLawGrowth.eps")
plotInd += 1
plt.figure(plotInd)
plotRealConfigError(graphStats.geodesicDistanceIndex, plotStyleBW[0], plotStyles4[0], "Real Graph", "Config Model")
#plt.plot(absDayList, statsArray[:, graphStats.geodesicDistanceIndex], plotStyleBW[0], absDayList, configStatsArray[:, graphStats.geodesicDistanceIndex], plotStyles4[0])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Geodesic distance")
plt.legend(loc="lower right")
plt.savefig(figureDir + "GeodesicGrowth.eps")
plotInd += 1
plt.figure(plotInd)
plotRealConfigError(graphStats.harmonicGeoDistanceIndex, plotStyleBW[0], plotStyles4[0], "Real Graph", "Config Model")
#plt.plot(absDayList, statsArray[:, graphStats.harmonicGeoDistanceIndex], plotStyleBW[0], absDayList, configStatsArray[:, graphStats.harmonicGeoDistanceIndex], plotStyles4[0])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Mean harmonic geodesic distance")
plt.legend(loc="upper right")
plt.savefig(figureDir + "HarmonicGeodesicGrowth.eps")
plotInd += 1
#print(statsArray[:, graphStats.harmonicGeoDistanceIndex])
plt.figure(plotInd)
plotRealConfigError(graphStats.geodesicDistMaxCompIndex, plotStyleBW[0], plotStyles4[0], "Real graph", "Config model")
#plt.plot(absDayList, statsArray[:, graphStats.geodesicDistMaxCompIndex], plotStyleBW[0], absDayList, configStatsArray[:, graphStats.geodesicDistMaxCompIndex], plotStyles4[0])
plt.xticks(locs, labels)
plt.xlabel("Year")
plt.ylabel("Max component mean geodesic distance")
plt.legend(loc="lower right")
plt.savefig(figureDir + "MaxCompGeodesicGrowth.eps")
plotInd += 1
#Find the number of edges in the infection graph
resultsFileName = resultsDir + "InfectGrowthScalarStats.pkl"
infectStatsArray = Util.loadPickle(resultsFileName)
#Make sure we don't include 0 in the array
vertexIndex = numpy.argmax(statsArray[:, graphStats.numVerticesIndex] > 0)
edgeIndex = numpy.argmax(infectStatsArray[:, graphStats.numEdgesIndex] > 0)
minIndex = numpy.maximum(vertexIndex, edgeIndex)
plt.figure(plotInd)
plt.plot(numpy.log(statsArray[minIndex:, graphStats.numVerticesIndex]), numpy.log(statsArray[minIndex:, graphStats.numEdgesIndex]), plotStyleBW[0])
plt.plot(numpy.log(infectStatsArray[minIndex:, graphStats.numVerticesIndex]), numpy.log(infectStatsArray[minIndex:, graphStats.numEdgesIndex]), plotStyleBW[1])
plt.plot(numpy.log(statsArray[minIndex:, graphStats.maxComponentSizeIndex]), numpy.log(statsArray[minIndex:, graphStats.maxComponentEdgesIndex]), plotStyleBW[2])
plt.xlabel("log(|V|)")
plt.ylabel("log(|E|)/log(|D|)")
plt.legend(("Contact graph", "Infection graph", "Max component"), loc="upper left")
plt.savefig(figureDir + "LogVerticesEdgesGrowth.eps")
plotInd += 1
results = statsArray[:, graphStats.effectiveDiameterIndex]
results = numpy.c_[results, configStatsArray[:, graphStats.effectiveDiameterIndex]]
results = numpy.c_[results, statsArray[:, graphStats.geodesicDistMaxCompIndex]]
results = numpy.c_[results, configStatsArray[:, graphStats.geodesicDistMaxCompIndex]]
print("\n\n")
print(Latex.listToRow(["Diameter", "CM Diameter", "Mean Geodesic", "CM Mean Geodesic"]))
print("\\hline")
for i in range(0, len(dayList), 4):
day = dayList[i]
print(str(DateUtils.getDateStrFromDay(day, startYear)) + " & " + Latex.array1DToRow(results[i, :]) + "\\\\")
def plotVectorStats():
#Finally, compute some vector stats at various points in the graph
logging.info("Computing vector stats")
global plotInd
resultsFileName = resultsDir + "ContactGrowthVectorStats.pkl"
if saveResults:
statsDictList = graphStats.sequenceVectorStats(sGraph, subgraphIndicesList2)
Util.savePickle(statsDictList, resultsFileName, False)
else:
statsDictList = Util.loadPickle(resultsFileName)
#Load up configuration model results
configStatsDictList = []
resultsFileNameBase = resultsDir + "ConfigGraphVectorStats"
for j in range(numConfigGraphs):
resultsFileName = resultsFileNameBase + str(j)
configStatsDictList.append(Util.loadPickle(resultsFileName))
#Now need to take mean of 1st element of list
meanConfigStatsDictList = configStatsDictList[0]
for i in range(len(configStatsDictList[0])):
for k in range(1, numConfigGraphs):
for key in configStatsDictList[k][i].keys():
if configStatsDictList[k][i][key].shape[0] > meanConfigStatsDictList[i][key].shape[0]:
meanConfigStatsDictList[i][key] = numpy.r_[meanConfigStatsDictList[i][key], numpy.zeros(configStatsDictList[k][i][key].shape[0] - meanConfigStatsDictList[i][key].shape[0])]
elif configStatsDictList[k][i][key].shape[0] < meanConfigStatsDictList[i][key].shape[0]:
configStatsDictList[k][i][key] = numpy.r_[configStatsDictList[k][i][key], numpy.zeros(meanConfigStatsDictList[i][key].shape[0] - configStatsDictList[k][i][key].shape[0])]
meanConfigStatsDictList[i][key] += configStatsDictList[k][i][key]
for key in configStatsDictList[0][i].keys():
meanConfigStatsDictList[i][key] = meanConfigStatsDictList[i][key]/numConfigGraphs
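# Hedged clarification of the zero-padding above (values are made up): the same
# distribution can have different lengths across configuration graphs, so the
# shorter array is padded with zeros before the running sum and mean, e.g.
#   a = numpy.array([4., 2., 1.]); b = numpy.array([5., 3.])
#   b = numpy.r_[b, numpy.zeros(a.shape[0] - b.shape[0])]   # -> array([5., 3., 0.])
#   mean = (a + b) / 2.0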
triangleDistArray = numpy.zeros((len(dayList2), 100))
configTriangleDistArray = numpy.zeros((len(dayList2), 100))
hopPlotArray = numpy.zeros((len(dayList2), 27))
configHopPlotArray = numpy.zeros((len(dayList2), 30))
componentsDistArray = numpy.zeros((len(dayList2), 3000))
configComponentsDistArray = numpy.zeros((len(dayList2), 3000))
numVerticesEdgesArray = numpy.zeros((len(dayList2), 2), numpy.int)
numVerticesEdgesArray[:, 0] = [len(sgl) for sgl in subgraphIndicesList2]
numVerticesEdgesArray[:, 1] = [sGraph.subgraph(sgl).getNumEdges() for sgl in subgraphIndicesList2]
binWidths = numpy.arange(0, 0.50, 0.05)
eigVectorDists = numpy.zeros((len(dayList2), binWidths.shape[0]-1), numpy.int)
femaleSums = numpy.zeros(len(dayList2))
maleSums = numpy.zeros(len(dayList2))
heteroSums = numpy.zeros(len(dayList2))
biSums = numpy.zeros(len(dayList2))
contactSums = numpy.zeros(len(dayList2))
nonContactSums = numpy.zeros(len(dayList2))
donorSums = numpy.zeros(len(dayList2))
randomTestSums = numpy.zeros(len(dayList2))
stdSums = numpy.zeros(len(dayList2))
prisonerSums = numpy.zeros(len(dayList2))
recommendSums = numpy.zeros(len(dayList2))
meanAges = numpy.zeros(len(dayList2))
degrees = numpy.zeros((len(dayList2), 20))
provinces = numpy.zeros((len(dayList2), 15))
havanaSums = numpy.zeros(len(dayList2))
villaClaraSums = numpy.zeros(len(dayList2))
pinarSums = numpy.zeros(len(dayList2))
holguinSums = numpy.zeros(len(dayList2))
habanaSums = numpy.zeros(len(dayList2))
sanctiSums = numpy.zeros(len(dayList2))
meanDegrees = numpy.zeros(len(dayList2))
stdDegrees = numpy.zeros(len(dayList2))
#Note that death has a lot of missing values
for j in range(len(dayList2)):
dateStr = (str(DateUtils.getDateStrFromDay(dayList2[j], startYear)))
logging.info(dateStr)
statsDict = statsDictList[j]
configStatsDict = meanConfigStatsDictList[j]
degreeDist = statsDict["outDegreeDist"]
degreeDist = degreeDist/float(numpy.sum(degreeDist))
#Note that degree distribution for configuration graph will be identical
eigenDist = statsDict["eigenDist"]
eigenDist = numpy.log(eigenDist[eigenDist>=10**-1])
#configEigenDist = configStatsDict["eigenDist"]
#configEigenDist = numpy.log(configEigenDist[configEigenDist>=10**-1])
hopCount = statsDict["hopCount"]
hopCount = numpy.log10(hopCount)
hopPlotArray[j, 0:hopCount.shape[0]] = hopCount
configHopCount = configStatsDict["hopCount"]
configHopCount = numpy.log10(configHopCount)
#configHopPlotArray[j, 0:configHopCount.shape[0]] = configHopCount
triangleDist = statsDict["triangleDist"]
#triangleDist = numpy.array(triangleDist, numpy.float64)/numpy.sum(triangleDist)
triangleDist = numpy.array(triangleDist, numpy.float64)
triangleDistArray[j, 0:triangleDist.shape[0]] = triangleDist
configTriangleDist = configStatsDict["triangleDist"]
configTriangleDist = numpy.array(configTriangleDist, numpy.float64)/numpy.sum(configTriangleDist)
configTriangleDistArray[j, 0:configTriangleDist.shape[0]] = configTriangleDist
maxEigVector = statsDict["maxEigVector"]
eigenvectorInds = numpy.flipud(numpy.argsort(numpy.abs(maxEigVector)))
top10eigenvectorInds = eigenvectorInds[0:int(numpy.round(eigenvectorInds.shape[0]/10.0))]
maxEigVector = numpy.abs(maxEigVector[eigenvectorInds])
#print(maxEigVector)
eigVectorDists[j, :] = numpy.histogram(maxEigVector, binWidths)[0]
componentsDist = statsDict["componentsDist"]
componentsDist = numpy.array(componentsDist, numpy.float64)/numpy.sum(componentsDist)
componentsDistArray[j, 0:componentsDist.shape[0]] = componentsDist
configComponentsDist = configStatsDict["componentsDist"]
configComponentsDist = numpy.array(configComponentsDist, numpy.float64)/numpy.sum(configComponentsDist)
configComponentsDistArray[j, 0:configComponentsDist.shape[0]] = configComponentsDist
plotInd2 = plotInd
plt.figure(plotInd2)
plt.plot(numpy.arange(degreeDist.shape[0]), degreeDist, plotStyles2[j], label=dateStr)
plt.xlabel("Degree")
plt.ylabel("Probability")
plt.ylim((0, 0.5))
plt.savefig(figureDir + "DegreeDist" + ".eps")
plt.legend()
plotInd2 += 1
"""
plt.figure(plotInd2)
plt.plot(numpy.arange(eigenDist.shape[0]), eigenDist, label=dateStr)
plt.xlabel("Eigenvalue rank")
plt.ylabel("log(Eigenvalue)")
plt.savefig(figureDir + "EigenDist" + ".eps")
plt.legend()
plotInd2 += 1
"""
#How does kleinberg do the hop plots
plt.figure(plotInd2)
plt.plot(numpy.arange(hopCount.shape[0]), hopCount, plotStyles[j], label=dateStr)
plt.xlabel("k")
plt.ylabel("log10(pairs)")
plt.ylim( (2.5, 7) )
plt.legend(loc="lower right")
plt.savefig(figureDir + "HopCount" + ".eps")
plotInd2 += 1
plt.figure(plotInd2)
plt.plot(numpy.arange(maxEigVector.shape[0]), maxEigVector, plotStyles2[j], label=dateStr)
plt.xlabel("Rank")
plt.ylabel("log(eigenvector coefficient)")
plt.savefig(figureDir + "MaxEigVector" + ".eps")
plt.legend()
plotInd2 += 1
#Compute some information the 10% most central vertices
subgraphIndices = numpy.nonzero(detections <= dayList2[j])[0]
subgraph = sGraph.subgraph(subgraphIndices)
subgraphVertexArray = subgraph.getVertexList().getVertices()
femaleSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, genderIndex]==1)
maleSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, genderIndex]==0)
heteroSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, orientationIndex]==0)
biSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, orientationIndex]==1)
contactSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, contactIndex])
donorSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, donorIndex])
randomTestSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, randomTestIndex])
stdSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, stdIndex])
prisonerSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, prisonerIndex])
recommendSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, doctorIndex])
meanAges[j] = numpy.mean(subgraphVertexArray[top10eigenvectorInds, detectionIndex] - subgraphVertexArray[top10eigenvectorInds, dobIndex])/daysInYear
havanaSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, havanaIndex])
villaClaraSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, villaClaraIndex])
pinarSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, pinarIndex])
holguinSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, holguinIndex])
habanaSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, habanaIndex])
sanctiSums[j] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, sanctiIndex])
provinces[j, :] = numpy.sum(subgraphVertexArray[top10eigenvectorInds, 22:37], 0)
ddist = numpy.bincount(subgraph.outDegreeSequence()[top10eigenvectorInds])
degrees[j, 0:ddist.shape[0]] = numpy.array(ddist, numpy.float)/numpy.sum(ddist)
meanDegrees[j] = numpy.mean(subgraph.outDegreeSequence()[top10eigenvectorInds])
stdDegrees[j] = numpy.std(subgraph.outDegreeSequence()[top10eigenvectorInds])
plt.figure(plotInd2)
plt.plot(numpy.arange(degrees[j, :].shape[0]), degrees[j, :], plotStyles2[j], label=dateStr)
plt.xlabel("Degree")
plt.ylabel("Probability")
#plt.ylim((0, 0.5))
plt.savefig(figureDir + "DegreeDistCentral" + ".eps")
plt.legend()
plotInd2 += 1
precision = 4
dateStrList = [DateUtils.getDateStrFromDay(day, startYear) for day in dayList2]
print("Hop counts")
print(Latex.listToRow(dateStrList))
print(Latex.array2DToRows(hopPlotArray.T))
print("\nHop counts for configuration graphs")
print(Latex.listToRow(dateStrList))
print(Latex.array2DToRows(configHopPlotArray.T))
print("\n\nEdges and vertices")
print((Latex.listToRow(dateStrList)))
print((Latex.array2DToRows(numVerticesEdgesArray.T, precision)))
print("\n\nEigenvector distribution")
print((Latex.array1DToRow(binWidths[1:]) + "\\\\"))
print((Latex.array2DToRows(eigVectorDists)))
print("\n\nDistribution of component sizes")
componentsDistArray = componentsDistArray[:, 0:componentsDist.shape[0]]
nonZeroCols = numpy.sum(componentsDistArray, 0)!=0
componentsDistArray = numpy.r_[numpy.array([numpy.arange(componentsDistArray.shape[1])[nonZeroCols]]), componentsDistArray[:, nonZeroCols]]
print((Latex.listToRow(dateStrList)))
print((Latex.array2DToRows(componentsDistArray.T, precision)))
print("\n\nDistribution of component sizes in configuration graphs")
configComponentsDistArray = configComponentsDistArray[:, 0:configComponentsDist.shape[0]]
nonZeroCols = numpy.sum(configComponentsDistArray, 0)!=0
configComponentsDistArray = numpy.r_[numpy.array([numpy.arange(configComponentsDistArray.shape[1])[nonZeroCols]]), configComponentsDistArray[:, nonZeroCols]]
print((Latex.listToRow(dateStrList)))
print((Latex.array2DToRows(configComponentsDistArray.T, precision)))
print("\n\nDistribution of triangle participations")
triangleDistArray = triangleDistArray[:, 0:triangleDist.shape[0]]
nonZeroCols = numpy.sum(triangleDistArray, 0)!=0
triangleDistArray = numpy.r_[numpy.array([numpy.arange(triangleDistArray.shape[1])[nonZeroCols]])/2, triangleDistArray[:, nonZeroCols]]
print((Latex.listToRow(dateStrList)))
print((Latex.array2DToRows(triangleDistArray.T, precision)))
configTriangleDistArray = configTriangleDistArray[:, 0:configTriangleDist.shape[0]]
nonZeroCols = numpy.sum(configTriangleDistArray, 0)!=0
configTriangleDistArray = numpy.r_[numpy.array([numpy.arange(configTriangleDistArray.shape[1])[nonZeroCols]])/2, configTriangleDistArray[:, nonZeroCols]]
configTriangleDistArray = numpy.c_[configTriangleDistArray, numpy.zeros((configTriangleDistArray.shape[0], triangleDistArray.shape[1]-configTriangleDistArray.shape[1]))]
print("\n\nDistribution of central vertices")
print((Latex.listToRow(dateStrList)))
subgraphSizes = numpy.array(maleSums + femaleSums, numpy.float)
print("Female & " + Latex.array1DToRow(femaleSums*100/subgraphSizes, 1) + "\\\\")
print("Male & " + Latex.array1DToRow(maleSums*100/subgraphSizes, 1) + "\\\\")
print("\hline")
print("Heterosexual & " + Latex.array1DToRow(heteroSums*100/subgraphSizes, 1) + "\\\\")
print("Bisexual & " + Latex.array1DToRow(biSums*100/subgraphSizes, 1) + "\\\\")
print("\hline")
print("Contact traced & " + Latex.array1DToRow(contactSums*100/subgraphSizes, 1) + "\\\\")
print("Blood donor & " + Latex.array1DToRow(donorSums*100/subgraphSizes, 1) + "\\\\")
print("RandomTest & " + Latex.array1DToRow(randomTestSums*100/subgraphSizes, 1) + "\\\\")
print("STD & " + Latex.array1DToRow(stdSums*100/subgraphSizes, 1) + "\\\\")
print("Prisoner & " + Latex.array1DToRow(prisonerSums*100/subgraphSizes, 1) + "\\\\")
print("Doctor recommendation & " + Latex.array1DToRow(recommendSums*100/subgraphSizes, 1) + "\\\\")
print("\hline")
print("Mean ages (years) & " + Latex.array1DToRow(meanAges, 2) + "\\\\")
print("\hline")
print("Holguin & " + Latex.array1DToRow(holguinSums*100/subgraphSizes, 1) + "\\\\")
print("La Habana & " + Latex.array1DToRow(habanaSums*100/subgraphSizes, 1) + "\\\\")
print("Havana City & " + Latex.array1DToRow(havanaSums*100/subgraphSizes, 1) + "\\\\")
print("Pinar del Rio & " + Latex.array1DToRow(pinarSums*100/subgraphSizes, 1) + "\\\\")
print("Sancti Spiritus & " + Latex.array1DToRow(sanctiSums*100/subgraphSizes, 1) + "\\\\")
print("Villa Clara & " + Latex.array1DToRow(villaClaraSums*100/subgraphSizes, 1) + "\\\\")
print("\hline")
print("Mean degrees & " + Latex.array1DToRow(meanDegrees, 2) + "\\\\")
print("Std degrees & " + Latex.array1DToRow(stdDegrees, 2) + "\\\\")
print("\n\nProvinces")
print(Latex.array2DToRows(provinces))
print("\n\nDegree distribution")
print(Latex.array2DToRows(degrees))
def plotOtherStats():
#Let's look at geodesic distances in subgraphs and communities
logging.info("Computing other stats")
resultsFileName = resultsDir + "ContactGrowthOtherStats.pkl"
hivGraphStats = HIVGraphStatistics(fInds)
if saveResults:
statsArray = hivGraphStats.sequenceScalarStats(sGraph, subgraphIndicesList)
#statsArray["dayList"] = absDayList
Util.savePickle(statsArray, resultsFileName, True)
else:
statsArray = Util.loadPickle(resultsFileName)
#Just load the harmonic geodesic distances of the full graph
resultsFileName = resultsDir + "ContactGrowthScalarStats.pkl"
statsArray2 = Util.loadPickle(resultsFileName)
global plotInd
msmGeodesic = statsArray[:, hivGraphStats.msmGeodesicIndex]
msmGeodesic[msmGeodesic < 0] = 0
msmGeodesic[msmGeodesic == float('inf')] = 0
#Output all the results into plots
plt.figure(plotInd)
plt.plot(absDayList, msmGeodesic, 'k-', absDayList, statsArray[:, hivGraphStats.mostConnectedGeodesicIndex], 'k--')
plt.xticks(locs, labels)
#plt.ylim([0, 0.1])
plt.xlabel("Year")
plt.ylabel("Mean harmonic geodesic distance")
plt.legend(("MSM individuals", "Top 10% degree"), loc="upper right")
plt.savefig(figureDir + "MSM10Geodesic" + ".eps")
plotInd += 1
plt.figure(plotInd)
plt.plot(absDayList, statsArray2[:, graphStats.harmonicGeoDistanceIndex], 'k-', absDayList, statsArray[:, hivGraphStats.menSubgraphGeodesicIndex], 'k--')
plt.xticks(locs, labels)
plt.ylim([0, 200.0])
plt.xlabel("Year")
plt.ylabel("Mean harmonic geodesic distance")
plt.legend(("All individuals", "Men subgraph"), loc="upper right")
plt.savefig(figureDir + "MenSubgraphGeodesic" + ".eps")
plotInd += 1
#plotVertexStats()
plotScalarStats()
#plotVectorStats()
#plotOtherStats()
plt.show()
#computeConfigScalarStats()
#computeConfigVectorStats()
"""
Probability of adding node based on degree - try to find how we can generate data.
Mean Time between first and last infection for each person
""" | gpl-3.0 |
UNR-AERIAL/scikit-learn | examples/linear_model/plot_iris_logistic.py | 283 | 1678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on
the `iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# Create an instance of the Logistic Regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
bigdataelephants/scikit-learn | examples/manifold/plot_swissroll.py | 330 | 1446 | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
| bsd-3-clause |
wilselby/diy_driverless_car_ROS | rover_cv/camera_cal/src/camera_cal/camera_cal.py | 1 | 6503 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#https://github.com/paramaggarwal/CarND-Advanced-Lane-Lines/blob/master/Notebook.ipynb
from __future__ import print_function
from __future__ import division
import sys
import traceback
import rospy
import numpy as np
import cv2
import pickle
import glob
import time
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
class camera_calibarion(object):
def __init__(self):
"""ROS Subscriptions """
self.image_pub = rospy.Publisher("/camera_calibation/image_corrected",Image, queue_size=10)
self.image_sub = rospy.Subscriber("/cam/camera_/image_raw",Image,self.cvt_image)
""" Variables """
self.bridge = CvBridge()
self.latestImage = None
self.outputImage = None
self.process = False
self.calibrated = False
self.correctedImage = None
self.mtx = None
self.dist = None
def cvt_image(self,data):
try:
self.latestImage = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
if self.process != True:
self.process = True
def camera_cal(self, image):
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
nx = 8
ny = 6
dst = np.copy(image)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(7,5,0)
objp = np.zeros((ny * nx, 3), np.float32)
objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Search for chessboard corners
grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#ret_thresh, mask = cv2.threshold(grey, 30, 255, cv2.THRESH_BINARY)
ret, corners = cv2.findChessboardCorners(image, (nx, ny), None) #flags=(cv2.cv.CV_CALIB_CB_ADAPTIVE_THRESH + cv2.cv.CV_CALIB_CB_FILTER_QUADS))
# If found, add object points, image points
if ret == True:
objpoints.append(objp)
cv2.cornerSubPix(grey,corners, (11,11), (-1,-1), criteria)
imgpoints.append(corners)
self.calibrated = True
print ("FOUND!")
#Draw and display the corners
cv2.drawChessboardCorners(image, (nx, ny), corners, ret)
# Do camera calibration given object points and image points
ret, self.mtx, self.dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, grey.shape[::-1], None, None)
# Save the camera calibration result for later use (we won't worry about rvecs / tvecs)
dist_pickle = {}
dist_pickle["mtx"] = self.mtx
dist_pickle["dist"] = self.dist
dist_pickle['objpoints'] = objpoints
dist_pickle['imgpoints'] = imgpoints
pickle.dump( dist_pickle, open( "/home/wil/ros/catkin_ws/src/av_sim/computer_vision/camera_calibration/data/camera_cal_pickle.p", "wb" ) )
#else:
#print("Searching...")
return image
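# Hedged usage note, not part of this file: the pickled calibration saved above
# would typically be reloaded and applied to new frames along these lines (the
# path is illustrative only):
#   calib = pickle.load(open(".../camera_cal_pickle.p", "rb"))
#   undistorted = cv2.undistort(frame, calib["mtx"], calib["dist"], None, calib["mtx"])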
def drawQuad(self, image, points, color=[255, 0, 0], thickness=4):
p1, p2, p3, p4 = points
cv2.line(image, tuple(p1), tuple(p2), color, thickness)
cv2.line(image, tuple(p2), tuple(p3), color, thickness)
cv2.line(image, tuple(p3), tuple(p4), color, thickness)
cv2.line(image, tuple(p4), tuple(p1), color, thickness)
def perspective_transform(self, image, debug=True, size_top=70, size_bottom=370):
height, width = image.shape[0:2]
output_size = height/2
#src = np.float32([[(width/2) - size_top, height*0.65], [(width/2) + size_top, height*0.65], [(width/2) + size_bottom, height-50], [(width/2) - size_bottom, height-50]])
src = np.float32([[512, 450], [675, 454], [707, 560], [347, 568]])
dst = np.float32([[347, height], [707, height], [707, 0], [347, 0]])
#dst = np.float32([[(width/2) - output_size, (height/2) - output_size], [(width/2) + output_size, (height/2) - output_size], [(width/2) + output_size, (height/2) + output_size], [(width/2) - output_size, (height/2) + output_size]])
M = cv2.getPerspectiveTransform(src, dst)
print(M)
warped = cv2.warpPerspective(image, M, (width, height), flags=cv2.INTER_LINEAR)
if debug:
self.drawQuad(image, src, [255, 0, 0])
self.drawQuad(image, dst, [255, 255, 0])
plt.imshow(image)
plt.show()
return warped
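# Hedged usage note, not part of this file: to map points from the bird's-eye
# view back onto the original camera image, the inverse warp is typically built
# by swapping the source and destination quadrilaterals, e.g.
#   Minv = cv2.getPerspectiveTransform(dst, src)
#   unwarped = cv2.warpPerspective(warped, Minv, (width, height), flags=cv2.INTER_LINEAR)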
def undistort_image(self, image):
return cv2.undistort(image, self.mtx, self.dist, None, self.mtx)
def run(self):
while True:
# Only run loop if we have an image
if self.process:
filename = "/home/wil/ros/catkin_ws/src/av_sim/computer_vision/camera_calibration/data/check_test.png"
image = cv2.imread(filename, flags=cv2.IMREAD_COLOR)
if self.calibrated is not True:
#print("Calibrating...")
cornersImage = self.camera_cal(image)
cvImage = cornersImage
else:
correctedImage = self.undistort_image(self.latestImage) # Distortion Correction Function
transformImage = self.perspective_transform(self.latestImage)
cvImage = transformImage
# Publish Undistorted Image
try:
imgmsg = self.bridge.cv2_to_imgmsg(cvImage, "bgr8") #"mono8" "bgr8"
self.image_pub.publish(imgmsg)
except CvBridgeError as e:
print(e)
def main(args):
rospy.init_node('camera_calibarion', anonymous=True)
cc = camera_calibarion()
cc.run()
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
cv2.destroyAllWindows()
if __name__ == '__main__':
main(sys.argv)
| bsd-2-clause |
victorbergelin/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py | 227 | 5170 | """
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates the behaviour of the
accuracy of the nearest neighbor queries of Locality Sensitive Hashing
Forest as the number of candidates and the number of estimators (trees)
vary.
In the first plot, accuracy is measured as a function of the number of
candidates. Here, the term "number of candidates" refers to the maximum bound
on the number of distinct points retrieved from each tree when calculating the
distances. Nearest neighbors are selected from this pool of candidates. The
number of estimators is maintained at three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50. Number of trees
is varied and the accuracy is plotted against those values. To measure the
accuracy, the true nearest neighbors are required, therefore
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=10,
random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
# Set `n_candidates` values
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]),
dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iterations to estimate
# expected accuracy and standard deviation displayed as error bars in
# the plots
for j, value in enumerate(n_estimators_for_candidate_value):
for i, n_candidates in enumerate(n_candidates_values):
accuracy_c = []
for seed in range(n_iter):
lshf = LSHForest(n_estimators=value,
n_candidates=n_candidates, n_neighbors=1,
random_state=seed)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query,
return_distance=False)
accuracy_c.append(np.sum(np.equal(neighbors_approx,
neighbors_exact)) /
n_queries)
stds_accuracies[j, i] = np.std(accuracy_c)
accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
for i, n_estimators in enumerate(n_estimators_values):
lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
label = 'n_estimators = %d ' % n_estimators
plt.plot(n_candidates_values, accuracies_c[i, :],
'o-', c=colors[i], label=label)
plt.errorbar(n_candidates_values, accuracies_c[i, :],
stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', fontsize='small')
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
| bsd-3-clause |
CoolProp/CoolProp | wrappers/Python/CoolProp/Plots/PsychScript.py | 2 | 2020 |
# This file was auto-generated by the PsychChart.py script in wrappers/Python/CoolProp/Plots
if __name__ == '__main__':
import numpy, matplotlib
from CoolProp.HumidAirProp import HAPropsSI
from CoolProp.Plots.Plots import InlineLabel
p = 101325
Tdb = numpy.linspace(-10, 60, 100) + 273.15
# Make the figure and the axes
fig = matplotlib.pyplot.figure(figsize=(10, 8))
ax = fig.add_axes((0.1, 0.1, 0.85, 0.85))
# Saturation line
w = [HAPropsSI('W', 'T', T, 'P', p, 'R', 1.0) for T in Tdb]
ax.plot(Tdb - 273.15, w, lw=2)
# Humidity lines
RHValues = [0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
for RH in RHValues:
w = [HAPropsSI('W', 'T', T, 'P', p, 'R', RH) for T in Tdb]
ax.plot(Tdb - 273.15, w, 'r', lw=1)
    # Constant-enthalpy lines
for H in [-20000, -10000, 0, 10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000]:
# Line goes from saturation to zero humidity ratio for this enthalpy
T1 = HAPropsSI('T', 'H', H, 'P', p, 'R', 1.0) - 273.15
T0 = HAPropsSI('T', 'H', H, 'P', p, 'R', 0.0) - 273.15
w1 = HAPropsSI('W', 'H', H, 'P', p, 'R', 1.0)
w0 = HAPropsSI('W', 'H', H, 'P', p, 'R', 0.0)
ax.plot(numpy.r_[T1, T0], numpy.r_[w1, w0], 'r', lw=1)
ax.set_xlim(Tdb[0] - 273.15, Tdb[-1] - 273.15)
ax.set_ylim(0, 0.03)
ax.set_xlabel(r"$T_{db}$ [$^{\circ}$C]")
ax.set_ylabel(r"$W$ ($m_{w}/m_{da}$) [-]")
xv = Tdb # [K]
for RH in [0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
yv = [HAPropsSI('W', 'T', T, 'P', p, 'R', RH) for T in Tdb]
y = HAPropsSI('W', 'P', p, 'H', 65000.000000, 'R', RH)
T_K, w, rot = InlineLabel(xv, yv, y=y, axis=ax)
string = r'$\phi$=' + '{s:0.0f}'.format(s=RH * 100) + '%'
bbox_opts = dict(boxstyle='square,pad=0.0', fc='white', ec='None', alpha=0.5)
ax.text(T_K - 273.15, w, string, rotation=rot, ha='center', va='center', bbox=bbox_opts)
matplotlib.pyplot.show()
| mit |
convexopt/gpkit | gpkit/tests/t_examples.py | 1 | 6270 | """Unit testing of tests in docs/source/examples"""
import unittest
import os
import numpy as np
from gpkit import settings
from gpkit.tests.helpers import generate_example_tests
from gpkit.small_scripts import mag
from gpkit.small_classes import Quantity
def assert_logtol(first, second, logtol=1e-6):
"Asserts that the logs of two arrays have a given abstol"
np.testing.assert_allclose(np.log(mag(first)), np.log(mag(second)),
atol=logtol, rtol=0)
# pylint: disable=too-many-public-methods
class TestExamples(unittest.TestCase):
"""
To test a new example, add a function called `test_$EXAMPLENAME`, where
$EXAMPLENAME is the name of your example in docs/source/examples without
the file extension.
This function should accept two arguments (e.g. 'self' and 'example').
The imported example script will be passed to the second: anything that
    was a global variable (e.g., "sol") in the original script is available
as an attribute (e.g., "example.sol")
If you don't want to perform any checks on the example besides making
sure it runs, just put "pass" as the function's body, e.g.:
def test_dummy_example(self, example):
pass
    But it's good practice to check the example's solution as well, e.g.:
def test_dummy_example(self, example):
self.assertAlmostEqual(example.sol["cost"], 3.121)
"""
# TODO: allow enabling plotting examples, make plots in correct folder...
# def test_plot_sweep1d(self, _):
# import matplotlib.pyplot as plt
# plt.close("all")
def test_autosweep(self, example):
from gpkit import ureg
bst1, tol1 = example.bst1, example.tol1
bst2, tol2 = example.bst2, example.tol2
l_ = np.linspace(1, 10, 100)
for bst in [bst1, example.bst1_loaded]:
sol1 = bst.sample_at(l_)
assert_logtol(sol1("l"), l_)
assert_logtol(sol1("A"), l_**2 + 1, tol1)
assert_logtol(sol1["cost"], (l_**2 + 1)**2, tol1)
if hasattr(sol1["cost"], "units"): # loaded costs are unitless
self.assertEqual(Quantity(1.0, sol1["cost"].units),
Quantity(1.0, ureg.m)**4)
self.assertEqual(Quantity(1.0, sol1("A").units),
Quantity(1.0, ureg.m)**2)
ndig = -int(np.log10(tol2))
self.assertAlmostEqual(bst2.cost_at("cost", 3), 1.0, ndig)
# before corner
A_bc = np.linspace(1, 3, 50)
sol_bc = bst2.sample_at(A_bc)
assert_logtol(sol_bc("A"), (A_bc/3)**0.5, tol2)
assert_logtol(sol_bc["cost"], A_bc/3, tol2)
# after corner
A_ac = np.linspace(3, 10, 50)
sol_ac = bst2.sample_at(A_ac)
assert_logtol(sol_ac("A"), (A_ac/3)**2, tol2)
assert_logtol(sol_ac["cost"], (A_ac/3)**4, tol2)
def test_model_var_access(self, example):
model = example.PS
_ = model["E"]
with self.assertRaises(ValueError):
_ = model["m"] # multiple variables called m
def test_performance_modeling(self, example):
pass
def test_sp_to_gp_sweep(self, example):
pass
def test_boundschecking(self, example):
pass
def test_vectorize(self, example):
pass
def test_primal_infeasible_ex1(self, example):
with self.assertRaises(RuntimeWarning) as cm:
example.m.solve(verbosity=0)
err = cm.exception
if "mosek" in err.message:
self.assertIn("PRIM_INFEAS_CER", err.message)
elif "cvxopt" in err.message:
self.assertIn("unknown", err.message)
def test_primal_infeasible_ex2(self, example):
with self.assertRaises(RuntimeWarning):
example.m.solve(verbosity=0)
def test_docstringparsing(self, example):
pass
def test_debug(self, example):
pass
def test_simple_sp(self, example):
pass
def test_simple_box(self, example):
pass
def test_x_greaterthan_1(self, example):
pass
def test_beam(self, example):
self.assertFalse(np.isnan(example.sol("w")).any())
def test_water_tank(self, example):
pass
def test_sin_approx_example(self, example):
pass
def test_external_sp(self, example):
pass
def test_external_sp2(self, example):
pass
def test_simpleflight(self, example):
self.assertTrue(example.sol.almost_equal(example.sol_loaded))
for sol in [example.sol, example.sol_loaded]:
freevarcheck = {
"A": 8.46,
"C_D": 0.0206,
"C_f": 0.0036,
"C_L": 0.499,
"Re": 3.68e+06,
"S": 16.4,
"W": 7.34e+03,
"V": 38.2,
"W_w": 2.40e+03
}
# sensitivity values from p. 34 of W. Hoburg's thesis
senscheck = {
r"(\frac{S}{S_{wet}})": 0.4300,
"e": -0.4785,
"V_{min}": -0.3691,
"k": 0.4300,
r"\mu": 0.0860,
"(CDA0)": 0.0915,
"C_{L,max}": -0.1845,
r"\tau": -0.2903,
"N_{ult}": 0.2903,
"W_0": 1.0107,
r"\rho": -0.2275
}
for key in freevarcheck:
sol_rat = mag(sol["variables"][key])/freevarcheck[key]
self.assertTrue(abs(1-sol_rat) < 1e-2)
for key in senscheck:
sol_rat = sol["sensitivities"]["constants"][key]/senscheck[key]
self.assertTrue(abs(1-sol_rat) < 1e-2)
def test_relaxation(self, example):
pass
def test_unbounded(self, example):
pass
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
EXAMPLE_DIR = os.path.abspath(FILE_DIR + '../../../docs/source/examples')
SOLVERS = settings["installed_solvers"]
if os.path.isdir(EXAMPLE_DIR):
TESTS = generate_example_tests(EXAMPLE_DIR, [TestExamples], SOLVERS)
else:
TESTS = []
if __name__ == "__main__":
# pylint:disable=wrong-import-position
from gpkit.tests.helpers import run_tests
run_tests(TESTS)
| mit |
liffiton/ATLeS | src/analysis/plot.py | 1 | 11295 | import math
import re
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import collections, lines, patches
from analysis import heatmaps
import config
# Source: https://gist.github.com/jasonmc/1160951
def _set_foregroundcolor(ax, color):
'''For the specified axes, sets the color of the frame, major ticks,
tick labels, axis labels, title and legend
'''
for tl in ax.get_xticklines() + ax.get_yticklines():
tl.set_color(color)
for spine in ax.spines:
ax.spines[spine].set_edgecolor(color)
for tick in ax.xaxis.get_major_ticks():
tick.label1.set_color(color)
for tick in ax.yaxis.get_major_ticks():
tick.label1.set_color(color)
ax.axes.xaxis.label.set_color(color)
ax.axes.yaxis.label.set_color(color)
ax.axes.xaxis.get_offset_text().set_color(color)
ax.axes.yaxis.get_offset_text().set_color(color)
ax.axes.title.set_color(color)
lh = ax.get_legend()
if lh is not None:
lh.get_title().set_color(color)
lh.legendPatch.set_edgecolor('none')
labels = lh.get_texts()
for lab in labels:
lab.set_color(color)
for tl in ax.get_xticklabels():
tl.set_color(color)
for tl in ax.get_yticklabels():
tl.set_color(color)
# Source: https://gist.github.com/jasonmc/1160951
def _set_backgroundcolor(ax, color):
'''Sets the background color of the current axes (and legend).
Use 'None' (with quotes) for transparent. To get transparent
background on saved figures, use:
    plt.savefig("fig1.svg", transparent=True)
'''
ax.patch.set_facecolor(color)
lh = ax.get_legend()
if lh is not None:
lh.legendPatch.set_facecolor(color)
def format_axis(ax):
_set_foregroundcolor(ax, '0.5')
_set_backgroundcolor(ax, '0.08')
# drop plot borders
for spine in ax.spines:
ax.spines[spine].set_visible(False)
def _format_figure(fig):
fig.patch.set_facecolor('0.12')
plt.tight_layout()
def show():
''' Shows the current figure (on screen, if using a GUI backend).
Create a plot first using a TrackPlotter object. '''
fig = plt.gcf()
_format_figure(fig)
plt.show()
plt.close('all')
def savefig(outfile, format=None):
''' Saves the current figure to the given filename or file-like object.
Format is inferred from the file extension if a name is given,
otherwise specify it manually with the format parameter.
Large (tall) figures are broken into multiple images (vertical tiles)
if outfile is a string (filename).
Create a plot first using a TrackPlotter object or other code that
creates a pyplot figure.
'''
fig = plt.gcf()
_format_figure(fig)
# A bit of an ugly hack to split giant images into multiple parts
# Only used if outfile is given as a string (filename)
max_height = 100 if isinstance(outfile, str) else float('inf')
# plot height in inches
height = fig.get_window_extent().transformed(fig.dpi_scale_trans.inverted()).height
if height > max_height:
numparts = int(height / max_height) + 1
for i in range(numparts):
filename = re.sub(r"(\.[^\.]+)$", r"%02d\1" % (numparts-i), outfile)
bbox = matplotlib.transforms.Bbox.from_extents([0,i*max_height,12,min(height,(i+1)*max_height)])
plt.savefig(filename, facecolor=fig.get_facecolor(), edgecolor='none', bbox_inches=bbox, format=format)
else:
plt.savefig(outfile, facecolor=fig.get_facecolor(), edgecolor='none', format=format)
plt.close('all')
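# Typical usage is a two-step pattern (a sketch only; `track` stands in for
# whatever track-processor object the caller already has):
#     plotter = TrackPlotter(track, dbgframes=[])
#     plotter.plot_trace()
#     savefig("trace01.png")  # tall figures saved by filename are tiled automatically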
class TrackPlotter(object):
def __init__(self, track_processor, dbgframes=None):
self._track = track_processor
self._dbgframes = dbgframes
@staticmethod
def _speed2color(speed):
# setup ranges, where 0 maps to first number, 1.0 maps to second
color_ranges = {
'r': (0.8, 1.0),
'g': (0.8, 0.0),
'b': (0.8, 0.0)
}
def scale(inval, color):
range = color_ranges[color]
scaled = range[0] + (range[1] - range[0]) * inval
return min(1, max(0, scaled)) # constrain to 0-1
r = scale(speed, 'r')
g = scale(speed, 'g')
b = scale(speed, 'b')
return (r,g,b, 0.5)
def plot_trace(self):
# one minute per subplot
numplots = self._track.len_minutes
fig = plt.figure(figsize=(12,2*(numplots+1)))
# Draw the legend at the top
self.draw_legend(plt.subplot(numplots+1, 1, 1))
for i in range(numplots):
ax = plt.subplot(numplots+1, 1, i+2)
self._plot_trace_portion(ax, start_min=i, end_min=i+1)
return fig
def draw_legend(self, legend_ax):
# Make a legend with proxy artists
xpos_artist = lines.Line2D([],[], color='orange')
ypos_artist = lines.Line2D([],[], color='limegreen')
numpts_artist = lines.Line2D([],[], color='purple', linewidth=1)
frozen_artist = patches.Rectangle((0,0), 1, 1, fc='lightblue', ec='None')
missing_artist = patches.Rectangle((0,0), 1, 1, fc='yellow', ec='None')
lost_artist = patches.Rectangle((0,0), 1, 1, fc='red', ec='None')
# Place it in center of top "subplot" area
legend_ax.legend(
[xpos_artist, ypos_artist, numpts_artist,
frozen_artist, missing_artist, lost_artist],
['x-pos', 'y-pos', '# Detection pts',
'Frozen', 'Missing', 'Lost'],
loc='center',
fontsize=12,
ncol=4,
)
legend_ax.axis('off')
format_axis(legend_ax)
def plot_invalidheatmap(self):
title = "Map of shame (loc of invalid data)"
plt.figure(figsize=(4, 4))
ax = plt.gca()
ax.set_title(title)
format_axis(ax)
nbins = 50
badpoints = (self._track.df.valid != True) # noqa: E712
heatmaps.plot_heatmap(ax, self._track.df.x[badpoints], self._track.df.y[badpoints], nbins=nbins)
def plot_heatmap(self, plot_type='overall'):
assert plot_type in ('per-minute', 'per-phase', 'overall')
if plot_type == 'per-minute':
numplots = self._track.len_minutes
elif plot_type == 'per-phase':
numplots = self._track.num_phases()
phase_starts = self._track.phase_starts()
phase_ends = phase_starts[1:] + [2**30]
elif plot_type == 'overall':
numplots = 1
numrows = int(math.ceil(numplots / 10.0))
if plot_type == 'overall':
plt.figure(figsize=(4, 4))
else:
plt.figure(figsize=(2*min(numplots, 10), 2*numrows))
for i in range(numplots):
if plot_type == 'per-minute':
start_min = i
end_min = i+1
title = "{}:00-{}:00".format(start_min, end_min)
elif plot_type == 'per-phase':
start_min = phase_starts[i]
end_min = phase_ends[i]
title = "Phase {} ({}:00-{}:00)".format(i+1, start_min, end_min)
elif plot_type == 'overall':
start_min = 0
end_min = 2**30
title = "Overall heatmap"
ax = plt.subplot(numrows, min(numplots, 10), i+1)
if numplots > 1:
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
format_axis(ax)
ax.set_title(title)
nbins = 50
start_sec = start_min*60
end_sec = end_min*60
heatmaps.plot_heatmap(ax, self._track.df.x[start_sec:end_sec], self._track.df.y[start_sec:end_sec], nbins=nbins)
def _plot_trace_portion(self, ax, start_min, end_min):
''' Parameters:
start_min, end_min:
Integer minutes.
Plot should be from start:00 to end:00.
'''
# shorthand
df = self._track.df
start = start_min * 60
end = end_min * 60
time = df.index.to_series()[start:end].values
#theta = self._track.theta[start:end]
#speed = self._track.speed[start:end]
#valid = self._track.valid[start:end]
lost = df.lost[start:end].values
missing = df.missing[start:end].values
frozen = df.frozen[start:end].values
x = df.x[start:end].values
y = df.y[start:end].values
numpts = df.numpts[start:end].values
# Format nicely
format_axis(ax)
ax.axes.get_yaxis().set_visible(False)
        # Set the axis limits (specifically, we don't want the y-axis to be autoscaled for us)
ax.axis([start, end, -1.0, 1.0])
# Mark lost/missing sections
lost_collection = collections.BrokenBarHCollection.span_where(
time,
-1.0, -0.9,
lost,
edgecolors='none',
facecolors='red',
)
ax.add_collection(lost_collection)
missing_collection = collections.BrokenBarHCollection.span_where(
time,
-1.0, -0.9,
missing,
edgecolors='none',
facecolors='yellow',
)
ax.add_collection(missing_collection)
# Mark frozen sections
frozen_collection = collections.BrokenBarHCollection.span_where(
time,
-0.85, -0.8,
frozen,
edgecolors='none',
facecolors='lightblue',
)
ax.add_collection(frozen_collection)
# Plot horizontal position
ax.plot(time, x*2-1, color='orange', label='x position')
# Plot height
ax.plot(time, y*2-1, color='limegreen', label='y position')
# Plot numpts (scaled so 0 = -1.0 (plot bottom), 20 = 1.0 (top))
ax.plot(time, -1.0+(numpts/10.0), color='purple', linewidth=1, label='# detected points')
# Add stick plot of movement (where valid)
# ax.quiver(
# time, [0] * len(time),
# speed*np.cos(theta), speed*np.sin(theta),
# color=[self._speed2color(s) for s in speed],
# scale=1, # scale all to a speed of 1, which should be close to max (tank is 1.0x1.0)
# scale_units='y',
# width=0.01,
# units='inches',
# headlength=0, headwidth=0, headaxislength=0 # no arrowheads
# )
# Add markers/links to debugframes if given
# Get [tracking]:start_frame for proper offset of debug frame numbers into track data here
start_frame = int(self._track.config['tracking']['start_frame'])
for dbgframe in self._dbgframes:
nameparts = dbgframe.name.split('_')
frameindex = max(0, int(nameparts[1]) - start_frame) # restrict to index 0 at minimum
frametime = self._track.df.index[frameindex]
if start <= frametime < end:
marker = matplotlib.patches.Circle(
(frametime, -1.1), radius=0.08,
color='#337AB7',
clip_on=False,
url=str("/data" / dbgframe.relative_to(config.DATADIR))
)
ax.add_artist(marker)
| mit |
ambikeshwar1991/sandhi-2 | module/gr36/gr-filter/examples/interpolate.py | 13 | 8584 | #!/usr/bin/env python
#
# Copyright 2009,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import filter
import sys, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 100000 # number of samples to use
self._fs = 2000 # initial sampling rate
self._interp = 5 # Interpolation rate for PFB interpolator
self._ainterp = 5.5 # Resampling rate for the PFB arbitrary resampler
# Frequencies of the signals we construct
freq1 = 100
freq2 = 200
# Create a set of taps for the PFB interpolator
# This is based on the post-interpolation sample rate
self._taps = filter.firdes.low_pass_2(self._interp,
self._interp*self._fs,
freq2+50, 50,
attenuation_dB=120,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Create a set of taps for the PFB arbitrary resampler
# The filter size is the number of filters in the filterbank; 32 will give very low side-lobes,
# and larger numbers will reduce these even farther
# The taps in this filter are based on a sampling rate of the filter size since it acts
# internally as an interpolator.
flt_size = 32
self._taps2 = filter.firdes.low_pass_2(flt_size,
flt_size*self._fs,
freq2+50, 150,
attenuation_dB=120,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._interp))
print "Number of taps: ", len(self._taps)
print "Number of filters: ", self._interp
print "Taps per channel: ", tpc
# Create a couple of signals at different frequencies
self.signal1 = gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freq1, 0.5)
self.signal2 = gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freq2, 0.5)
self.signal = gr.add_cc()
self.head = gr.head(gr.sizeof_gr_complex, self._N)
# Construct the PFB interpolator filter
self.pfb = filter.pfb.interpolator_ccf(self._interp, self._taps)
# Construct the PFB arbitrary resampler filter
self.pfb_ar = filter.pfb.arb_resampler_ccf(self._ainterp, self._taps2, flt_size)
self.snk_i = gr.vector_sink_c()
#self.pfb_ar.pfb.print_taps()
#self.pfb.pfb.print_taps()
# Connect the blocks
self.connect(self.signal1, self.head, (self.signal,0))
self.connect(self.signal2, (self.signal,1))
self.connect(self.signal, self.pfb)
self.connect(self.signal, self.pfb_ar)
self.connect(self.signal, self.snk_i)
# Create the sink for the interpolated signals
self.snk1 = gr.vector_sink_c()
self.snk2 = gr.vector_sink_c()
self.connect(self.pfb, self.snk1)
self.connect(self.pfb_ar, self.snk2)
def main():
tb = pfb_top_block()
tstart = time.time()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
Ns = 10000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
# Plot input signal
fs = tb._fs
d = tb.snk_i.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
#p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
sp1_t.set_ylim([-2.5, 2.5])
sp1_t.set_title("Input Signal", weight="bold")
sp1_t.set_xlabel("Time (s)")
sp1_t.set_ylabel("Amplitude")
# Plot output of PFB interpolator
fs_int = tb._fs*tb._interp
sp2_f = fig2.add_subplot(2, 1, 1)
d = tb.snk1.data()[Ns:Ns+(tb._interp*Ne)]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_int/2.0, fs_int/2.0, fs_int/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+1])
sp2_f.set_ylim([-200.0, 50.0])
sp2_f.set_title("Output Signal from PFB Interpolator", weight="bold")
sp2_f.set_xlabel("Frequency (Hz)")
sp2_f.set_ylabel("Power (dBW)")
Ts_int = 1.0/fs_int
Tmax = len(d)*Ts_int
t_o = scipy.arange(0, Tmax, Ts_int)
x_o1 = scipy.array(d)
sp2_t = fig2.add_subplot(2, 1, 2)
p2_t = sp2_t.plot(t_o, x_o1.real, "b-o")
#p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
sp2_t.set_ylim([-2.5, 2.5])
sp2_t.set_title("Output Signal from PFB Interpolator", weight="bold")
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
# Plot output of PFB arbitrary resampler
fs_aint = tb._fs * tb._ainterp
sp3_f = fig3.add_subplot(2, 1, 1)
d = tb.snk2.data()[Ns:Ns+(tb._interp*Ne)]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_aint/2.0, fs_aint/2.0, fs_aint/float(X_o.size))
p3_f = sp3_f.plot(f_o, X_o, "b")
sp3_f.set_xlim([min(f_o), max(f_o)+1])
sp3_f.set_ylim([-200.0, 50.0])
sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
sp3_f.set_xlabel("Frequency (Hz)")
sp3_f.set_ylabel("Power (dBW)")
Ts_aint = 1.0/fs_aint
Tmax = len(d)*Ts_aint
t_o = scipy.arange(0, Tmax, Ts_aint)
x_o2 = scipy.array(d)
sp3_f = fig3.add_subplot(2, 1, 2)
p3_f = sp3_f.plot(t_o, x_o2.real, "b-o")
p3_f = sp3_f.plot(t_o, x_o1.real, "m-o")
#p3_f = sp3_f.plot(t_o, x_o2.imag, "r-o")
sp3_f.set_ylim([-2.5, 2.5])
sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
sp3_f.set_xlabel("Time (s)")
sp3_f.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
lgarren/spack | var/spack/repos/builtin/packages/py-iminuit/package.py | 3 | 1800 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyIminuit(PythonPackage):
"""Interactive IPython-Friendly Minimizer based on SEAL Minuit2."""
homepage = "https://pypi.python.org/pypi/iminuit"
url = "https://pypi.io/packages/source/i/iminuit/iminuit-1.2.tar.gz"
version('1.2', '4701ec472cae42015e26251703e6e984')
# Required dependencies
depends_on('py-setuptools', type='build')
# Optional dependencies
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-cython', type='build')
| lgpl-2.1 |
MarineLasbleis/GrowYourIC | notebooks/Yoshida.py | 1 | 4212 | # -*- coding: UTF-8 -*-
import numpy as np
import matplotlib.pyplot as plt #for figures
#from mpl_toolkits.basemap import Basemap #to render maps
import math
from GrowYourIC import tracers, positions, geodyn, geodyn_trg, geodyn_static, plot_data, data, geodyn_analytical_flows
#plt.rcParams['figure.figsize'] = (8.0, 3.0) #size of figures
cm = plt.cm.get_cmap('viridis_r')
#V = 0.2 # translation velocity
#S2 = 1/5.
#Yoshida = geodyn_analytical_flows.Yoshida96(V, S=S2)
#file = "Fig/Yoshida_{}_S2_{}".format(V, S2)
#print(file)
V = [0.2, 0.4]
S2 = [1/5., 4/5., 2.]
for vitesse in V:
for value_S in S2:
Yoshida = geodyn_analytical_flows.Yoshida96(vitesse, S=value_S)
file = "Fig/Yoshida_{}_S2_{}".format(vitesse, value_S)
print(file)
npoints = 50 #number of points in the x direction for the data set.
data_set = data.PerfectSamplingCut(npoints, rICB = 1.)
data_set.method = "bt_point"
# Age plot with velocity field
proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="age", verbose = False)
data_set.plot_c_vec(Yoshida, proxy=proxy, nameproxy="age")
plt.savefig(file+"_age.pdf")
# accumulated deformation
#proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="vMises_acc", verbose = False)
#data_set.plot_c_vec(Yoshida, proxy=proxy, cm=cm, nameproxy="vMises_acc")
#plt.savefig(file+"_vM_acc.pdf")
#data_set.plot_c_vec(Yoshida, proxy=np.log10(proxy), cm=cm, nameproxy="log_vMises_acc")
#plt.savefig(file+"_log_vM_acc.pdf")
# tracers with age
#tracers.Swarm(5, Yoshida, Yoshida.tau_ic/400, "ici", plane="meridional")
#data_set = data.PerfectSamplingCut(20, rICB = 1.)
#data_set.method = "bt_point"
#proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="age", verbose = False)
#data_set.plot_c_vec(Yoshida, proxy=proxy, nameproxy="age")
#plt.show()
#proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="vMises_tau_ic", verbose = False)
#data_set.plot_c_vec(Yoshida, proxy=proxy, cm=cm, nameproxy="vMises_tau_ic")
#proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="vMises_acc", verbose = False)
#data_set.plot_c_vec(Yoshida, proxy=proxy, cm=cm, nameproxy="vMises_acc")
#Karato = geodyn_analytical_flows.Model_LorentzForce()
#proxy = geodyn.evaluate_proxy(data_set, Karato, proxy_type="vMises_tau_ic", verbose = False)
#data_set.plot_c_vec(Karato, proxy=proxy, cm=cm, nameproxy="vMises_tau_ic")
#Karato.P = 1e-4
#proxy = geodyn.evaluate_proxy(data_set, Karato, proxy_type="vMises_tau_ic", verbose = False)
#data_set.plot_c_vec(Karato, proxy=proxy, cm=cm, nameproxy="vMises_tau_ic")
#proxy = geodyn.evaluate_proxy(data_set, Karato, proxy_type="age", verbose = False)
#data_set.plot_c_vec(Karato, proxy=proxy, cm=cm, nameproxy="age")
#proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="age", verbose = False)
#data_set.plot_c_vec(Yoshida, proxy=proxy, cm=cm, nameproxy="age")
#plt.savefig(file+"_tage.pdf")
#proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="vMises_tau_ic", verbose = False)
#data_set.plot_c_vec(Yoshida, proxy=proxy, cm=cm, nameproxy="vMises_tau_ic")
#plt.savefig(file+"_t_vM.pdf")
#proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="vMises_cart", verbose = False)
#data_set.plot_c_vec(Yoshida, proxy=proxy, cm=cm, nameproxy="vMises_cart")
#plt.savefig("Yoshida_vM.pdf")
#Karato.P = 1e4
#proxy_1 = geodyn.evaluate_proxy(data_set, Karato, proxy_type="vMises_tau_ic", verbose = False)
#data_set.plot_c_vec(Karato, proxy=proxy_1, cm=cm, nameproxy="vMises_tau_ic")
#npoints = 50 #number of points in the x direction for the data set.
#data_set = data.PerfectSamplingCut(npoints, rICB = 1.)
#data_set.method = "bt_point"
#proxy_2 = geodyn.evaluate_proxy(data_set, Karato, proxy_type="age", verbose = False)
#data_set.plot_c_vec(Karato, proxy=proxy_2, cm=cm, nameproxy="age")
#npoints = 100 #number of points in the x direction for the data set.
#data_set = data.PerfectSamplingCut(npoints, rICB = 1.)
#data_set.method = "bt_point"
#proxy = geodyn.evaluate_proxy(data_set, Yoshida, proxy_type="vMises_acc", verbose = False)
#data_set.plot_c_vec(Yoshida, proxy=proxy, cm=cm, nameproxy="vMises_acc")
plt.show() | mit |
lsiemens/lsiemens.github.io | theory/fractional_calculus/code/old/FCC2.py | 1 | 1663 | """
Ideas about fractional calculus defined on C^2
J^b f(x, a) = f(x, a + b)
"""
import numpy
from matplotlib import pyplot
from scipy import special
def monomial(x, a, x_0, a_0):
return (x - x_0)**(a - a_0)/special.gamma(a - a_0 + 1)
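# Sanity-check sketch for the convention in the module docstring: for the
# power-function kernel above, fractional integration shifts the second index,
#   J^b monomial(x, a, x_0, a_0) = monomial(x, a + b, x_0, a_0).
# The helper below is an illustration only (rough trapezoidal quadrature of the
# Riemann-Liouville integral; it assumes x > x_0 and b > 1 so the kernel stays finite).
def _check_monomial_shift(x=2.0, a=1.5, b=1.5, x_0=0.0, a_0=0.0, n=20000):
    t = numpy.linspace(x_0, x, n)
    kernel = (x - t)**(b - 1.0)/special.gamma(b)
    lhs = numpy.trapz(kernel*monomial(t, a, x_0, a_0), t)  # J^b applied to the monomial
    rhs = monomial(x, a + b, x_0, a_0)
    return lhs, rhs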
def exp(x, a, b):
return b**(-a)*numpy.exp(b*x)
def projx(f, x, a):
n = numpy.searchsorted(numpy.real(a), 0.0)
pyplot.plot(x, f[-n, :])
pyplot.show()
def proja(f, x, a):
n = numpy.searchsorted(numpy.real(x), 0.0)
pyplot.plot(a, f[:, -n])
pyplot.show()
def plotR(f, vmin=-10, vmax=10):
_plot_C3(numpy.real(f), vmin=vmin, vmax=vmax)
def plotI(f, vmin=-10, vmax=10):
_plot_C3(numpy.imag(f), vmin=vmin, vmax=vmax)
def plotM(f, vmax=10):
_plot_C3(numpy.abs(f), vmax=vmax)
def plotMl(f):
_plot_C3(numpy.log(numpy.abs(f)))
def _plot_C3(f, vmin=None, vmax=None):
pyplot.imshow(f, extent = [x_0, x_1, a_0, a_1], vmin=vmin, vmax=vmax)
pyplot.show()
x_0, x_1, Nx = -5, 5, 1000
a_0, a_1, Na = -5, 5, 1000
X = numpy.linspace(x_0, x_1, Nx, dtype=numpy.complex)
dx = (x_1 - x_0)/(Nx - 1)
da = (a_1 - a_0)/(Na - 1)
A = numpy.linspace(a_0, a_1, Na, dtype=numpy.complex)
domain_x, domain_a = numpy.meshgrid(X, A[::-1])
F = monomial(domain_x, domain_a, 0, -1)
G = monomial(domain_x, domain_a, 1, -1) + monomial(domain_x, domain_a, 1, 0)
G = -monomial(domain_x, domain_a, 1, -1) + 0.5*monomial(domain_x, domain_a, 0, -3)
G = (exp(domain_x, domain_a, 1.0j) + exp(domain_x, domain_a, -1.0j))/2.0
#G = (exp(domain_x, domain_a, 2.0j) - exp(domain_x, domain_a, -2.0j))/2.0
#G = F
Gp = numpy.gradient(G)
#G = Gp[1]
projx(G, X, A)
proja(G, X, A)
plotR(G)
plotI(G)
plotM(G)
plotMl(G)
| mit |
phoebe-project/phoebe2-docs | 2.1/tutorials/saving_and_loading.py | 1 | 2914 | #!/usr/bin/env python
# coding: utf-8
# Saving and Loading
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.1 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.1,<2.2"')
# As always, let's do imports and initialize a logger and a new bundle. See [Building a System](building_a_system.ipynb) for more details.
# In[1]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger(clevel='INFO')
b = phoebe.default_binary()
# Saving a Bundle
# -----------------------
#
#
# In[2]:
b['incl@orbit'] = 56.789
# To save the Bundle to a file, we can call the [save](../api/phoebe.parameters.ParameterSet.save.md) method of the Bundle and pass a filename.
# In[3]:
print b.save('test.phoebe')
# We can now inspect the contents of the created file.
#
# This file is in JSON format and is simply a list of dictionaries - where each dictionary represents the attributes of a single Parameter.
#
# You could edit this file in a text editor - but do be careful if changing any of the tags. For example: if you want to change the component tag of one of your stars, make sure to change ALL instances of the component tag to match (as well as the hierarchy Parameter).
# In[4]:
get_ipython().system('head -n 30 test.phoebe')
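# In[ ]:
# Because the saved file is plain JSON (a list of parameter dictionaries, as
# described above), it can also be inspected programmatically.  This is only a
# minimal sketch using the standard-library json module; it assumes nothing
# about the key names inside each dictionary, so we simply print them.
import json
with open('test.phoebe') as f:
    params = json.load(f)
print len(params), 'parameters saved'
print sorted(params[0].keys())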
# Loading a Bundle
# ----------------------
# To open an existing Bundle from the file we just created, call [Bundle.open](../api/phoebe.frontend.bundle.Bundle.open.md) and pass the filename.
# In[5]:
b2 = phoebe.Bundle.open('test.phoebe')
# Just to prove this worked, we can check to make sure we retained the changed value of inclination.
# In[6]:
print b2.get_value('incl@orbit')
# Support for Other Codes
# ------------------------------
#
# ### Legacy
#
# Importing from a PHOEBE Legacy file is as simple as passing the filename to [from_legacy](../api/phoebe.frontend.bundle.Bundle.from_legacy.md):
# In[7]:
b = phoebe.Bundle.from_legacy('legacy.phoebe')
# Exporting to a PHOEBE Legacy file is also possible (although note that some parameters don't translate exactly or are not supported in PHOEBE Legacy), via [b.export_legacy](../api/phoebe.frontend.bundle.Bundle.export_legacy.md).
# In[8]:
b.export_legacy('legacy_export.phoebe')
# For the parameters that could not be directly translated, you should see a warning message (if you have warning messages enabled in your logger).
#
# We can now look at the beginning of the saved file and see that it matches the PHOEBE Legacy file-format.
# In[9]:
get_ipython().system('head -n 30 legacy_export.phoebe')
# Next
# ---------
#
# Next up: let's learn all about [constraints](constraints.ipynb)
| gpl-3.0 |
idlead/scikit-learn | examples/linear_model/plot_lasso_lars.py | 363 | 1080 | #!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
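# The x-axis of the plot is the L1 norm of the coefficient vector at each step
# of the path, normalized by its final value, rather than alpha itself.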
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
b0noI/AIF2 | src/test/integration/python/threshold_p_for_first_filter_separator_character.py | 3 | 50964 | # data collected by PropertyBasedSettingsTest.experimentWith_threshold_p_for_first_filter_separator_character
data = [
{"value": 0.000000, "errors": 55},
{"value": 0.000500, "errors": 55},
{"value": 0.001000, "errors": 55},
{"value": 0.001500, "errors": 54},
{"value": 0.002000, "errors": 54},
{"value": 0.002500, "errors": 54},
{"value": 0.003000, "errors": 53},
{"value": 0.003500, "errors": 53},
{"value": 0.004000, "errors": 53},
{"value": 0.004500, "errors": 53},
{"value": 0.005000, "errors": 53},
{"value": 0.005500, "errors": 53},
{"value": 0.006000, "errors": 53},
{"value": 0.006500, "errors": 53},
{"value": 0.007000, "errors": 53},
{"value": 0.007500, "errors": 53},
{"value": 0.008000, "errors": 53},
{"value": 0.008500, "errors": 53},
{"value": 0.009000, "errors": 53},
{"value": 0.009500, "errors": 53},
{"value": 0.010000, "errors": 53},
{"value": 0.010500, "errors": 53},
{"value": 0.011000, "errors": 53},
{"value": 0.011500, "errors": 53},
{"value": 0.012000, "errors": 53},
{"value": 0.012500, "errors": 53},
{"value": 0.013000, "errors": 53},
{"value": 0.013500, "errors": 53},
{"value": 0.014000, "errors": 53},
{"value": 0.014500, "errors": 53},
{"value": 0.015000, "errors": 53},
{"value": 0.015500, "errors": 53},
{"value": 0.016000, "errors": 53},
{"value": 0.016500, "errors": 53},
{"value": 0.017000, "errors": 53},
{"value": 0.017500, "errors": 53},
{"value": 0.018000, "errors": 53},
{"value": 0.018500, "errors": 53},
{"value": 0.019000, "errors": 53},
{"value": 0.019500, "errors": 53},
{"value": 0.020000, "errors": 53},
{"value": 0.020500, "errors": 53},
{"value": 0.021000, "errors": 53},
{"value": 0.021500, "errors": 53},
{"value": 0.022000, "errors": 53},
{"value": 0.022500, "errors": 53},
{"value": 0.023000, "errors": 53},
{"value": 0.023500, "errors": 53},
{"value": 0.024000, "errors": 53},
{"value": 0.024500, "errors": 53},
{"value": 0.025000, "errors": 53},
{"value": 0.025500, "errors": 53},
{"value": 0.026000, "errors": 53},
{"value": 0.026500, "errors": 53},
{"value": 0.027000, "errors": 53},
{"value": 0.027500, "errors": 53},
{"value": 0.028000, "errors": 53},
{"value": 0.028500, "errors": 53},
{"value": 0.029000, "errors": 53},
{"value": 0.029500, "errors": 53},
{"value": 0.030000, "errors": 53},
{"value": 0.030500, "errors": 53},
{"value": 0.031000, "errors": 53},
{"value": 0.031500, "errors": 53},
{"value": 0.032000, "errors": 53},
{"value": 0.032500, "errors": 53},
{"value": 0.033000, "errors": 53},
{"value": 0.033500, "errors": 53},
{"value": 0.034000, "errors": 53},
{"value": 0.034500, "errors": 53},
{"value": 0.035000, "errors": 53},
{"value": 0.035500, "errors": 53},
{"value": 0.036000, "errors": 53},
{"value": 0.036500, "errors": 53},
{"value": 0.037000, "errors": 53},
{"value": 0.037500, "errors": 53},
{"value": 0.038000, "errors": 53},
{"value": 0.038500, "errors": 53},
{"value": 0.039000, "errors": 53},
{"value": 0.039500, "errors": 53},
{"value": 0.040000, "errors": 53},
{"value": 0.040500, "errors": 53},
{"value": 0.041000, "errors": 53},
{"value": 0.041500, "errors": 53},
{"value": 0.042000, "errors": 53},
{"value": 0.042500, "errors": 53},
{"value": 0.043000, "errors": 53},
{"value": 0.043500, "errors": 53},
{"value": 0.044000, "errors": 53},
{"value": 0.044500, "errors": 53},
{"value": 0.045000, "errors": 53},
{"value": 0.045500, "errors": 53},
{"value": 0.046000, "errors": 53},
{"value": 0.046500, "errors": 53},
{"value": 0.047000, "errors": 53},
{"value": 0.047500, "errors": 53},
{"value": 0.048000, "errors": 53},
{"value": 0.048500, "errors": 53},
{"value": 0.049000, "errors": 53},
{"value": 0.049500, "errors": 53},
{"value": 0.050000, "errors": 53},
{"value": 0.050500, "errors": 53},
{"value": 0.051000, "errors": 53},
{"value": 0.051500, "errors": 53},
{"value": 0.052000, "errors": 53},
{"value": 0.052500, "errors": 53},
{"value": 0.053000, "errors": 53},
{"value": 0.053500, "errors": 53},
{"value": 0.054000, "errors": 53},
{"value": 0.054500, "errors": 53},
{"value": 0.055000, "errors": 53},
{"value": 0.055500, "errors": 53},
{"value": 0.056000, "errors": 53},
{"value": 0.056500, "errors": 53},
{"value": 0.057000, "errors": 53},
{"value": 0.057500, "errors": 53},
{"value": 0.058000, "errors": 53},
{"value": 0.058500, "errors": 53},
{"value": 0.059000, "errors": 53},
{"value": 0.059500, "errors": 53},
{"value": 0.060000, "errors": 53},
{"value": 0.060500, "errors": 53},
{"value": 0.061000, "errors": 53},
{"value": 0.061500, "errors": 53},
{"value": 0.062000, "errors": 53},
{"value": 0.062500, "errors": 53},
{"value": 0.063000, "errors": 53},
{"value": 0.063500, "errors": 53},
{"value": 0.064000, "errors": 53},
{"value": 0.064500, "errors": 53},
{"value": 0.065000, "errors": 53},
{"value": 0.065500, "errors": 53},
{"value": 0.066000, "errors": 53},
{"value": 0.066500, "errors": 53},
{"value": 0.067000, "errors": 53},
{"value": 0.067500, "errors": 53},
{"value": 0.068000, "errors": 53},
{"value": 0.068500, "errors": 53},
{"value": 0.069000, "errors": 53},
{"value": 0.069500, "errors": 53},
{"value": 0.070000, "errors": 53},
{"value": 0.070500, "errors": 53},
{"value": 0.071000, "errors": 53},
{"value": 0.071500, "errors": 53},
{"value": 0.072000, "errors": 53},
{"value": 0.072500, "errors": 53},
{"value": 0.073000, "errors": 53},
{"value": 0.073500, "errors": 53},
{"value": 0.074000, "errors": 53},
{"value": 0.074500, "errors": 53},
{"value": 0.075000, "errors": 53},
{"value": 0.075500, "errors": 53},
{"value": 0.076000, "errors": 53},
{"value": 0.076500, "errors": 53},
{"value": 0.077000, "errors": 53},
{"value": 0.077500, "errors": 53},
{"value": 0.078000, "errors": 53},
{"value": 0.078500, "errors": 53},
{"value": 0.079000, "errors": 53},
{"value": 0.079500, "errors": 53},
{"value": 0.080000, "errors": 53},
{"value": 0.080500, "errors": 53},
{"value": 0.081000, "errors": 53},
{"value": 0.081500, "errors": 53},
{"value": 0.082000, "errors": 53},
{"value": 0.082500, "errors": 53},
{"value": 0.083000, "errors": 53},
{"value": 0.083500, "errors": 53},
{"value": 0.084000, "errors": 53},
{"value": 0.084500, "errors": 53},
{"value": 0.085000, "errors": 53},
{"value": 0.085500, "errors": 53},
{"value": 0.086000, "errors": 53},
{"value": 0.086500, "errors": 53},
{"value": 0.087000, "errors": 53},
{"value": 0.087500, "errors": 53},
{"value": 0.088000, "errors": 53},
{"value": 0.088500, "errors": 53},
{"value": 0.089000, "errors": 53},
{"value": 0.089500, "errors": 53},
{"value": 0.090000, "errors": 55},
{"value": 0.090500, "errors": 55},
{"value": 0.091000, "errors": 55},
{"value": 0.091500, "errors": 55},
{"value": 0.092000, "errors": 55},
{"value": 0.092500, "errors": 55},
{"value": 0.093000, "errors": 55},
{"value": 0.093500, "errors": 55},
{"value": 0.094000, "errors": 55},
{"value": 0.094500, "errors": 55},
{"value": 0.095000, "errors": 55},
{"value": 0.095500, "errors": 55},
{"value": 0.096000, "errors": 55},
{"value": 0.096500, "errors": 55},
{"value": 0.097000, "errors": 55},
{"value": 0.097500, "errors": 55},
{"value": 0.098000, "errors": 54},
{"value": 0.098500, "errors": 54},
{"value": 0.099000, "errors": 54},
{"value": 0.099500, "errors": 54},
{"value": 0.100000, "errors": 54},
{"value": 0.100500, "errors": 54},
{"value": 0.101000, "errors": 54},
{"value": 0.101500, "errors": 54},
{"value": 0.102000, "errors": 54},
{"value": 0.102500, "errors": 54},
{"value": 0.103000, "errors": 54},
{"value": 0.103500, "errors": 54},
{"value": 0.104000, "errors": 54},
{"value": 0.104500, "errors": 54},
{"value": 0.105000, "errors": 54},
{"value": 0.105500, "errors": 54},
{"value": 0.106000, "errors": 54},
{"value": 0.106500, "errors": 54},
{"value": 0.107000, "errors": 54},
{"value": 0.107500, "errors": 54},
{"value": 0.108000, "errors": 54},
{"value": 0.108500, "errors": 54},
{"value": 0.109000, "errors": 54},
{"value": 0.109500, "errors": 54},
{"value": 0.110000, "errors": 54},
{"value": 0.110500, "errors": 54},
{"value": 0.111000, "errors": 54},
{"value": 0.111500, "errors": 54},
{"value": 0.112000, "errors": 54},
{"value": 0.112500, "errors": 54},
{"value": 0.113000, "errors": 54},
{"value": 0.113500, "errors": 57},
{"value": 0.114000, "errors": 57},
{"value": 0.114500, "errors": 57},
{"value": 0.115000, "errors": 57},
{"value": 0.115500, "errors": 60},
{"value": 0.116000, "errors": 63},
{"value": 0.116500, "errors": 63},
{"value": 0.117000, "errors": 65},
{"value": 0.117500, "errors": 67},
{"value": 0.118000, "errors": 67},
{"value": 0.118500, "errors": 67},
{"value": 0.119000, "errors": 69},
{"value": 0.119500, "errors": 69},
{"value": 0.120000, "errors": 73},
{"value": 0.120500, "errors": 75},
{"value": 0.121000, "errors": 77},
{"value": 0.121500, "errors": 81},
{"value": 0.122000, "errors": 83},
{"value": 0.122500, "errors": 87},
{"value": 0.123000, "errors": 89},
{"value": 0.123500, "errors": 89},
{"value": 0.124000, "errors": 92},
{"value": 0.124500, "errors": 92},
{"value": 0.125000, "errors": 92},
{"value": 0.125500, "errors": 95},
{"value": 0.126000, "errors": 95},
{"value": 0.126500, "errors": 98},
{"value": 0.127000, "errors": 98},
{"value": 0.127500, "errors": 101},
{"value": 0.128000, "errors": 101},
{"value": 0.128500, "errors": 101},
{"value": 0.129000, "errors": 103},
{"value": 0.129500, "errors": 103},
{"value": 0.130000, "errors": 103},
{"value": 0.130500, "errors": 103},
{"value": 0.131000, "errors": 105},
{"value": 0.131500, "errors": 107},
{"value": 0.132000, "errors": 107},
{"value": 0.132500, "errors": 109},
{"value": 0.133000, "errors": 109},
{"value": 0.133500, "errors": 109},
{"value": 0.134000, "errors": 109},
{"value": 0.134500, "errors": 109},
{"value": 0.135000, "errors": 112},
{"value": 0.135500, "errors": 118},
{"value": 0.136000, "errors": 119},
{"value": 0.136500, "errors": 122},
{"value": 0.137000, "errors": 122},
{"value": 0.137500, "errors": 125},
{"value": 0.138000, "errors": 127},
{"value": 0.138500, "errors": 127},
{"value": 0.139000, "errors": 132},
{"value": 0.139500, "errors": 138},
{"value": 0.140000, "errors": 138},
{"value": 0.140500, "errors": 140},
{"value": 0.141000, "errors": 140},
{"value": 0.141500, "errors": 140},
{"value": 0.142000, "errors": 144},
{"value": 0.142500, "errors": 144},
{"value": 0.143000, "errors": 147},
{"value": 0.143500, "errors": 153},
{"value": 0.144000, "errors": 155},
{"value": 0.144500, "errors": 154},
{"value": 0.145000, "errors": 158},
{"value": 0.145500, "errors": 171},
{"value": 0.146000, "errors": 177},
{"value": 0.146500, "errors": 180},
{"value": 0.147000, "errors": 186},
{"value": 0.147500, "errors": 188},
{"value": 0.148000, "errors": 194},
{"value": 0.148500, "errors": 196},
{"value": 0.149000, "errors": 208},
{"value": 0.149500, "errors": 209},
{"value": 0.150000, "errors": 215},
{"value": 0.150500, "errors": 225},
{"value": 0.151000, "errors": 233},
{"value": 0.151500, "errors": 254},
{"value": 0.152000, "errors": 261},
{"value": 0.152500, "errors": 270},
{"value": 0.153000, "errors": 279},
{"value": 0.153500, "errors": 284},
{"value": 0.154000, "errors": 294},
{"value": 0.154500, "errors": 297},
{"value": 0.155000, "errors": 301},
{"value": 0.155500, "errors": 315},
{"value": 0.156000, "errors": 324},
{"value": 0.156500, "errors": 326},
{"value": 0.157000, "errors": 334},
{"value": 0.157500, "errors": 341},
{"value": 0.158000, "errors": 346},
{"value": 0.158500, "errors": 354},
{"value": 0.159000, "errors": 365},
{"value": 0.159500, "errors": 371},
{"value": 0.160000, "errors": 388},
{"value": 0.160500, "errors": 400},
{"value": 0.161000, "errors": 412},
{"value": 0.161500, "errors": 414},
{"value": 0.162000, "errors": 419},
{"value": 0.162500, "errors": 428},
{"value": 0.163000, "errors": 429},
{"value": 0.163500, "errors": 434},
{"value": 0.164000, "errors": 438},
{"value": 0.164500, "errors": 449},
{"value": 0.165000, "errors": 452},
{"value": 0.165500, "errors": 462},
{"value": 0.166000, "errors": 475},
{"value": 0.166500, "errors": 478},
{"value": 0.167000, "errors": 478},
{"value": 0.167500, "errors": 478},
{"value": 0.168000, "errors": 488},
{"value": 0.168500, "errors": 492},
{"value": 0.169000, "errors": 498},
{"value": 0.169500, "errors": 504},
{"value": 0.170000, "errors": 509},
{"value": 0.170500, "errors": 521},
{"value": 0.171000, "errors": 525},
{"value": 0.171500, "errors": 530},
{"value": 0.172000, "errors": 534},
{"value": 0.172500, "errors": 549},
{"value": 0.173000, "errors": 559},
{"value": 0.173500, "errors": 565},
{"value": 0.174000, "errors": 570},
{"value": 0.174500, "errors": 575},
{"value": 0.175000, "errors": 579},
{"value": 0.175500, "errors": 587},
{"value": 0.176000, "errors": 588},
{"value": 0.176500, "errors": 594},
{"value": 0.177000, "errors": 600},
{"value": 0.177500, "errors": 606},
{"value": 0.178000, "errors": 623},
{"value": 0.178500, "errors": 627},
{"value": 0.179000, "errors": 637},
{"value": 0.179500, "errors": 643},
{"value": 0.180000, "errors": 643},
{"value": 0.180500, "errors": 648},
{"value": 0.181000, "errors": 650},
{"value": 0.181500, "errors": 651},
{"value": 0.182000, "errors": 656},
{"value": 0.182500, "errors": 665},
{"value": 0.183000, "errors": 666},
{"value": 0.183500, "errors": 669},
{"value": 0.184000, "errors": 673},
{"value": 0.184500, "errors": 675},
{"value": 0.185000, "errors": 677},
{"value": 0.185500, "errors": 679},
{"value": 0.186000, "errors": 680},
{"value": 0.186500, "errors": 686},
{"value": 0.187000, "errors": 686},
{"value": 0.187500, "errors": 688},
{"value": 0.188000, "errors": 691},
{"value": 0.188500, "errors": 693},
{"value": 0.189000, "errors": 701},
{"value": 0.189500, "errors": 701},
{"value": 0.190000, "errors": 704},
{"value": 0.190500, "errors": 704},
{"value": 0.191000, "errors": 707},
{"value": 0.191500, "errors": 707},
{"value": 0.192000, "errors": 709},
{"value": 0.192500, "errors": 711},
{"value": 0.193000, "errors": 717},
{"value": 0.193500, "errors": 717},
{"value": 0.194000, "errors": 719},
{"value": 0.194500, "errors": 720},
{"value": 0.195000, "errors": 721},
{"value": 0.195500, "errors": 721},
{"value": 0.196000, "errors": 721},
{"value": 0.196500, "errors": 721},
{"value": 0.197000, "errors": 721},
{"value": 0.197500, "errors": 721},
{"value": 0.198000, "errors": 724},
{"value": 0.198500, "errors": 724},
{"value": 0.199000, "errors": 724},
{"value": 0.199500, "errors": 726},
{"value": 0.200000, "errors": 726},
{"value": 0.200500, "errors": 726},
{"value": 0.201000, "errors": 730},
{"value": 0.201500, "errors": 735},
{"value": 0.202000, "errors": 735},
{"value": 0.202500, "errors": 735},
{"value": 0.203000, "errors": 735},
{"value": 0.203500, "errors": 736},
{"value": 0.204000, "errors": 736},
{"value": 0.204500, "errors": 736},
{"value": 0.205000, "errors": 736},
{"value": 0.205500, "errors": 736},
{"value": 0.206000, "errors": 736},
{"value": 0.206500, "errors": 738},
{"value": 0.207000, "errors": 738},
{"value": 0.207500, "errors": 738},
{"value": 0.208000, "errors": 738},
{"value": 0.208500, "errors": 738},
{"value": 0.209000, "errors": 739},
{"value": 0.209500, "errors": 740},
{"value": 0.210000, "errors": 743},
{"value": 0.210500, "errors": 743},
{"value": 0.211000, "errors": 745},
{"value": 0.211500, "errors": 745},
{"value": 0.212000, "errors": 745},
{"value": 0.212500, "errors": 747},
{"value": 0.213000, "errors": 747},
{"value": 0.213500, "errors": 747},
{"value": 0.214000, "errors": 747},
{"value": 0.214500, "errors": 747},
{"value": 0.215000, "errors": 747},
{"value": 0.215500, "errors": 747},
{"value": 0.216000, "errors": 747},
{"value": 0.216500, "errors": 747},
{"value": 0.217000, "errors": 747},
{"value": 0.217500, "errors": 749},
{"value": 0.218000, "errors": 749},
{"value": 0.218500, "errors": 749},
{"value": 0.219000, "errors": 749},
{"value": 0.219500, "errors": 749},
{"value": 0.220000, "errors": 750},
{"value": 0.220500, "errors": 750},
{"value": 0.221000, "errors": 750},
{"value": 0.221500, "errors": 750},
{"value": 0.222000, "errors": 750},
{"value": 0.222500, "errors": 750},
{"value": 0.223000, "errors": 750},
{"value": 0.223500, "errors": 750},
{"value": 0.224000, "errors": 750},
{"value": 0.224500, "errors": 750},
{"value": 0.225000, "errors": 750},
{"value": 0.225500, "errors": 750},
{"value": 0.226000, "errors": 750},
{"value": 0.226500, "errors": 750},
{"value": 0.227000, "errors": 750},
{"value": 0.227500, "errors": 750},
{"value": 0.228000, "errors": 750},
{"value": 0.228500, "errors": 750},
{"value": 0.229000, "errors": 750},
{"value": 0.229500, "errors": 750},
{"value": 0.230000, "errors": 750},
{"value": 0.230500, "errors": 750},
{"value": 0.231000, "errors": 750},
{"value": 0.231500, "errors": 750},
{"value": 0.232000, "errors": 750},
{"value": 0.232500, "errors": 750},
{"value": 0.233000, "errors": 750},
{"value": 0.233500, "errors": 750},
{"value": 0.234000, "errors": 750},
{"value": 0.234500, "errors": 751},
{"value": 0.235000, "errors": 751},
{"value": 0.235500, "errors": 751},
{"value": 0.236000, "errors": 751},
{"value": 0.236500, "errors": 751},
{"value": 0.237000, "errors": 751},
{"value": 0.237500, "errors": 751},
{"value": 0.238000, "errors": 751},
{"value": 0.238500, "errors": 752},
{"value": 0.239000, "errors": 752},
{"value": 0.239500, "errors": 752},
{"value": 0.240000, "errors": 754},
{"value": 0.240500, "errors": 754},
{"value": 0.241000, "errors": 754},
{"value": 0.241500, "errors": 754},
{"value": 0.242000, "errors": 754},
{"value": 0.242500, "errors": 754},
{"value": 0.243000, "errors": 754},
{"value": 0.243500, "errors": 754},
{"value": 0.244000, "errors": 754},
{"value": 0.244500, "errors": 754},
{"value": 0.245000, "errors": 754},
{"value": 0.245500, "errors": 754},
{"value": 0.246000, "errors": 754},
{"value": 0.246500, "errors": 754},
{"value": 0.247000, "errors": 754},
{"value": 0.247500, "errors": 754},
{"value": 0.248000, "errors": 754},
{"value": 0.248500, "errors": 754},
{"value": 0.249000, "errors": 754},
{"value": 0.249500, "errors": 754},
{"value": 0.250000, "errors": 754},
{"value": 0.250500, "errors": 754},
{"value": 0.251000, "errors": 754},
{"value": 0.251500, "errors": 754},
{"value": 0.252000, "errors": 754},
{"value": 0.252500, "errors": 754},
{"value": 0.253000, "errors": 754},
{"value": 0.253500, "errors": 754},
{"value": 0.254000, "errors": 754},
{"value": 0.254500, "errors": 754},
{"value": 0.255000, "errors": 754},
{"value": 0.255500, "errors": 754},
{"value": 0.256000, "errors": 754},
{"value": 0.256500, "errors": 754},
{"value": 0.257000, "errors": 754},
{"value": 0.257500, "errors": 754},
{"value": 0.258000, "errors": 754},
{"value": 0.258500, "errors": 754},
{"value": 0.259000, "errors": 754},
{"value": 0.259500, "errors": 754},
{"value": 0.260000, "errors": 754},
{"value": 0.260500, "errors": 754},
{"value": 0.261000, "errors": 754},
{"value": 0.261500, "errors": 754},
{"value": 0.262000, "errors": 754},
{"value": 0.262500, "errors": 754},
{"value": 0.263000, "errors": 754},
{"value": 0.263500, "errors": 754},
{"value": 0.264000, "errors": 754},
{"value": 0.264500, "errors": 754},
{"value": 0.265000, "errors": 754},
{"value": 0.265500, "errors": 754},
{"value": 0.266000, "errors": 754},
{"value": 0.266500, "errors": 754},
{"value": 0.267000, "errors": 754},
{"value": 0.267500, "errors": 754},
{"value": 0.268000, "errors": 754},
{"value": 0.268500, "errors": 754},
{"value": 0.269000, "errors": 754},
{"value": 0.269500, "errors": 754},
{"value": 0.270000, "errors": 754},
{"value": 0.270500, "errors": 754},
{"value": 0.271000, "errors": 754},
{"value": 0.271500, "errors": 754},
{"value": 0.272000, "errors": 754},
{"value": 0.272500, "errors": 754},
{"value": 0.273000, "errors": 754},
{"value": 0.273500, "errors": 754},
{"value": 0.274000, "errors": 754},
{"value": 0.274500, "errors": 754},
{"value": 0.275000, "errors": 754},
{"value": 0.275500, "errors": 754},
{"value": 0.276000, "errors": 754},
{"value": 0.276500, "errors": 754},
{"value": 0.277000, "errors": 754},
{"value": 0.277500, "errors": 754},
{"value": 0.278000, "errors": 754},
{"value": 0.278500, "errors": 754},
{"value": 0.279000, "errors": 754},
{"value": 0.279500, "errors": 754},
{"value": 0.280000, "errors": 754},
{"value": 0.280500, "errors": 754},
{"value": 0.281000, "errors": 754},
{"value": 0.281500, "errors": 754},
{"value": 0.282000, "errors": 754},
{"value": 0.282500, "errors": 754},
{"value": 0.283000, "errors": 753},
{"value": 0.283500, "errors": 753},
{"value": 0.284000, "errors": 753},
{"value": 0.284500, "errors": 753},
{"value": 0.285000, "errors": 753},
{"value": 0.285500, "errors": 753},
{"value": 0.286000, "errors": 753},
{"value": 0.286500, "errors": 753},
{"value": 0.287000, "errors": 753},
{"value": 0.287500, "errors": 753},
{"value": 0.288000, "errors": 753},
{"value": 0.288500, "errors": 753},
{"value": 0.289000, "errors": 753},
{"value": 0.289500, "errors": 753},
{"value": 0.290000, "errors": 753},
{"value": 0.290500, "errors": 753},
{"value": 0.291000, "errors": 753},
{"value": 0.291500, "errors": 753},
{"value": 0.292000, "errors": 753},
{"value": 0.292500, "errors": 753},
{"value": 0.293000, "errors": 753},
{"value": 0.293500, "errors": 753},
{"value": 0.294000, "errors": 753},
{"value": 0.294500, "errors": 753},
{"value": 0.295000, "errors": 753},
{"value": 0.295500, "errors": 753},
{"value": 0.296000, "errors": 753},
{"value": 0.296500, "errors": 753},
{"value": 0.297000, "errors": 753},
{"value": 0.297500, "errors": 753},
{"value": 0.298000, "errors": 753},
{"value": 0.298500, "errors": 753},
{"value": 0.299000, "errors": 753},
{"value": 0.299500, "errors": 753},
{"value": 0.300000, "errors": 753},
{"value": 0.300500, "errors": 753},
{"value": 0.301000, "errors": 753},
{"value": 0.301500, "errors": 753},
{"value": 0.302000, "errors": 753},
{"value": 0.302500, "errors": 753},
{"value": 0.303000, "errors": 753},
{"value": 0.303500, "errors": 753},
{"value": 0.304000, "errors": 753},
{"value": 0.304500, "errors": 753},
{"value": 0.305000, "errors": 753},
{"value": 0.305500, "errors": 753},
{"value": 0.306000, "errors": 753},
{"value": 0.306500, "errors": 752},
{"value": 0.307000, "errors": 752},
{"value": 0.307500, "errors": 752},
{"value": 0.308000, "errors": 752},
{"value": 0.308500, "errors": 752},
{"value": 0.309000, "errors": 752},
{"value": 0.309500, "errors": 752},
{"value": 0.310000, "errors": 752},
{"value": 0.310500, "errors": 752},
{"value": 0.311000, "errors": 752},
{"value": 0.311500, "errors": 752},
{"value": 0.312000, "errors": 752},
{"value": 0.312500, "errors": 752},
{"value": 0.313000, "errors": 752},
{"value": 0.313500, "errors": 752},
{"value": 0.314000, "errors": 752},
{"value": 0.314500, "errors": 752},
{"value": 0.315000, "errors": 752},
{"value": 0.315500, "errors": 752},
{"value": 0.316000, "errors": 752},
{"value": 0.316500, "errors": 752},
{"value": 0.317000, "errors": 752},
{"value": 0.317500, "errors": 752},
{"value": 0.318000, "errors": 752},
{"value": 0.318500, "errors": 752},
{"value": 0.319000, "errors": 752},
{"value": 0.319500, "errors": 752},
{"value": 0.320000, "errors": 752},
{"value": 0.320500, "errors": 752},
{"value": 0.321000, "errors": 752},
{"value": 0.321500, "errors": 752},
{"value": 0.322000, "errors": 752},
{"value": 0.322500, "errors": 752},
{"value": 0.323000, "errors": 752},
{"value": 0.323500, "errors": 752},
{"value": 0.324000, "errors": 752},
{"value": 0.324500, "errors": 752},
{"value": 0.325000, "errors": 752},
{"value": 0.325500, "errors": 752},
{"value": 0.326000, "errors": 752},
{"value": 0.326500, "errors": 752},
{"value": 0.327000, "errors": 752},
{"value": 0.327500, "errors": 752},
{"value": 0.328000, "errors": 752},
{"value": 0.328500, "errors": 752},
{"value": 0.329000, "errors": 752},
{"value": 0.329500, "errors": 752},
{"value": 0.330000, "errors": 752},
{"value": 0.330500, "errors": 752},
{"value": 0.331000, "errors": 752},
{"value": 0.331500, "errors": 752},
{"value": 0.332000, "errors": 752},
{"value": 0.332500, "errors": 752},
{"value": 0.333000, "errors": 752},
{"value": 0.333500, "errors": 752},
{"value": 0.334000, "errors": 752},
{"value": 0.334500, "errors": 752},
{"value": 0.335000, "errors": 752},
{"value": 0.335500, "errors": 752},
{"value": 0.336000, "errors": 752},
{"value": 0.336500, "errors": 752},
{"value": 0.337000, "errors": 752},
{"value": 0.337500, "errors": 752},
{"value": 0.338000, "errors": 752},
{"value": 0.338500, "errors": 752},
{"value": 0.339000, "errors": 752},
{"value": 0.339500, "errors": 752},
{"value": 0.340000, "errors": 752},
{"value": 0.340500, "errors": 752},
{"value": 0.341000, "errors": 752},
{"value": 0.341500, "errors": 752},
{"value": 0.342000, "errors": 752},
{"value": 0.342500, "errors": 752},
{"value": 0.343000, "errors": 751},
{"value": 0.343500, "errors": 751},
{"value": 0.344000, "errors": 751},
{"value": 0.344500, "errors": 751},
{"value": 0.345000, "errors": 751},
{"value": 0.345500, "errors": 751},
{"value": 0.346000, "errors": 751},
{"value": 0.346500, "errors": 751},
{"value": 0.347000, "errors": 751},
{"value": 0.347500, "errors": 751},
{"value": 0.348000, "errors": 751},
{"value": 0.348500, "errors": 751},
{"value": 0.349000, "errors": 751},
{"value": 0.349500, "errors": 751},
{"value": 0.350000, "errors": 751},
{"value": 0.350500, "errors": 751},
{"value": 0.351000, "errors": 751},
{"value": 0.351500, "errors": 751},
{"value": 0.352000, "errors": 751},
{"value": 0.352500, "errors": 751},
{"value": 0.353000, "errors": 751},
{"value": 0.353500, "errors": 751},
{"value": 0.354000, "errors": 751},
{"value": 0.354500, "errors": 751},
{"value": 0.355000, "errors": 751},
{"value": 0.355500, "errors": 751},
{"value": 0.356000, "errors": 751},
{"value": 0.356500, "errors": 751},
{"value": 0.357000, "errors": 751},
{"value": 0.357500, "errors": 751},
{"value": 0.358000, "errors": 751},
{"value": 0.358500, "errors": 751},
{"value": 0.359000, "errors": 751},
{"value": 0.359500, "errors": 751},
{"value": 0.360000, "errors": 751},
{"value": 0.360500, "errors": 751},
{"value": 0.361000, "errors": 751},
{"value": 0.361500, "errors": 751},
{"value": 0.362000, "errors": 751},
{"value": 0.362500, "errors": 751},
{"value": 0.363000, "errors": 751},
{"value": 0.363500, "errors": 751},
{"value": 0.364000, "errors": 751},
{"value": 0.364500, "errors": 751},
{"value": 0.365000, "errors": 751},
{"value": 0.365500, "errors": 751},
{"value": 0.366000, "errors": 751},
{"value": 0.366500, "errors": 751},
{"value": 0.367000, "errors": 751},
{"value": 0.367500, "errors": 751},
{"value": 0.368000, "errors": 751},
{"value": 0.368500, "errors": 751},
{"value": 0.369000, "errors": 751},
{"value": 0.369500, "errors": 751},
{"value": 0.370000, "errors": 751},
{"value": 0.370500, "errors": 751},
{"value": 0.371000, "errors": 751},
{"value": 0.371500, "errors": 751},
{"value": 0.372000, "errors": 751},
{"value": 0.372500, "errors": 751},
{"value": 0.373000, "errors": 751},
{"value": 0.373500, "errors": 751},
{"value": 0.374000, "errors": 751},
{"value": 0.374500, "errors": 751},
{"value": 0.375000, "errors": 751},
{"value": 0.375500, "errors": 751},
{"value": 0.376000, "errors": 751},
{"value": 0.376500, "errors": 751},
{"value": 0.377000, "errors": 751},
{"value": 0.377500, "errors": 751},
{"value": 0.378000, "errors": 751},
{"value": 0.378500, "errors": 751},
{"value": 0.379000, "errors": 751},
{"value": 0.379500, "errors": 751},
{"value": 0.380000, "errors": 751},
{"value": 0.380500, "errors": 751},
{"value": 0.381000, "errors": 751},
{"value": 0.381500, "errors": 751},
{"value": 0.382000, "errors": 751},
{"value": 0.382500, "errors": 751},
{"value": 0.383000, "errors": 751},
{"value": 0.383500, "errors": 751},
{"value": 0.384000, "errors": 751},
{"value": 0.384500, "errors": 751},
{"value": 0.385000, "errors": 751},
{"value": 0.385500, "errors": 751},
{"value": 0.386000, "errors": 751},
{"value": 0.386500, "errors": 751},
{"value": 0.387000, "errors": 751},
{"value": 0.387500, "errors": 751},
{"value": 0.388000, "errors": 751},
{"value": 0.388500, "errors": 751},
{"value": 0.389000, "errors": 751},
{"value": 0.389500, "errors": 751},
{"value": 0.390000, "errors": 751},
{"value": 0.390500, "errors": 751},
{"value": 0.391000, "errors": 751},
{"value": 0.391500, "errors": 751},
{"value": 0.392000, "errors": 751},
{"value": 0.392500, "errors": 751},
{"value": 0.393000, "errors": 751},
{"value": 0.393500, "errors": 751},
{"value": 0.394000, "errors": 751},
{"value": 0.394500, "errors": 751},
{"value": 0.395000, "errors": 751},
{"value": 0.395500, "errors": 751},
{"value": 0.396000, "errors": 751},
{"value": 0.396500, "errors": 751},
{"value": 0.397000, "errors": 751},
{"value": 0.397500, "errors": 751},
{"value": 0.398000, "errors": 751},
{"value": 0.398500, "errors": 751},
{"value": 0.399000, "errors": 751},
{"value": 0.399500, "errors": 751},
{"value": 0.400000, "errors": 751},
{"value": 0.400500, "errors": 751},
{"value": 0.401000, "errors": 751},
{"value": 0.401500, "errors": 751},
{"value": 0.402000, "errors": 751},
{"value": 0.402500, "errors": 751},
{"value": 0.403000, "errors": 751},
{"value": 0.403500, "errors": 751},
{"value": 0.404000, "errors": 751},
{"value": 0.404500, "errors": 751},
{"value": 0.405000, "errors": 751},
{"value": 0.405500, "errors": 751},
{"value": 0.406000, "errors": 751},
{"value": 0.406500, "errors": 751},
{"value": 0.407000, "errors": 751},
{"value": 0.407500, "errors": 751},
{"value": 0.408000, "errors": 751},
{"value": 0.408500, "errors": 751},
{"value": 0.409000, "errors": 751},
{"value": 0.409500, "errors": 751},
{"value": 0.410000, "errors": 751},
{"value": 0.410500, "errors": 751},
{"value": 0.411000, "errors": 751},
{"value": 0.411500, "errors": 751},
{"value": 0.412000, "errors": 751},
{"value": 0.412500, "errors": 751},
{"value": 0.413000, "errors": 751},
{"value": 0.413500, "errors": 751},
{"value": 0.414000, "errors": 751},
{"value": 0.414500, "errors": 751},
{"value": 0.415000, "errors": 751},
{"value": 0.415500, "errors": 751},
{"value": 0.416000, "errors": 751},
{"value": 0.416500, "errors": 751},
{"value": 0.417000, "errors": 751},
{"value": 0.417500, "errors": 751},
{"value": 0.418000, "errors": 751},
{"value": 0.418500, "errors": 751},
{"value": 0.419000, "errors": 751},
{"value": 0.419500, "errors": 751},
{"value": 0.420000, "errors": 751},
{"value": 0.420500, "errors": 751},
{"value": 0.421000, "errors": 751},
{"value": 0.421500, "errors": 751},
{"value": 0.422000, "errors": 751},
{"value": 0.422500, "errors": 751},
{"value": 0.423000, "errors": 751},
{"value": 0.423500, "errors": 751},
{"value": 0.424000, "errors": 751},
{"value": 0.424500, "errors": 751},
{"value": 0.425000, "errors": 751},
{"value": 0.425500, "errors": 751},
{"value": 0.426000, "errors": 751},
{"value": 0.426500, "errors": 751},
{"value": 0.427000, "errors": 751},
{"value": 0.427500, "errors": 751},
{"value": 0.428000, "errors": 751},
{"value": 0.428500, "errors": 751},
{"value": 0.429000, "errors": 751},
{"value": 0.429500, "errors": 751},
{"value": 0.430000, "errors": 751},
{"value": 0.430500, "errors": 751},
{"value": 0.431000, "errors": 751},
{"value": 0.431500, "errors": 751},
{"value": 0.432000, "errors": 751},
{"value": 0.432500, "errors": 751},
{"value": 0.433000, "errors": 751},
{"value": 0.433500, "errors": 751},
{"value": 0.434000, "errors": 751},
{"value": 0.434500, "errors": 751},
{"value": 0.435000, "errors": 751},
{"value": 0.435500, "errors": 751},
{"value": 0.436000, "errors": 751},
{"value": 0.436500, "errors": 751},
{"value": 0.437000, "errors": 751},
{"value": 0.437500, "errors": 751},
{"value": 0.438000, "errors": 751},
{"value": 0.438500, "errors": 751},
{"value": 0.439000, "errors": 751},
{"value": 0.439500, "errors": 751},
{"value": 0.440000, "errors": 751},
{"value": 0.440500, "errors": 751},
{"value": 0.441000, "errors": 751},
{"value": 0.441500, "errors": 751},
{"value": 0.442000, "errors": 751},
{"value": 0.442500, "errors": 751},
{"value": 0.443000, "errors": 751},
{"value": 0.443500, "errors": 751},
{"value": 0.444000, "errors": 751},
{"value": 0.444500, "errors": 751},
{"value": 0.445000, "errors": 751},
{"value": 0.445500, "errors": 751},
{"value": 0.446000, "errors": 751},
{"value": 0.446500, "errors": 751},
{"value": 0.447000, "errors": 751},
{"value": 0.447500, "errors": 751},
{"value": 0.448000, "errors": 751},
{"value": 0.448500, "errors": 751},
{"value": 0.449000, "errors": 751},
{"value": 0.449500, "errors": 751},
{"value": 0.450000, "errors": 751},
{"value": 0.450500, "errors": 751},
{"value": 0.451000, "errors": 751},
{"value": 0.451500, "errors": 751},
{"value": 0.452000, "errors": 751},
{"value": 0.452500, "errors": 751},
{"value": 0.453000, "errors": 751},
{"value": 0.453500, "errors": 751},
{"value": 0.454000, "errors": 751},
{"value": 0.454500, "errors": 751},
{"value": 0.455000, "errors": 751},
{"value": 0.455500, "errors": 751},
{"value": 0.456000, "errors": 751},
{"value": 0.456500, "errors": 751},
{"value": 0.457000, "errors": 751},
{"value": 0.457500, "errors": 751},
{"value": 0.458000, "errors": 751},
{"value": 0.458500, "errors": 751},
{"value": 0.459000, "errors": 750},
{"value": 0.459500, "errors": 750},
{"value": 0.460000, "errors": 750},
{"value": 0.460500, "errors": 750},
{"value": 0.461000, "errors": 750},
{"value": 0.461500, "errors": 750},
{"value": 0.462000, "errors": 750},
{"value": 0.462500, "errors": 750},
{"value": 0.463000, "errors": 750},
{"value": 0.463500, "errors": 750},
{"value": 0.464000, "errors": 750},
{"value": 0.464500, "errors": 750},
{"value": 0.465000, "errors": 750},
{"value": 0.465500, "errors": 750},
{"value": 0.466000, "errors": 750},
{"value": 0.466500, "errors": 750},
{"value": 0.467000, "errors": 750},
{"value": 0.467500, "errors": 750},
{"value": 0.468000, "errors": 750},
{"value": 0.468500, "errors": 750},
{"value": 0.469000, "errors": 750},
{"value": 0.469500, "errors": 750},
{"value": 0.470000, "errors": 750},
{"value": 0.470500, "errors": 750},
{"value": 0.471000, "errors": 750},
{"value": 0.471500, "errors": 750},
{"value": 0.472000, "errors": 750},
{"value": 0.472500, "errors": 750},
{"value": 0.473000, "errors": 750},
{"value": 0.473500, "errors": 750},
{"value": 0.474000, "errors": 750},
{"value": 0.474500, "errors": 750},
{"value": 0.475000, "errors": 750},
{"value": 0.475500, "errors": 750},
{"value": 0.476000, "errors": 750},
{"value": 0.476500, "errors": 750},
{"value": 0.477000, "errors": 750},
{"value": 0.477500, "errors": 750},
{"value": 0.478000, "errors": 750},
{"value": 0.478500, "errors": 750},
{"value": 0.479000, "errors": 750},
{"value": 0.479500, "errors": 749},
{"value": 0.480000, "errors": 749},
{"value": 0.480500, "errors": 749},
{"value": 0.481000, "errors": 749},
{"value": 0.481500, "errors": 749},
{"value": 0.482000, "errors": 749},
{"value": 0.482500, "errors": 749},
{"value": 0.483000, "errors": 749},
{"value": 0.483500, "errors": 749},
{"value": 0.484000, "errors": 749},
{"value": 0.484500, "errors": 749},
{"value": 0.485000, "errors": 749},
{"value": 0.485500, "errors": 749},
{"value": 0.486000, "errors": 749},
{"value": 0.486500, "errors": 749},
{"value": 0.487000, "errors": 749},
{"value": 0.487500, "errors": 749},
{"value": 0.488000, "errors": 749},
{"value": 0.488500, "errors": 749},
{"value": 0.489000, "errors": 749},
{"value": 0.489500, "errors": 749},
{"value": 0.490000, "errors": 749},
{"value": 0.490500, "errors": 749},
{"value": 0.491000, "errors": 749},
{"value": 0.491500, "errors": 749},
{"value": 0.492000, "errors": 749},
{"value": 0.492500, "errors": 749},
{"value": 0.493000, "errors": 749},
{"value": 0.493500, "errors": 749},
{"value": 0.494000, "errors": 749},
{"value": 0.494500, "errors": 749},
{"value": 0.495000, "errors": 749},
{"value": 0.495500, "errors": 749},
{"value": 0.496000, "errors": 749},
{"value": 0.496500, "errors": 749},
{"value": 0.497000, "errors": 749},
{"value": 0.497500, "errors": 749},
{"value": 0.498000, "errors": 749},
{"value": 0.498500, "errors": 749},
{"value": 0.499000, "errors": 749},
{"value": 0.499500, "errors": 749},
{"value": 0.500000, "errors": 749},
{"value": 0.500500, "errors": 749},
{"value": 0.501000, "errors": 749},
{"value": 0.501500, "errors": 749},
{"value": 0.502000, "errors": 749},
{"value": 0.502500, "errors": 749},
{"value": 0.503000, "errors": 749},
{"value": 0.503500, "errors": 749},
{"value": 0.504000, "errors": 749},
{"value": 0.504500, "errors": 749},
{"value": 0.505000, "errors": 749},
{"value": 0.505500, "errors": 749},
{"value": 0.506000, "errors": 749},
{"value": 0.506500, "errors": 749},
{"value": 0.507000, "errors": 749},
{"value": 0.507500, "errors": 749},
{"value": 0.508000, "errors": 749},
{"value": 0.508500, "errors": 749},
{"value": 0.509000, "errors": 749},
{"value": 0.509500, "errors": 749},
{"value": 0.510000, "errors": 749},
{"value": 0.510500, "errors": 749},
{"value": 0.511000, "errors": 749},
{"value": 0.511500, "errors": 749},
{"value": 0.512000, "errors": 749},
{"value": 0.512500, "errors": 749},
{"value": 0.513000, "errors": 749},
{"value": 0.513500, "errors": 749},
{"value": 0.514000, "errors": 749},
{"value": 0.514500, "errors": 749},
{"value": 0.515000, "errors": 749},
{"value": 0.515500, "errors": 749},
{"value": 0.516000, "errors": 749},
{"value": 0.516500, "errors": 749},
{"value": 0.517000, "errors": 749},
{"value": 0.517500, "errors": 749},
{"value": 0.518000, "errors": 749},
{"value": 0.518500, "errors": 749},
{"value": 0.519000, "errors": 749},
{"value": 0.519500, "errors": 749},
{"value": 0.520000, "errors": 749},
{"value": 0.520500, "errors": 749},
{"value": 0.521000, "errors": 749},
{"value": 0.521500, "errors": 749},
{"value": 0.522000, "errors": 749},
{"value": 0.522500, "errors": 749},
{"value": 0.523000, "errors": 749},
{"value": 0.523500, "errors": 749},
{"value": 0.524000, "errors": 749},
{"value": 0.524500, "errors": 749},
{"value": 0.525000, "errors": 749},
{"value": 0.525500, "errors": 749},
{"value": 0.526000, "errors": 749},
{"value": 0.526500, "errors": 749},
{"value": 0.527000, "errors": 749},
{"value": 0.527500, "errors": 749},
{"value": 0.528000, "errors": 749},
{"value": 0.528500, "errors": 749},
{"value": 0.529000, "errors": 749},
{"value": 0.529500, "errors": 749},
{"value": 0.530000, "errors": 749},
{"value": 0.530500, "errors": 749},
{"value": 0.531000, "errors": 749},
{"value": 0.531500, "errors": 749},
{"value": 0.532000, "errors": 749},
{"value": 0.532500, "errors": 749},
{"value": 0.533000, "errors": 749},
{"value": 0.533500, "errors": 749},
{"value": 0.534000, "errors": 749},
{"value": 0.534500, "errors": 749},
{"value": 0.535000, "errors": 749},
{"value": 0.535500, "errors": 749},
{"value": 0.536000, "errors": 749},
{"value": 0.536500, "errors": 749},
{"value": 0.537000, "errors": 749},
{"value": 0.537500, "errors": 749},
{"value": 0.538000, "errors": 748},
{"value": 0.538500, "errors": 748},
{"value": 0.539000, "errors": 748},
{"value": 0.539500, "errors": 748},
{"value": 0.540000, "errors": 748},
{"value": 0.540500, "errors": 748},
{"value": 0.541000, "errors": 748},
{"value": 0.541500, "errors": 748},
{"value": 0.542000, "errors": 748},
{"value": 0.542500, "errors": 748},
{"value": 0.543000, "errors": 748},
{"value": 0.543500, "errors": 748},
{"value": 0.544000, "errors": 748},
{"value": 0.544500, "errors": 748},
{"value": 0.545000, "errors": 748},
{"value": 0.545500, "errors": 748},
{"value": 0.546000, "errors": 748},
{"value": 0.546500, "errors": 748},
{"value": 0.547000, "errors": 748},
{"value": 0.547500, "errors": 748},
{"value": 0.548000, "errors": 748},
{"value": 0.548500, "errors": 748},
{"value": 0.549000, "errors": 748},
{"value": 0.549500, "errors": 748},
{"value": 0.550000, "errors": 748},
{"value": 0.550500, "errors": 748},
{"value": 0.551000, "errors": 748},
{"value": 0.551500, "errors": 748},
{"value": 0.552000, "errors": 748},
{"value": 0.552500, "errors": 748},
{"value": 0.553000, "errors": 748},
{"value": 0.553500, "errors": 748},
{"value": 0.554000, "errors": 748},
{"value": 0.554500, "errors": 748},
{"value": 0.555000, "errors": 748},
{"value": 0.555500, "errors": 748},
{"value": 0.556000, "errors": 748},
{"value": 0.556500, "errors": 748},
{"value": 0.557000, "errors": 748},
{"value": 0.557500, "errors": 748},
{"value": 0.558000, "errors": 748},
{"value": 0.558500, "errors": 748},
{"value": 0.559000, "errors": 748},
{"value": 0.559500, "errors": 748},
{"value": 0.560000, "errors": 748},
{"value": 0.560500, "errors": 748},
{"value": 0.561000, "errors": 748},
{"value": 0.561500, "errors": 748},
{"value": 0.562000, "errors": 748},
{"value": 0.562500, "errors": 748},
{"value": 0.563000, "errors": 748},
{"value": 0.563500, "errors": 748},
{"value": 0.564000, "errors": 748},
{"value": 0.564500, "errors": 748},
{"value": 0.565000, "errors": 748},
{"value": 0.565500, "errors": 748},
{"value": 0.566000, "errors": 748},
{"value": 0.566500, "errors": 748},
{"value": 0.567000, "errors": 748},
{"value": 0.567500, "errors": 748},
{"value": 0.568000, "errors": 748},
{"value": 0.568500, "errors": 748},
{"value": 0.569000, "errors": 748},
{"value": 0.569500, "errors": 748},
{"value": 0.570000, "errors": 748},
{"value": 0.570500, "errors": 748},
{"value": 0.571000, "errors": 748},
{"value": 0.571500, "errors": 748},
{"value": 0.572000, "errors": 747},
{"value": 0.572500, "errors": 747},
{"value": 0.573000, "errors": 747},
{"value": 0.573500, "errors": 747},
{"value": 0.574000, "errors": 747},
{"value": 0.574500, "errors": 747},
{"value": 0.575000, "errors": 747},
{"value": 0.575500, "errors": 747},
{"value": 0.576000, "errors": 747},
{"value": 0.576500, "errors": 747},
{"value": 0.577000, "errors": 747},
{"value": 0.577500, "errors": 747},
{"value": 0.578000, "errors": 747},
{"value": 0.578500, "errors": 747},
{"value": 0.579000, "errors": 747},
{"value": 0.579500, "errors": 747},
{"value": 0.580000, "errors": 747},
{"value": 0.580500, "errors": 747},
{"value": 0.581000, "errors": 747},
{"value": 0.581500, "errors": 747},
{"value": 0.582000, "errors": 747},
{"value": 0.582500, "errors": 747},
{"value": 0.583000, "errors": 747},
{"value": 0.583500, "errors": 747},
{"value": 0.584000, "errors": 747},
{"value": 0.584500, "errors": 747},
{"value": 0.585000, "errors": 747},
{"value": 0.585500, "errors": 747},
{"value": 0.586000, "errors": 747},
{"value": 0.586500, "errors": 747},
{"value": 0.587000, "errors": 747},
{"value": 0.587500, "errors": 747},
{"value": 0.588000, "errors": 747},
{"value": 0.588500, "errors": 747},
{"value": 0.589000, "errors": 747},
{"value": 0.589500, "errors": 747},
{"value": 0.590000, "errors": 747},
{"value": 0.590500, "errors": 747},
{"value": 0.591000, "errors": 747},
{"value": 0.591500, "errors": 747},
{"value": 0.592000, "errors": 747},
{"value": 0.592500, "errors": 747},
{"value": 0.593000, "errors": 747},
{"value": 0.593500, "errors": 747},
{"value": 0.594000, "errors": 747},
{"value": 0.594500, "errors": 747},
{"value": 0.595000, "errors": 747},
{"value": 0.595500, "errors": 747},
{"value": 0.596000, "errors": 747},
{"value": 0.596500, "errors": 747},
{"value": 0.597000, "errors": 747},
{"value": 0.597500, "errors": 747},
{"value": 0.598000, "errors": 747},
{"value": 0.598500, "errors": 747},
{"value": 0.599000, "errors": 747},
{"value": 0.599500, "errors": 747},
{"value": 0.600000, "errors": 747},
{"value": 0.600500, "errors": 747},
{"value": 0.601000, "errors": 747},
{"value": 0.601500, "errors": 747},
{"value": 0.602000, "errors": 747},
{"value": 0.602500, "errors": 747},
{"value": 0.603000, "errors": 747},
{"value": 0.603500, "errors": 747},
{"value": 0.604000, "errors": 747},
{"value": 0.604500, "errors": 747},
{"value": 0.605000, "errors": 747},
{"value": 0.605500, "errors": 747},
{"value": 0.606000, "errors": 747},
{"value": 0.606500, "errors": 747},
{"value": 0.607000, "errors": 747},
{"value": 0.607500, "errors": 747},
{"value": 0.608000, "errors": 747},
{"value": 0.608500, "errors": 747},
{"value": 0.609000, "errors": 747},
{"value": 0.609500, "errors": 747},
{"value": 0.610000, "errors": 747},
{"value": 0.610500, "errors": 747},
{"value": 0.611000, "errors": 747},
{"value": 0.611500, "errors": 747},
{"value": 0.612000, "errors": 747},
{"value": 0.612500, "errors": 747},
{"value": 0.613000, "errors": 747},
{"value": 0.613500, "errors": 747},
{"value": 0.614000, "errors": 747},
{"value": 0.614500, "errors": 747},
{"value": 0.615000, "errors": 747},
{"value": 0.615500, "errors": 747},
{"value": 0.616000, "errors": 747},
{"value": 0.616500, "errors": 747},
{"value": 0.617000, "errors": 747},
{"value": 0.617500, "errors": 747},
{"value": 0.618000, "errors": 747},
{"value": 0.618500, "errors": 747},
{"value": 0.619000, "errors": 747},
{"value": 0.619500, "errors": 747},
{"value": 0.620000, "errors": 747},
{"value": 0.620500, "errors": 747},
{"value": 0.621000, "errors": 747},
{"value": 0.621500, "errors": 747},
{"value": 0.622000, "errors": 747},
{"value": 0.622500, "errors": 747},
{"value": 0.623000, "errors": 747},
{"value": 0.623500, "errors": 747},
{"value": 0.624000, "errors": 747},
{"value": 0.624500, "errors": 747},
{"value": 0.625000, "errors": 747},
{"value": 0.625500, "errors": 747},
{"value": 0.626000, "errors": 747},
{"value": 0.626500, "errors": 747},
{"value": 0.627000, "errors": 747},
{"value": 0.627500, "errors": 747},
{"value": 0.628000, "errors": 747},
{"value": 0.628500, "errors": 747},
{"value": 0.629000, "errors": 747},
{"value": 0.629500, "errors": 747},
{"value": 0.630000, "errors": 747},
{"value": 0.630500, "errors": 747},
{"value": 0.631000, "errors": 747},
{"value": 0.631500, "errors": 747},
{"value": 0.632000, "errors": 747},
{"value": 0.632500, "errors": 747},
{"value": 0.633000, "errors": 747},
{"value": 0.633500, "errors": 747},
{"value": 0.634000, "errors": 747},
{"value": 0.634500, "errors": 747},
{"value": 0.635000, "errors": 747},
{"value": 0.635500, "errors": 747},
{"value": 0.636000, "errors": 747},
{"value": 0.636500, "errors": 747},
{"value": 0.637000, "errors": 747},
{"value": 0.637500, "errors": 747},
{"value": 0.638000, "errors": 747},
{"value": 0.638500, "errors": 747},
{"value": 0.639000, "errors": 747},
{"value": 0.639500, "errors": 747},
{"value": 0.640000, "errors": 747},
{"value": 0.640500, "errors": 747},
{"value": 0.641000, "errors": 747},
{"value": 0.641500, "errors": 747},
{"value": 0.642000, "errors": 747},
{"value": 0.642500, "errors": 747},
{"value": 0.643000, "errors": 747},
{"value": 0.643500, "errors": 747},
{"value": 0.644000, "errors": 747},
{"value": 0.644500, "errors": 747},
{"value": 0.645000, "errors": 747},
{"value": 0.645500, "errors": 747},
{"value": 0.646000, "errors": 747},
{"value": 0.646500, "errors": 747},
{"value": 0.647000, "errors": 747},
{"value": 0.647500, "errors": 747},
{"value": 0.648000, "errors": 747},
{"value": 0.648500, "errors": 747},
{"value": 0.649000, "errors": 747},
{"value": 0.649500, "errors": 747},
{"value": 0.650000, "errors": 747},
{"value": 0.650500, "errors": 747},
{"value": 0.651000, "errors": 747},
{"value": 0.651500, "errors": 747},
{"value": 0.652000, "errors": 747},
{"value": 0.652500, "errors": 747},
{"value": 0.653000, "errors": 747},
{"value": 0.653500, "errors": 747},
{"value": 0.654000, "errors": 747},
{"value": 0.654500, "errors": 747},
{"value": 0.655000, "errors": 747},
{"value": 0.655500, "errors": 747},
{"value": 0.656000, "errors": 747},
{"value": 0.656500, "errors": 747},
{"value": 0.657000, "errors": 747},
{"value": 0.657500, "errors": 747},
{"value": 0.658000, "errors": 747},
{"value": 0.658500, "errors": 747},
{"value": 0.659000, "errors": 747},
{"value": 0.659500, "errors": 747},
{"value": 0.660000, "errors": 747},
{"value": 0.660500, "errors": 747},
{"value": 0.661000, "errors": 747},
{"value": 0.661500, "errors": 747},
{"value": 0.662000, "errors": 747},
{"value": 0.662500, "errors": 747},
{"value": 0.663000, "errors": 747},
{"value": 0.663500, "errors": 747},
{"value": 0.664000, "errors": 747},
{"value": 0.664500, "errors": 747},
{"value": 0.665000, "errors": 747},
{"value": 0.665500, "errors": 747},
{"value": 0.666000, "errors": 747},
{"value": 0.666500, "errors": 747},
{"value": 0.667000, "errors": 747},
{"value": 0.667500, "errors": 747},
{"value": 0.668000, "errors": 747},
{"value": 0.668500, "errors": 747},
{"value": 0.669000, "errors": 747},
{"value": 0.669500, "errors": 747},
{"value": 0.670000, "errors": 747},
{"value": 0.670500, "errors": 747},
{"value": 0.671000, "errors": 747},
{"value": 0.671500, "errors": 747},
{"value": 0.672000, "errors": 747},
{"value": 0.672500, "errors": 747},
{"value": 0.673000, "errors": 747},
{"value": 0.673500, "errors": 747},
{"value": 0.674000, "errors": 747},
{"value": 0.674500, "errors": 747},
{"value": 0.675000, "errors": 747},
{"value": 0.675500, "errors": 747},
{"value": 0.676000, "errors": 747},
{"value": 0.676500, "errors": 747},
{"value": 0.677000, "errors": 747},
{"value": 0.677500, "errors": 747},
{"value": 0.678000, "errors": 747},
{"value": 0.678500, "errors": 747},
{"value": 0.679000, "errors": 747},
{"value": 0.679500, "errors": 747},
{"value": 0.680000, "errors": 747},
{"value": 0.680500, "errors": 747},
{"value": 0.681000, "errors": 747},
{"value": 0.681500, "errors": 747},
{"value": 0.682000, "errors": 747},
{"value": 0.682500, "errors": 747},
{"value": 0.683000, "errors": 747},
{"value": 0.683500, "errors": 747},
{"value": 0.684000, "errors": 747},
{"value": 0.684500, "errors": 747},
{"value": 0.685000, "errors": 747},
{"value": 0.685500, "errors": 747},
{"value": 0.686000, "errors": 747},
{"value": 0.686500, "errors": 747},
{"value": 0.687000, "errors": 747},
{"value": 0.687500, "errors": 747},
{"value": 0.688000, "errors": 747},
{"value": 0.688500, "errors": 747},
{"value": 0.689000, "errors": 747},
{"value": 0.689500, "errors": 747},
{"value": 0.690000, "errors": 747},
{"value": 0.690500, "errors": 747},
{"value": 0.691000, "errors": 747},
{"value": 0.691500, "errors": 747},
{"value": 0.692000, "errors": 747},
{"value": 0.692500, "errors": 747},
{"value": 0.693000, "errors": 747},
{"value": 0.693500, "errors": 747},
{"value": 0.694000, "errors": 747},
{"value": 0.694500, "errors": 747},
{"value": 0.695000, "errors": 747},
{"value": 0.695500, "errors": 747},
{"value": 0.696000, "errors": 747},
{"value": 0.696500, "errors": 747},
{"value": 0.697000, "errors": 747},
{"value": 0.697500, "errors": 747},
{"value": 0.698000, "errors": 747},
{"value": 0.698500, "errors": 747},
{"value": 0.699000, "errors": 747},
{"value": 0.699500, "errors": 747},
{"value": 0.700000, "errors": 747},
]
x = []
y = []
for value in data:
x.append(value["value"])
y.append(value["errors"])
from pandas import DataFrame
d = {"x": x, "y": y}
df = DataFrame(d)  # tabular view of the data; the plotting below uses the raw lists
import matplotlib.pyplot as plt
plt.plot(x, y, 'ro')
plt.ylabel('errors')
plt.xlabel('threshold_p_for_first_filter_separator_character')
plt.title('threshold_p_for_first_filter_separator_character vs errors count')
# Polynomial (a degree-4 fit helper) is not imported in this snippet; it is assumed
# to be defined elsewhere in the repository.
polynomial = Polynomial(x, y, 4)
new_x = []
new_y = []
current_x = 0.
while current_x < 0.62:
new_x.append(current_x)
new_y.append(polynomial.getval(current_x))
current_x += 0.00005
plt.plot(new_x, new_y, 'ro')
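# Aside: a minimal sketch of the same fit without the repo's own Polynomial helper.
# numpy.polyfit is assumed here as a stand-in that produces an equivalent 4th-degree
# least-squares fit of the error counts against the threshold values.
import numpy as np
coeffs = np.polyfit(x, y, 4)      # coefficients, highest degree first
fitted = np.poly1d(coeffs)        # callable polynomial object
plt.plot(new_x, fitted(new_x), 'b-')  # overlay the numpy-based fit for comparison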
print(polynomial.getval(0.))
plt.show() | mit |
Srisai85/scipy | scipy/stats/kde.py | 27 | 17303 | #-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Written by: Robert Kern
#
# Date: 2004-08-09
#
# Modified: 2005-02-10 by Robert Kern.
# Contributed to Scipy
# 2005-10-07 by Robert Kern.
# Some fixes to match the new scipy_core
#
# Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
from __future__ import division, print_function, absolute_import
# Standard library imports.
import warnings
# Scipy imports.
from scipy._lib.six import callable, string_types
from scipy import linalg, special
from numpy import atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, \
ravel, power, atleast_1d, squeeze, sum, transpose
import numpy as np
from numpy.random import randint, multivariate_normal
# Local imports.
from . import mvn
__all__ = ['gaussian_kde']
class gaussian_kde(object):
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
evaluate
__call__
integrate_gaussian
integrate_box_1d
integrate_box
integrate_kde
pdf
logpdf
resample
set_bandwidth
covariance_factor
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
>>> def measure(n):
... "Measurement model, return two coupled measurements."
... m1 = np.random.normal(size=n)
... m2 = np.random.normal(scale=0.5, size=n)
... return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
"""
def __init__(self, dataset, bw_method=None):
self.dataset = atleast_2d(dataset)
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
self.set_bandwidth(bw_method=bw_method)
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
"""
points = atleast_2d(points)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
result = zeros((m,), dtype=float)
if m >= self.n:
# there are more points than data, so loop over data
for i in range(self.n):
diff = self.dataset[:, i, newaxis] - points
tdiff = dot(self.inv_cov, diff)
energy = sum(diff*tdiff,axis=0) / 2.0
result = result + exp(-energy)
else:
# loop over points
for i in range(m):
diff = self.dataset - points[:, i, newaxis]
tdiff = dot(self.inv_cov, diff)
energy = sum(diff * tdiff, axis=0) / 2.0
result[i] = sum(exp(-energy), axis=0)
result = result / self._norm_factor
return result
__call__ = evaluate
def integrate_gaussian(self, mean, cov):
"""
Multiply estimated density by a multivariate Gaussian and integrate
over the whole space.
Parameters
----------
mean : array_like
A 1-D array, specifying the mean of the Gaussian.
cov : array_like
A 2-D array, specifying the covariance matrix of the Gaussian.
Returns
-------
result : scalar
The value of the integral.
Raises
------
ValueError :
If the mean or covariance of the input Gaussian differs from
the KDE's dimensionality.
"""
mean = atleast_1d(squeeze(mean))
cov = atleast_2d(cov)
if mean.shape != (self.d,):
raise ValueError("mean does not have dimension %s" % self.d)
if cov.shape != (self.d, self.d):
raise ValueError("covariance does not have dimension %s" % self.d)
# make mean a column vector
mean = mean[:, newaxis]
sum_cov = self.covariance + cov
diff = self.dataset - mean
tdiff = dot(linalg.inv(sum_cov), diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result = sum(exp(-energies), axis=0) / sqrt(linalg.det(2 * pi *
sum_cov)) / self.n
return result
def integrate_box_1d(self, low, high):
"""
Computes the integral of a 1D pdf between two bounds.
Parameters
----------
low : scalar
Lower bound of integration.
high : scalar
Upper bound of integration.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDE is over more than one dimension.
"""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
stdev = ravel(sqrt(self.covariance))[0]
normalized_low = ravel((low - self.dataset) / stdev)
normalized_high = ravel((high - self.dataset) / stdev)
value = np.mean(special.ndtr(normalized_high) -
special.ndtr(normalized_low))
return value
def integrate_box(self, low_bounds, high_bounds, maxpts=None):
"""Computes the integral of a pdf over a rectangular interval.
Parameters
----------
low_bounds : array_like
A 1-D array containing the lower bounds of integration.
high_bounds : array_like
A 1-D array containing the upper bounds of integration.
maxpts : int, optional
The maximum number of points to use for integration.
Returns
-------
value : scalar
The result of the integral.
"""
if maxpts is not None:
extra_kwds = {'maxpts': maxpts}
else:
extra_kwds = {}
value, inform = mvn.mvnun(low_bounds, high_bounds, self.dataset,
self.covariance, **extra_kwds)
if inform:
msg = ('An integral in mvn.mvnun requires more points than %s' %
(self.d * 1000))
warnings.warn(msg)
return value
def integrate_kde(self, other):
"""
Computes the integral of the product of this kernel density estimate
with another.
Parameters
----------
other : gaussian_kde instance
The other kde.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDEs have different dimensionality.
"""
if other.d != self.d:
raise ValueError("KDEs are not the same dimensionality")
# we want to iterate over the smallest number of points
if other.n < self.n:
small = other
large = self
else:
small = self
large = other
sum_cov = small.covariance + large.covariance
sum_cov_chol = linalg.cho_factor(sum_cov)
result = 0.0
for i in range(small.n):
mean = small.dataset[:, i, newaxis]
diff = large.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result += sum(exp(-energies), axis=0)
result /= sqrt(linalg.det(2 * pi * sum_cov)) * large.n * small.n
return result
def resample(self, size=None):
"""
Randomly sample a dataset from the estimated pdf.
Parameters
----------
size : int, optional
The number of samples to draw. If not provided, then the size is
the same as the underlying dataset.
Returns
-------
resample : (self.d, `size`) ndarray
The sampled dataset.
"""
if size is None:
size = self.n
norm = transpose(multivariate_normal(zeros((self.d,), float),
self.covariance, size=size))
indices = randint(0, self.n, size=size)
means = self.dataset[:, indices]
return means + norm
def scotts_factor(self):
return power(self.n, -1./(self.d+4))
def silverman_factor(self):
return power(self.n*(self.d+2.0)/4.0, -1./(self.d+4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that
multiplies the data covariance matrix to obtain the kernel covariance
matrix. The default is `scotts_factor`. A subclass can overwrite this
method to provide a different method, or set it through a call to
`kde.set_bandwidth`."""
def set_bandwidth(self, bw_method=None):
"""Compute the estimator bandwidth with given method.
The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a callable,
it should take a `gaussian_kde` instance as only parameter and
return a scalar. If None (default), nothing happens; the current
`kde.covariance_factor` method is kept.
Notes
-----
.. versionadded:: 0.11
Examples
--------
>>> import scipy.stats as stats
>>> x1 = np.array([-7, -5, 1, 4, 5.])
>>> kde = stats.gaussian_kde(x1)
>>> xs = np.linspace(-10, 10, num=50)
>>> y1 = kde(xs)
>>> kde.set_bandwidth(bw_method='silverman')
>>> y2 = kde(xs)
>>> kde.set_bandwidth(bw_method=kde.factor / 3.)
>>> y3 = kde(xs)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.plot(x1, np.ones(x1.shape) / (4. * x1.size), 'bo',
... label='Data points (rescaled)')
>>> ax.plot(xs, y1, label='Scott (default)')
>>> ax.plot(xs, y2, label='Silverman')
>>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
>>> ax.legend()
>>> plt.show()
"""
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif np.isscalar(bw_method) and not isinstance(bw_method, string_types):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
self._compute_covariance()
def _compute_covariance(self):
"""Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self._data_covariance = atleast_2d(np.cov(self.dataset, rowvar=1,
bias=False))
self._data_inv_cov = linalg.inv(self._data_covariance)
self.covariance = self._data_covariance * self.factor**2
self.inv_cov = self._data_inv_cov / self.factor**2
self._norm_factor = sqrt(linalg.det(2*pi*self.covariance)) * self.n
def pdf(self, x):
"""
Evaluate the estimated pdf on a provided set of points.
Notes
-----
This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
docstring for more details.
"""
return self.evaluate(x)
def logpdf(self, x):
"""
Evaluate the log of the estimated pdf on a provided set of points.
Notes
-----
See `gaussian_kde.evaluate` for more details; this method simply
returns ``np.log(gaussian_kde.evaluate(x))``.
"""
return np.log(self.evaluate(x))
| bsd-3-clause |
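# A short usage sketch for the gaussian_kde class above (not part of the original file):
# it checks that set_bandwidth() switches kde.factor between Scott's and Silverman's
# rules exactly as the covariance_factor docstring describes.
import numpy as np
from scipy.stats import gaussian_kde

samples = np.random.normal(size=(1, 200))         # d = 1 dimension, n = 200 points
kde = gaussian_kde(samples)                       # Scott's rule by default
print(kde.factor, 200 ** (-1.0 / 5))              # n**(-1/(d+4)) with d = 1
kde.set_bandwidth(bw_method='silverman')
print(kde.factor, (200 * 3 / 4.0) ** (-1.0 / 5))  # (n*(d+2)/4)**(-1/(d+4))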
simvisage/oricreate | docs/howtos/ex08_rigid_facets/sim031miura_ori_psi_cntl.py | 1 | 2750 | r'''
Fold the Miura ori crease pattern using psi control
---------------------------------------------------
'''
import numpy as np
from oricreate.api import \
SimulationTask, SimulationConfig, \
FTV, FTA
from oricreate.gu import \
GuConstantLength, GuDofConstraints, GuPsiConstraints, fix
def create_cp_factory():
# begin
from oricreate.api import MiuraOriCPFactory
cp_factory = MiuraOriCPFactory(L_x=30,
L_y=21,
n_x=2,
n_y=2,
d_0=3.0,
d_1=-3.0)
# end
return cp_factory
if __name__ == '__main__':
cpf = create_cp_factory()
cp = cpf.formed_object
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
cp.plot_mpl(ax, facets=True)
plt.tight_layout()
plt.show()
# Link the crease factory it with the constraint client
gu_constant_length = GuConstantLength()
dof_constraints = fix(cpf.N_grid[0, 1], [1]) \
+ fix(cpf.N_grid[1, 1], [0, 1, 2]) \
+ fix(cpf.N_grid[1, (0, -1)], [2])
gu_dof_constraints = GuDofConstraints(dof_constraints=dof_constraints)
psi_max = np.pi / 4.0
diag_psi_constraints = [([(i, 1.0)], 0) for i in cpf.L_d_grid.flatten()]
gu_psi_constraints = \
GuPsiConstraints(forming_task=cpf,
psi_constraints=diag_psi_constraints +
[([(cpf.L_h_grid[1, 1], 1.0)],
lambda t: -psi_max * t),
])
sim_config = SimulationConfig(goal_function_type='none',
gu={'cl': gu_constant_length,
'dofs': gu_dof_constraints,
'psi': gu_psi_constraints},
acc=1e-5, MAX_ITER=10)
sim_task = SimulationTask(previous_task=cpf,
config=sim_config,
n_steps=5)
cp = sim_task.formed_object
cp.u[cpf.N_grid[(0, -1), 1], 2] = -1.0
sim_task.u_1
ftv = FTV()
#ftv.add(sim_task.sim_history.viz3d['node_numbers'], order=5)
ftv.add(sim_task.sim_history.viz3d['cp'])
ftv.add(gu_dof_constraints.viz3d['default'])
fta = FTA(ftv=ftv)
fta.init_view(a=200, e=35, d=50, f=(0, 0, 0), r=0)
fta.add_cam_move(a=200, e=34, n=5, d=50, r=0,
duration=10,
# vot_fn=lambda cmt: np.linspace(0, 1, 4),
azimuth_move='damped',
elevation_move='damped',
distance_move='damped')
fta.plot()
fta.configure_traits()
| gpl-3.0 |
wanggang3333/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
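# Note: sklearn.learning_curve is the pre-0.18 location; in current scikit-learn
# releases the same validation_curve function is imported from sklearn.model_selection.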
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
beni55/SimpleCV | SimpleCV/examples/util/ColorCube.py | 13 | 1901 | from SimpleCV import Image, Camera, Display, Color
import pygame as pg
import numpy as np
from pylab import *
from mpl_toolkits.mplot3d import axes3d
from matplotlib.backends.backend_agg import FigureCanvasAgg
import cv2
bins = 8
#precompute
idxs = []
colors = []
offset = bins/2
skip = 255/bins
for x in range(0,bins):
for y in range(0,bins):
for z in range(0,bins):
b = ((x*skip)+offset)/255.0
g = ((y*skip)+offset)/255.0
r = ((z*skip)+offset)/255.0
idxs.append((x,y,z,(r,g,b)))
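# idxs now holds one tuple per histogram bin: the (blue, green, red) bin indices used
# as 3-D plot coordinates, plus that bin centre's colour as an (r, g, b) tuple for matplotlib.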
# plot points in 3D
cam = Camera()
disp = Display((800,600))
fig = figure()
fig.set_size_inches( (10,7) )
canvas = FigureCanvasAgg(fig)
azim = 0
while disp.isNotDone():
ax = fig.gca(projection='3d')
ax.set_xlabel('BLUE', color=(0,0,1) )
ax.set_ylabel('GREEN',color=(0,1,0))
ax.set_zlabel('RED',color=(1,0,0))
# Get the color histogram
img = cam.getImage().scale(0.3)
rgb = img.getNumpyCv2()
hist = cv2.calcHist([rgb],[0,1,2],None,[bins,bins,bins],[0,256,0,256,0,256])
hist = hist/np.max(hist)
# render everything
[ ax.plot([x],[y],[z],'.',markersize=max(hist[x,y,z]*100,6),color=color) for x,y,z,color in idxs if(hist[x][y][z]>0) ]
#[ ax.plot([x],[y],[z],'.',color=color) for x,y,z,color in idxs if(hist[x][y][z]>0) ]
ax.set_xlim3d(0, bins-1)
ax.set_ylim3d(0, bins-1)
ax.set_zlim3d(0, bins-1)
azim = (azim+0.5)%360
ax.view_init(elev=35, azim=azim)
########### convert matplotlib to SimpleCV image
canvas.draw()
renderer = canvas.get_renderer()
raw_data = renderer.tostring_rgb()
size = canvas.get_width_height()
surf = pg.image.fromstring(raw_data, size, "RGB")
figure = Image(surf)
############ All done
figure = figure.floodFill((0,0), tolerance=5,color=Color.WHITE)
result = figure.blit(img, pos=(20,20))
result.save(disp)
fig.clf()
| bsd-3-clause |
barnabytprowe/great3-public | validation/plot_variable_submission.py | 2 | 3710 | #!/usr/bin/env python
# Copyright (c) 2014, the GREAT3 executive committee (http://www.great3challenge.info/?q=contacts)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""@file plot_variable_submission.py
Handy command line executable script for plotting up a GREAT3 variable shear submission.
"""
# Constants
NFIELDS = 10
NBINS_THETA = 15
YLIM_EMODE = 2.e-5
YLIM_BMODE = 2.e-5
def plot(submission_filename, output_filename, nfields=NFIELDS, nbins_theta=NBINS_THETA,
ylim_emode=YLIM_EMODE, ylim_bmode=YLIM_BMODE):
"""Plot a submission.
"""
import numpy as np
import matplotlib.pyplot as plt
# Load the data from the input submission
data = np.loadtxt(submission_filename)
field, theta, map_E, map_B, maperr = (
data[:, 0].astype(int), data[:, 1], data[:, 2], data[:, 3], data[:, 4])
# Then plot (largely borrowed from the code in server/great3/evaluate.py)
plt.figure(figsize=(10, 8))
plt.subplot(211)
for ifield in range(nfields):
plt.semilogx(
theta[ifield * nbins_theta: (ifield + 1) * nbins_theta],
map_E[ifield * nbins_theta: (ifield + 1) * nbins_theta], label="Field "+str(ifield))
plt.ylim(-ylim_emode, ylim_emode)
plt.title(submission_filename+" E-mode")
plt.ylabel("Ap. Mass Dispersion")
plt.axhline(ls="--", color="k")
plt.legend()
plt.subplot(212)
for ifield in range(nfields):
plt.semilogx(
theta[ifield * nbins_theta: (ifield + 1) * nbins_theta],
map_B[ifield * nbins_theta: (ifield + 1) * nbins_theta], label="Field "+str(ifield))
plt.ylim(-ylim_bmode, ylim_bmode)
plt.title(submission_filename+" B-mode")
plt.xlabel("Theta [degrees]")
plt.ylabel("Ap. Mass Dispersion")
plt.axhline(ls="--", color="k")
plt.legend()
plt.savefig(output_filename)
return
if __name__ == "__main__":
import sys
# Get the input and output filenames from the command line
if len(sys.argv) != 3:
print "plot_variable_submission.py"
print "usage: ./plot_variable_submission.py input_submission output_filename"
sys.exit(1)
submission_filename = sys.argv[1]
output_filename = sys.argv[2]
plot(submission_filename, output_filename)
| bsd-3-clause |
skrzym/monday-morning-quarterback | Research/report.py | 1 | 13031 | from matplotlib import pyplot as plt
import matplotlib.ticker as plticker
import seaborn as sns
import pandas as pd
import numpy as np
import math
import warnings
from collections import Counter
import nfldatatools as nfltools
rs_pbp = nfltools.gather_data(playoffs=False)
po_pbp = nfltools.gather_data(playoffs=True)
sns.set_style("whitegrid")
#Set general plot properties
sns.set_style("white", {"axes.grid": True})
sns.set_context({"figure.figsize": (10, 7)})
################################################
# Figure 1 - HeatMap
def fig1():
filters=[
['Season', 2009, '>=']
]
yard_grouping = 10
fig,(ax1, ax2) = plt.subplots(1,2,figsize=(15,10))
nfltools.plotPassingHeatMap(rs_pbp, filters=filters, ax=ax1, yard_grouping=yard_grouping)
nfltools.plotPassingHeatMap(po_pbp, filters=filters, ax=ax2, yard_grouping=yard_grouping)
return fig
figure_1 = fig1()
###############################################################
# Figure 1 - HeatMap
def match(playtype):
valid_play_types = [
'Field Goal',
'Pass',
'Run',
'QB Kneel',
'Punt',
'Extra Point',
'Sack',
'Spike',
'Timeout'
]
return playtype in valid_play_types
def condense_pbp_data(df):
new_df = df[['qtr', 'down', 'TimeUnder','TimeSecs', 'yrdline100', 'ScoreDiff', 'PlayType','Season']]
new_df = new_df[new_df.PlayType.map(match)]
new_df = new_df[new_df['down'].isnull()==False]
return new_df
playoffs = condense_pbp_data(po_pbp)
regular = condense_pbp_data(rs_pbp)
def makeDF(season=2009):
rdf = regular#[regular.Season==season]
rdf = rdf.groupby('PlayType').agg({'qtr':len}).reset_index()
rdf.columns = ['PlayType', 'Count']
rdf['Percent Total'] = rdf.Count/rdf.Count.sum()*100
rdf['ID'] = 'Regular'
pdf = playoffs[playoffs.Season==season]
pdf = pdf.groupby('PlayType').agg({'qtr':len}).reset_index()
pdf.columns = ['PlayType', 'Count']
pdf['Percent Total'] = pdf.Count/pdf.Count.sum()*100
pdf['ID'] = 'Playoffs'
x = rdf.append(pdf, ignore_index=True)
fig, ax1 = plt.subplots(1,1,figsize=(12,10))
sns.barplot(ax=ax1, data=x, y='PlayType', x='Percent Total',hue='ID', order=['Pass', 'Run', 'Punt', 'Field Goal', 'QB Kneel'])
ax1.set_xlim(0,60)
return fig
figure_2 = makeDF()
###############################################################
# Figure 1 - HeatMap
def fig3():
sns.set_style('whitegrid')
sns.set_palette(['blue', 'green','red'])
fig, axes = plt.subplots(2, 1, figsize=(15,15))
shade = True
bw = '2'
sns.kdeplot(ax=axes[0],data=rs_pbp[rs_pbp.PlayType == 'Pass'].ScoreDiff.dropna(),label='Pass',shade=shade,bw=bw)
sns.kdeplot(ax=axes[0],data=rs_pbp[rs_pbp.PlayType == 'Run'].ScoreDiff.dropna(),label='Run',shade=shade,bw=bw)
sns.kdeplot(ax=axes[0],data=rs_pbp[rs_pbp.PlayType == 'Extra Point'].ScoreDiff.dropna(),label='Extra Point',shade=shade,bw=bw)
axes[0].set_xlim(-40,40)
axes[0].set_ylim(0,0.09)
sns.kdeplot(ax=axes[1],data=po_pbp[po_pbp.PlayType == 'Pass'].ScoreDiff.dropna(),label='Pass',shade=shade,bw=bw)
sns.kdeplot(ax=axes[1],data=po_pbp[po_pbp.PlayType == 'Run'].ScoreDiff.dropna(),label='Run',shade=shade,bw=bw)
sns.kdeplot(ax=axes[1],data=po_pbp[po_pbp.PlayType == 'Extra Point'].ScoreDiff.dropna(),label='Extra Point',shade=shade,bw=bw)
axes[1].set_xlim(-40,40)
axes[1].set_ylim(0,0.09)
#SMOOTH IT OUT!
return fig
figure_3 = fig3()
###############################################################
# Figure 4 - Play Type Counts by Quarter and Field Position
def plot_PlayType(df,stat,playtypelist=['Pass','Run','Field Goal','QB Kneel','Punt'],percent_total=False):
g = df.groupby([stat,'PlayType']).count().reset_index()
g = g[g.columns[0:3]]
last_col_name = g.columns[-1]
g1 = g.groupby([stat, 'PlayType']).agg({last_col_name: 'sum'})
if percent_total:
g1 = g1.groupby(level=1).apply(lambda x: 100 * x / float(x.sum()))
g1 = g1.reset_index()
g1 = g1[g1.PlayType.apply(lambda x: x in playtypelist)]
return sns.barplot(x=stat, y=last_col_name, hue="PlayType", data=g1)
def fig4():
fig = plt.figure(figsize=(16,32))
ax3 = fig.add_subplot(513)
ax3 = plot_PlayType(regular,'qtr',['Run','Pass'],False)
ax4 = fig.add_subplot(514)
ax4 = plot_PlayType(regular,'yrdline100',['Run','Pass'],False)
ax4.xaxis.set_ticks(range(4, 99, 5))
ax4.xaxis.set_ticklabels(range(5,100,5))
ax4.grid(True,'major','both')
return fig
figure_4 = fig4()
###############################################################
# Figure 5 - Pass/Run Density over Game Time
def fig5():
fig, axes = plt.subplots(2,1,figsize=(14,7))
sns.kdeplot(ax=axes[0],data=regular[regular.PlayType == 'Pass'].TimeSecs,bw=20,label='Pass')
sns.kdeplot(ax=axes[0],data=regular[regular.PlayType == 'Run'].TimeSecs,bw=20,label='Run')
loc = plticker.MultipleLocator(base=120.0) # this locator puts ticks at regular intervals
axes[0].xaxis.set_major_locator(loc)
axes[0].set_xlim(0,3600)
axes[0].set_ylim(0,0.00085)
axes[0].vlines([x*60 for x in [15,30,45]],0,0.0009,colors='black')
axes[0].grid(True,'major','y')
axes[0].grid(False,'major','x')
sns.kdeplot(ax=axes[1],data=playoffs[playoffs.PlayType == 'Pass'].TimeSecs,bw=20,label='Pass')
sns.kdeplot(ax=axes[1],data=playoffs[playoffs.PlayType == 'Run'].TimeSecs,bw=20,label='Run')
loc = plticker.MultipleLocator(base=120.0) # this locator puts ticks at regular intervals
axes[1].xaxis.set_major_locator(loc)
axes[1].set_xlim(0,3600)
axes[1].set_ylim(0,0.00085)
axes[1].vlines([x*60 for x in [15,30,45]],0,0.0009,colors='black')
axes[1].grid(True,'major','y')
axes[1].grid(False,'major','x')
return fig
figure_5 = fig5()
#################################################################
# Figure 6 - Field Goal Results by Distance
def fig6():
rs_fg = rs_pbp[rs_pbp.PlayType =='Field Goal'].groupby('FieldGoalResult').agg({'Date':len}).reset_index()
rs_fg.columns=['FieldGoalResult', 'Count']
rs_fg['Percent Total'] = rs_fg.Count.apply(lambda x: 100 * x / float(rs_fg.Count.sum()))
po_fg = po_pbp[po_pbp.PlayType =='Field Goal'].groupby('FieldGoalResult').agg({'Date':len}).reset_index()
po_fg.columns=['FieldGoalResult', 'Count']
po_fg['Percent Total'] = po_fg.Count.apply(lambda x: 100 * x / float(po_fg.Count.sum()))
sns.set_palette(['green', 'orange', 'red'])
fig, axes = plt.subplots(2, 2,sharey=True,figsize=(14,7))
order = ['Good','Blocked','No Good']
sns.violinplot(ax=axes[0][0], data=rs_pbp[rs_pbp.PlayType=='Field Goal'], x='FieldGoalDistance', y='FieldGoalResult',order=order, scale='width', bw=0.05)
sns.violinplot(ax=axes[1][0], data=po_pbp[po_pbp.PlayType=='Field Goal'], x='FieldGoalDistance', y='FieldGoalResult',order=order, scale='width', bw=0.05)
axes[0][0].set_xlim(0,100)
axes[1][0].set_xlim(0,100)
sns.barplot(ax=axes[0][1], data=rs_fg,y='FieldGoalResult', x='Percent Total',order=order)
sns.barplot(ax=axes[1][1], data=po_fg,y='FieldGoalResult', x='Percent Total',order=order)
axes[0][1].set_xlim(0,100)
axes[1][1].set_xlim(0,100)
axes[0][1].set_xticklabels(['0%','20%','40%','60%','80%','100%'])
axes[1][1].set_xticklabels(['0%','20%','40%','60%','80%','100%'])
axes[0][0].set_title('Field Goal Results by Distance')
axes[0][0].set_xlabel('')
axes[0][0].set_ylabel('Regular Season')
axes[0][1].set_title('Field Goal Results Distribution')
axes[0][1].set_xlabel('')
axes[0][1].set_ylabel('')
axes[1][0].set_ylabel('Playoffs')
axes[1][0].set_xlabel('Field Goal Distance (yds)')
axes[1][0].figure
axes[1][1].set_ylabel('')
axes[1][1].set_xlabel('Percent Total')
return fig
figure_6 = fig6()
#####################################################################
# Figure 7 - Pass/Rush Ratio by Team
teams = [['ARI', 'Arizona', 'Cardinals', 'Arizona Cardinals'],
['ATL', 'Atlanta', 'Falcons', 'Atlanta Falcons'],
['BAL', 'Baltimore', 'Ravens', 'Baltimore Ravens'],
['BUF', 'Buffalo', 'Bills', 'Buffalo Bills'],
['CAR', 'Carolina', 'Panthers', 'Carolina Panthers'],
['CHI', 'Chicago', 'Bears', 'Chicago Bears'],
['CIN', 'Cincinnati', 'Bengals', 'Cincinnati Bengals'],
['CLE', 'Cleveland', 'Browns', 'Cleveland Browns'],
['DAL', 'Dallas', 'Cowboys', 'Dallas Cowboys'],
['DEN', 'Denver', 'Broncos', 'Denver Broncos'],
['DET', 'Detroit', 'Lions', 'Detroit Lions'],
['GB', 'Green Bay', 'Packers', 'Green Bay Packers', 'G.B.', 'GNB'],
['HOU', 'Houston', 'Texans', 'Houston Texans'],
['IND', 'Indianapolis', 'Colts', 'Indianapolis Colts'],
['JAC', 'Jacksonville', 'Jaguars', 'Jacksonville Jaguars', 'JAX'],
['KC', 'Kansas City', 'Chiefs', 'Kansas City Chiefs', 'K.C.', 'KAN'],
['LA', 'Los Angeles', 'Rams', 'Los Angeles Rams', 'L.A.'],
['MIA', 'Miami', 'Dolphins', 'Miami Dolphins'],
['MIN', 'Minnesota', 'Vikings', 'Minnesota Vikings'],
['NE', 'New England', 'Patriots', 'New England Patriots', 'N.E.', 'NWE'],
['NO', 'New Orleans', 'Saints', 'New Orleans Saints', 'N.O.', 'NOR'],
['NYG', 'Giants', 'New York Giants', 'N.Y.G.'],
['NYJ', 'Jets', 'New York Jets', 'N.Y.J.'],
['OAK', 'Oakland', 'Raiders', 'Oakland Raiders'],
['PHI', 'Philadelphia', 'Eagles', 'Philadelphia Eagles'],
['PIT', 'Pittsburgh', 'Steelers', 'Pittsburgh Steelers'],
['SD', 'San Diego', 'Chargers', 'San Diego Chargers', 'S.D.', 'SDG'],
['SEA', 'Seattle', 'Seahawks', 'Seattle Seahawks'],
['SF', 'San Francisco', '49ers', 'San Francisco 49ers', 'S.F.', 'SFO'],
['STL', 'St. Louis', 'Rams', 'St. Louis Rams', 'S.T.L.'],
['TB', 'Tampa Bay', 'Buccaneers', 'Tampa Bay Buccaneers', 'T.B.', 'TAM'],
['TEN', 'Tennessee', 'Titans', 'Tennessee Titans'],
['WAS', 'Washington', 'Redskins', 'Washington Redskins', 'WSH']]
teams_dict = {x[3]:x[0] for x in teams}
# Jacksonville Data Fix
rs_pbp.posteam = rs_pbp.posteam.replace('JAX', 'JAC')
rs_pbp.HomeTeam = rs_pbp.HomeTeam.replace('JAX', 'JAC')
rs_pbp.AwayTeam = rs_pbp.AwayTeam.replace('JAX', 'JAC')
pass_rush_attempts_by_team = rs_pbp.groupby(['posteam','Season']).agg(sum)[['PassAttempt','RushAttempt']]
pass_rush_attempts_by_team['PassRushRatio'] = pass_rush_attempts_by_team.apply(lambda x: (x.PassAttempt * 1.0) / x.RushAttempt, axis=1)
sns.set_palette('muted')
plot_df = pass_rush_attempts_by_team
plot_teams = teams_dict
def plotPassRushByTeam(team_focus_1, team_focus_2):
fig,ax = plt.subplots(1,1,figsize=(15,8))
for team in plot_teams:
        if (plot_teams[team] != team_focus_1) and (plot_teams[team] != team_focus_2):
plt.plot(plot_df.loc[plot_teams[team]]['PassRushRatio'], color='0.91')
plt.plot(plot_df.loc[team_focus_1]['PassRushRatio'], color='Blue', axes=ax)
plt.plot(plot_df.loc[team_focus_2]['PassRushRatio'], color='Red', axes=ax)
return fig
def fig7():
sns.set_style('white')
return plotPassRushByTeam(team_focus_1 = 'NYG', team_focus_2 = 'NYJ')
figure_7 = fig7()
##########################################################
# Figure 8 - Pass/Rush Ratio and Playoff Appearances
playoff_teams = {year:po_pbp.mask('Season',year).posteam.dropna().unique().tolist() for year in np.arange(2009,2017,1)}
def madeit(row):
team, season = row.name
return int(team in playoff_teams[season])
next_df = pass_rush_attempts_by_team.copy()
next_df['PO'] = next_df.apply(madeit, axis=1)
next_df.reset_index().groupby(['posteam','PO']).agg({'PassRushRatio':np.mean}).reset_index().pivot('posteam','PO','PassRushRatio')
def fig8():
sns.set_context('talk')
#sns.heatmap(data = pass_rush_attempts_by_team.reset_index().pivot('posteam','PO','PassRushRatio'),
# vmin=0,vmax=1,square=False,cmap='rainbow', annot=False)
fig,ax = plt.subplots(1,1)
new_df = next_df.reset_index().groupby(['posteam','PO']).agg({'PassRushRatio':np.mean}).reset_index().pivot('posteam','PO','PassRushRatio')
sns.heatmap(data = new_df, square=False, annot=False, cmap='Greens')
return fig
figure_8 = fig8()
############################################################
def fig9():
fig,ax = plt.subplots(1,1)
pass_rush_attempts_by_team.loc['DEN']['PassRushRatio'].plot()
return fig
figure_9 = fig9()
#############################################################
def fig10():
fig, ax = plt.subplots(1,1,figsize=(3,5))
sns.boxplot(data=next_df.reset_index(),x='PO', y='PassRushRatio', ax=ax)
return fig
figure_10 = fig10()
#############################################################
avg_prr_by_team = pass_rush_attempts_by_team.reset_index().groupby('posteam').agg({'PassRushRatio':np.mean}).sort_values('PassRushRatio')
avg_prr_by_season = pass_rush_attempts_by_team.reset_index().groupby('Season').agg({'PassRushRatio':np.mean}).sort_values('PassRushRatio')
def fig11():
with sns.axes_style('ticks'):
fig,ax = plt.subplots(1,1,figsize=(20,7))
sns.boxplot(data=next_df.reset_index(),x='posteam', y='PassRushRatio', ax=ax, order=avg_prr_by_team.index.tolist(),hue='PO')
return fig
figure_11 = fig11()
| mit |
mikeireland/pynrm | go.py | 1 | 3044 | # -*- coding: utf-8 -*-
"""
Created on Fri May 2 13:49:11 2014
@author: mireland
A script for testing... Change this to try out your own analysis.
"""
import astropy.io.fits as pyfits
import numpy as np
import matplotlib.pyplot as plt
from azimuthalAverage import *
# This includes an AO Instrument called "aoinst"
import pypoise
import nirc2
import glob
import pdb
#Create a pypoise instance with a nirc2 AO instrument
pp = pypoise.PYPOISE(nirc2.NIRC2())
plt.ion()
#Reduction Directory - Lp full pupil
pp.aoinst.rdir = '/Users/mireland/tel/nirc2/redux/generic2015/'
pp.aoinst.cdir = '/Users/mireland/tel/nirc2/redux/TauPAH15/'
#Data directory
pp.aoinst.ddir = '/Users/mireland/data/nirc2/151128/'
pp.aoinst.read_summary_csv()
if(False):
pp.process_block(fstart='n1251.fits',fend='n1293.fits', dither=True)
if(False):
pp.process_block(fstart='n1493.fits',fend='n1517.fits', dither=True)
targname = 'AB Aur'
targname = 'SU Aur'
targname = 'RY Tau'
if(True):
#The argument "target_file" is just there to determine which object is the target.
summary_files = pp.poise_process(target=targname, use_powerspect=False)
print(summary_files)
if(True):
# summary_files = glob.glob('*LkCa*poise_cube*.fits')
implane_file = pp.aoinst.cdir + targname + '_implane.fits'
pxscale = 5.0
#pdb.set_trace()
if (True):
kp_implane = pp.kp_to_implane(summary_files=summary_files,
out_file=implane_file, sz=141, pxscale=pxscale, use_powerspect=False)
if (True):
#Automatic from here...
pgrid, crat, crat_sig, chi2, best_rchi2 = pp.implane_fit_binary(implane_file, maxrad=250)
print "Grid Fit: ", pgrid
pgrid = np.array(pgrid)
if (pgrid[2] > 0.5):
print "Contrast too high to use kerphase for fitting (i.e. near-equal binary)."
else:
p,errs,cov = pp.kp_binary_fit(summary_files,pgrid)
fitfile = open(targname + '_binaryfit.txt','w')
fitfile.write('Separation (mas) & Position angle (degs) & Contrast \\\\\n')
fitfile.write('{0:5.2f} $\pm$ {1:5.2f} & {2:5.2f} $\pm$ {3:5.2f} & {4:6.4f} $\pm$ {5:6.4f} \\\\ \n'.format(\
p[0],errs[0], p[1],errs[1], p[2],errs[2]))
fitfile.write('Contrast (mags) & Separation (mas) & Position angle (degs) \\\\\n')
fit_crat = -2.5*np.log10(p[2])
fit_crat_sig = 2.5/np.log(10)*errs[2]/p[2]
fitfile.write('{0:5.2f} $\pm$ {1:5.2f} & {2:5.2f} $\pm$ {3:5.2f} & {4:5.3f} $\pm$ {5:5.3f} \\\\ \n'.format(\
fit_crat, fit_crat_sig, p[0],errs[0], p[1],errs[1] ))
fitfile.close()
a = azimuthalAverage(crat_sig*np.sqrt(best_rchi2), returnradii=True,binsize=1)
sep_null = a[0]*pxscale
contrast_null = -2.5*np.log10(5*a[1])
plt.clf()
plt.plot(sep_null, contrast_null)
plt.title(targname)
plt.xlabel("Separation (milli-arcsec)")
plt.ylabel("5-sigma contrast (mags)")
sep_out = np.arange(20,301,10)
contrast_out = np.interp(sep_out, sep_null, contrast_null)
for i in range(len(sep_out)):
print '{0:4d} {1:5.1f}'.format(int(sep_out[i]),contrast_out[i])
plt.axis((0,300,2,7))
plt.savefig(pp.aoinst.cdir + targname + '_contrast_curve.png')
| mit |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/mpl_toolkits/axisartist/axis_artist.py | 7 | 52735 | """
axis_artist.py module provides axis-related artists. They are
* axis line
* tick lines
* tick labels
* axis label
* grid lines
The main artist classes are AxisArtist and GridlinesCollection. The
GridlinesCollection is responsible for drawing grid lines and the
AxisArtist is responsible for all other artists. The AxisArtist class
has attributes that are associated with each type of artist.
* line : axis line
* major_ticks : major tick lines
* major_ticklabels : major tick labels
* minor_ticks : minor tick lines
* minor_ticklabels : minor tick labels
* label : axis label
Typically, the AxisArtist associated with an axes will be accessed with
the *axis* dictionary of the axes, i.e., the AxisArtist for the bottom
axis is
ax.axis["bottom"]
where *ax* is an instance of axes (mpl_toolkits.axislines.Axes). Thus,
ax.axis["bottom"].line is an artist associated with the axis line, and
ax.axis["bottom"].major_ticks is an artist associated with the major tick
lines.
You can change the colors, fonts, line widths, etc. of these artists
by calling a suitable set method. For example, to change the color of the major
ticks of the bottom axis to red,
ax.axis["bottom"].major_ticks.set_color("r")
However, things like the locations of ticks, and their ticklabels need
to be changed from the side of the grid_helper.
axis_direction
--------------
AxisArtist, AxisLabel, TickLabels have an *axis_direction* attribute,
which adjusts the location, angle, etc. The *axis_direction* must be
one of [left, right, bottom, top] and they follow the matplotlib
convention for the rectangle axis.
For example, for the *bottom* axis (the left and right is relative to
the direction of the increasing coordinate),
* ticklabels and axislabel are on the right
* ticklabels and axislabel have text angle of 0
* ticklabels are baseline, center-aligned
* axislabel is top, center-aligned
The text angles are actually relative to (90 + angle of the direction
to the ticklabel), which gives 0 for bottom axis.
===================== ========== ========== ========== ==========
property              left       bottom     right      top
===================== ========== ========== ========== ==========
ticklabels location   left       right      right      left
axislabel location    left       right      right      left
ticklabels angle      90         0          -90        180
axislabel angle       180        0          0          180
ticklabel va          center     baseline   center     baseline
axislabel va          center     top        center     bottom
ticklabel ha          right      center     right      center
axislabel ha          right      center     right      center
===================== ========== ========== ========== ==========
Ticks are by default drawn on the opposite side of the ticklabels. To
draw ticks on the same side as the ticklabels,
ax.axis["bottom"].major_ticks.set_ticks_out(True)
The following attributes can be customized (use the set_xxx methods):
* Ticks : ticksize, tick_out
* TickLabels : pad
* AxisLabel : pad
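
A minimal usage sketch (assuming *ax* is an axes created with
mpl_toolkits.axisartist, e.g. an axisartist Subplot; the particular
values are illustrative only):

    ax.axis["bottom"].major_ticks.set_ticksize(8)
    ax.axis["bottom"].major_ticklabels.set_color("b")
    ax.axis["bottom"].label.set_pad(10)
    ax.axis["top"].toggle(all=False, ticks=True)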
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
# FIXME :
# * : angles are given in data coordinate - need to convert it to canvas coordinate
import matplotlib.axes as maxes
import matplotlib.artist as martist
import matplotlib.text as mtext
import matplotlib.font_manager as font_manager
from matplotlib.path import Path
from matplotlib.transforms import Affine2D, ScaledTranslation, \
IdentityTransform, TransformedPath, Bbox
from matplotlib.collections import LineCollection
from matplotlib import rcParams
from matplotlib.artist import allow_rasterization
import warnings
import numpy as np
import matplotlib.lines as mlines
from .axisline_style import AxislineStyle
class BezierPath(mlines.Line2D):
def __init__(self, path, *kl, **kw):
mlines.Line2D.__init__(self, [], [], *kl, **kw)
self._path = path
self._invalid = False
def recache(self):
self._transformed_path = TransformedPath(self._path, self.get_transform())
self._invalid = False
def set_path(self, path):
self._path = path
self._invalid = True
def draw(self, renderer):
if self._invalid:
self.recache()
if not self._visible: return
renderer.open_group('line2d')
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_foreground(self._color)
gc.set_antialiased(self._antialiased)
gc.set_linewidth(self._linewidth)
gc.set_alpha(self._alpha)
if self.is_dashed():
cap = self._dashcapstyle
join = self._dashjoinstyle
else:
cap = self._solidcapstyle
join = self._solidjoinstyle
gc.set_joinstyle(join)
gc.set_capstyle(cap)
funcname = self._lineStyles.get(self._linestyle, '_draw_nothing')
if funcname != '_draw_nothing':
tpath, affine = self._transformed_path.get_transformed_path_and_affine()
lineFunc = getattr(self, funcname)
lineFunc(renderer, gc, tpath, affine.frozen())
gc.restore()
renderer.close_group('line2d')
class UnimplementedException(Exception):
pass
from matplotlib.artist import Artist
class AttributeCopier(object):
def __init__(self, ref_artist, klass=Artist):
self._klass = klass
self._ref_artist = ref_artist
super(AttributeCopier, self).__init__()
def set_ref_artist(self, artist):
self._ref_artist = artist
def get_ref_artist(self):
        raise RuntimeError("get_ref_artist must be overridden")
#return self._ref_artist
def get_attribute_from_ref_artist(self, attr_name, default_value):
get_attr_method_name = "get_"+attr_name
c = getattr(self._klass, get_attr_method_name)(self)
if c == 'auto':
ref_artist = self.get_ref_artist()
if ref_artist:
attr = getattr(ref_artist,
get_attr_method_name)()
return attr
else:
return default_value
return c
from matplotlib.lines import Line2D
class Ticks(Line2D, AttributeCopier):
"""
Ticks are derived from Line2D, and note that ticks themselves
are markers. Thus, you should use set_mec, set_mew, etc.
    To change the tick size (length), use set_ticksize. To change the
    direction of the ticks (they point away from the ticklabels by
    default), use set_tick_out(True).
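
    For example, a sketch assuming *ax* is an axisartist axes (the
    values are illustrative only):

        ticks = ax.axis["bottom"].major_ticks
        ticks.set_ticksize(8)
        ticks.set_tick_out(True)
        ticks.set_markeredgewidth(1.5)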
"""
def __init__(self, ticksize, tick_out=False, **kwargs):
self._ticksize = ticksize
self.locs_angles_labels = []
self.set_tick_out(tick_out)
self._axis = kwargs.pop("axis", None)
if self._axis is not None:
if "color" not in kwargs:
kwargs["color"] = "auto"
if ("mew" not in kwargs) and ("markeredgewidth" not in kwargs):
kwargs["markeredgewidth"] = "auto"
Line2D.__init__(self, [0.], [0.], **kwargs)
AttributeCopier.__init__(self, self._axis, klass=Line2D)
self.set_snap(True)
def get_ref_artist(self):
#return self._ref_artist.get_ticklines()[0]
return self._ref_artist.majorTicks[0].tick1line
def get_color(self):
return self.get_attribute_from_ref_artist("color", "k")
def get_markeredgecolor(self):
if self._markeredgecolor == 'auto':
return self.get_color()
else:
return self._markeredgecolor
def get_markeredgewidth(self):
return self.get_attribute_from_ref_artist("markeredgewidth", .5)
def set_tick_out(self, b):
"""
set True if tick need to be rotated by 180 degree.
"""
self._tick_out = b
def get_tick_out(self):
"""
Return True if the tick will be rotated by 180 degree.
"""
return self._tick_out
def set_ticksize(self, ticksize):
"""
set length of the ticks in points.
"""
self._ticksize = ticksize
def get_ticksize(self):
"""
Return length of the ticks in points.
"""
return self._ticksize
def set_locs_angles(self, locs_angles):
self.locs_angles = locs_angles
def _update(self, renderer):
pass
_tickvert_path = Path([[0., 0.], [1., 0.]])
def draw(self, renderer):
if not self.get_visible():
return
self._update(renderer) # update the tick
size = self._ticksize
path_trans = self.get_transform()
# set gc : copied from lines.py
# gc = renderer.new_gc()
# self._set_gc_clip(gc)
# gc.set_foreground(self.get_color())
# gc.set_antialiased(self._antialiased)
# gc.set_linewidth(self._linewidth)
# gc.set_alpha(self._alpha)
# if self.is_dashed():
# cap = self._dashcapstyle
# join = self._dashjoinstyle
# else:
# cap = self._solidcapstyle
# join = self._solidjoinstyle
# gc.set_joinstyle(join)
# gc.set_capstyle(cap)
# gc.set_snap(self.get_snap())
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_foreground(self.get_markeredgecolor())
gc.set_linewidth(self.get_markeredgewidth())
gc.set_alpha(self._alpha)
offset = renderer.points_to_pixels(size)
marker_scale = Affine2D().scale(offset, offset)
if self.get_tick_out():
add_angle = 180
else:
add_angle = 0
marker_rotation = Affine2D()
marker_transform = marker_scale + marker_rotation
for loc, angle in self.locs_angles:
marker_rotation.rotate_deg(angle+add_angle)
locs = path_trans.transform_non_affine(np.array([loc, loc]))
renderer.draw_markers(gc, self._tickvert_path, marker_transform,
Path(locs), path_trans.get_affine())
marker_rotation.clear()
gc.restore()
def test_ticks():
import matplotlib.pyplot as plt
fig = plt.figure(1)
fig.clf()
ax = fig.add_subplot(111)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ticks = Ticks(ticksize=10, axis=ax.xaxis)
ax.add_artist(ticks)
locs_angles = [((0.2, 0.), 90),
((0.4, 0.), 120)]
ticks.set_locs_angles(locs_angles)
plt.draw()
class LabelBase(mtext.Text):
"""
    A base class for AxisLabel and TickLabels. The position and angle
    of the text are calculated from the offset_ref_angle,
    text_ref_angle, and offset_radius attributes.
"""
def __init__(self, *kl, **kwargs):
self.locs_angles_labels = []
self._ref_angle = 0
self._offset_radius = 0.
super(LabelBase, self).__init__(*kl,
**kwargs)
self.set_rotation_mode("anchor")
self._text_follow_ref_angle = True
#self._offset_ref_angle = 0
def _set_ref_angle(self, a):
self._ref_angle = a
def _get_ref_angle(self):
return self._ref_angle
def _get_text_ref_angle(self):
if self._text_follow_ref_angle:
return self._get_ref_angle()+90
else:
return 0 #self.get_ref_angle()
def _get_offset_ref_angle(self):
return self._get_ref_angle()
def _set_offset_radius(self, offset_radius):
self._offset_radius = offset_radius
def _get_offset_radius(self):
return self._offset_radius
_get_opposite_direction = {"left":"right",
"right":"left",
"top":"bottom",
"bottom":"top"}.__getitem__
def _update(self, renderer):
pass
def draw(self, renderer):
if not self.get_visible(): return
self._update(renderer)
# save original and adjust some properties
tr = self.get_transform()
angle_orig = self.get_rotation()
offset_tr = Affine2D()
self.set_transform(tr+offset_tr)
text_ref_angle = self._get_text_ref_angle()
offset_ref_angle = self._get_offset_ref_angle()
theta = (offset_ref_angle)/180.*np.pi
dd = self._get_offset_radius()
dx, dy = dd * np.cos(theta), dd * np.sin(theta)
offset_tr.translate(dx, dy)
self.set_rotation(text_ref_angle+angle_orig)
super(LabelBase, self).draw(renderer)
offset_tr.clear()
# restore original properties
self.set_transform(tr)
self.set_rotation(angle_orig)
def get_window_extent(self, renderer):
self._update(renderer)
# save original and adjust some properties
tr = self.get_transform()
angle_orig = self.get_rotation()
offset_tr = Affine2D()
self.set_transform(tr+offset_tr)
text_ref_angle = self._get_text_ref_angle()
offset_ref_angle = self._get_offset_ref_angle()
theta = (offset_ref_angle)/180.*np.pi
dd = self._get_offset_radius()
dx, dy = dd * np.cos(theta), dd * np.sin(theta)
offset_tr.translate(dx, dy)
self.set_rotation(text_ref_angle+angle_orig)
bbox = super(LabelBase, self).get_window_extent(renderer).frozen()
offset_tr.clear()
# restore original properties
self.set_transform(tr)
self.set_rotation(angle_orig)
return bbox
def test_labelbase():
import matplotlib.pyplot as plt
fig = plt.figure(1)
fig.clf()
ax = fig.add_subplot(111)
ax.plot([0.5], [0.5], "o")
label = LabelBase(0.5, 0.5, "Test")
a = -90
label._set_ref_angle(a)
label._set_offset_radius(offset_radius=50)
label.set_rotation(-90)
label.set(ha="center", va="top")
ax.add_artist(label)
plt.draw()
class AxisLabel(LabelBase, AttributeCopier):
"""
    Axis Label. Derived from Text. The position of the text is updated
    on the fly, so changing the text position has no effect. Otherwise, the
    properties can be changed as for a normal Text.
To change the pad between ticklabels and axis label, use set_pad.
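
    For example, a sketch assuming *ax* is an axisartist axes (the pad
    value is illustrative only):

        ax.axis["bottom"].label.set_pad(12)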
"""
def __init__(self, *kl, **kwargs):
axis_direction = kwargs.pop("axis_direction", "bottom")
self._axis = kwargs.pop("axis", None)
#super(AxisLabel, self).__init__(*kl, **kwargs)
LabelBase.__init__(self, *kl, **kwargs)
AttributeCopier.__init__(self, self._axis, klass=LabelBase)
self.set_axis_direction(axis_direction)
self._pad = 5
self._extra_pad = 0
def set_pad(self, pad):
"""
Set the pad in points. Note that the actual pad will be the
        sum of the internal pad and the external pad (which is set
        automatically by the AxisArtist); this method only sets the
        internal pad.
"""
self._pad = pad
def get_pad(self):
"""
return pad in points. See set_pad for more details.
"""
return self._pad
def _set_external_pad(self, p):
"""
        Set the external pad IN PIXELS. This is intended to be set by the
        AxisArtist, not by the user.
"""
self._extra_pad = p
def _get_external_pad(self):
"""
Get external pad.
"""
return self._extra_pad
def get_ref_artist(self):
return self._axis.get_label()
def get_text(self):
t = super(AxisLabel, self).get_text()
if t == "__from_axes__":
return self._axis.get_label().get_text()
return self._text
_default_alignments = dict(left=("bottom", "center"),
right=("top", "center"),
bottom=("top", "center"),
top=("bottom", "center"))
def set_default_alignment(self, d):
if d not in ["left", "right", "top", "bottom"]:
            raise ValueError('direction must be one of "left", "right", "top", "bottom"')
va, ha = self._default_alignments[d]
self.set_va(va)
self.set_ha(ha)
_default_angles = dict(left=180,
right=0,
bottom=0,
top=180)
def set_default_angle(self, d):
if d not in ["left", "right", "top", "bottom"]:
            raise ValueError('direction must be one of "left", "right", "top", "bottom"')
self.set_rotation(self._default_angles[d])
def set_axis_direction(self, d):
"""
Adjust the text angle and text alignment of axis label
according to the matplotlib convention.
===================== ========== ========= ========== ==========
property left bottom right top
===================== ========== ========= ========== ==========
axislabel angle 180 0 0 180
axislabel va center top center bottom
axislabel ha right center right center
===================== ========== ========= ========== ==========
Note that the text angles are actually relative to (90 + angle
of the direction to the ticklabel), which gives 0 for bottom
axis.
"""
if d not in ["left", "right", "top", "bottom"]:
            raise ValueError('direction must be one of "left", "right", "top", "bottom"')
self.set_default_alignment(d)
self.set_default_angle(d)
def get_color(self):
return self.get_attribute_from_ref_artist("color", "k")
def draw(self, renderer):
if not self.get_visible():
return
pad = renderer.points_to_pixels(self.get_pad())
r = self._get_external_pad() + pad
self._set_offset_radius(r)
super(AxisLabel, self).draw(renderer)
def get_window_extent(self, renderer):
if not self.get_visible():
return
pad = renderer.points_to_pixels(self.get_pad())
r = self._get_external_pad() + pad
self._set_offset_radius(r)
bb = super(AxisLabel, self).get_window_extent(renderer)
return bb
class TickLabels(AxisLabel, AttributeCopier): # mtext.Text
"""
    Tick Labels. While derived from Text, this single artist draws all
    ticklabels. As in AxisLabel, the position of the text is updated
    on the fly, so changing the text position has no effect. Otherwise,
    the properties can be changed as for a normal Text. Unlike the
    ticklabels of mainline matplotlib, the properties of a single
    ticklabel alone cannot be modified.
To change the pad between ticks and ticklabels, use set_pad.
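
    For example, a sketch assuming *ax* is an axisartist axes (the
    values are illustrative only):

        ticklabels = ax.axis["bottom"].major_ticklabels
        ticklabels.set_pad(8)
        ticklabels.set_color("gray")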
"""
def __init__(self, **kwargs):
axis_direction = kwargs.pop("axis_direction", "bottom")
AxisLabel.__init__(self, **kwargs)
self.set_axis_direction(axis_direction)
#self._axis_direction = axis_direction
self._axislabel_pad = 0
#self._extra_pad = 0
# attribute copier
def get_ref_artist(self):
return self._axis.get_ticklabels()[0]
def set_axis_direction(self, label_direction):
"""
Adjust the text angle and text alignment of ticklabels
according to the matplotlib convention.
The *label_direction* must be one of [left, right, bottom,
top].
===================== ========== ========= ========== ==========
property left bottom right top
===================== ========== ========= ========== ==========
ticklabels angle 90 0 -90 180
ticklabel va center baseline center baseline
ticklabel ha right center right center
===================== ========== ========= ========== ==========
Note that the text angles are actually relative to (90 + angle
of the direction to the ticklabel), which gives 0 for bottom
axis.
"""
if label_direction not in ["left", "right", "top", "bottom"]:
raise ValueError('direction must be one of "left", "right", "top", "bottom"')
self._axis_direction = label_direction
self.set_default_alignment(label_direction)
self.set_default_angle(label_direction)
def invert_axis_direction(self):
label_direction = self._get_opposite_direction(self._axis_direction)
self.set_axis_direction(label_direction)
def _get_ticklabels_offsets(self, renderer, label_direction):
"""
Calculates the offsets of the ticklabels from the tick and
        their total heights. The offset only takes into account the offset
        due to the vertical alignment of the ticklabels, i.e., if the axis
        direction is bottom and va is 'top', it will return 0. If va
        is 'baseline', it will return (height - descent).
"""
whd_list = self.get_texts_widths_heights_descents(renderer)
if not whd_list:
return 0, 0
r = 0
va, ha = self.get_va(), self.get_ha()
if label_direction == "left":
pad = max([w for (w, h, d) in whd_list])
if ha == "left":
r = pad
elif ha == "center":
r = .5 * pad
elif label_direction == "right":
pad = max([w for (w, h, d) in whd_list])
if ha == "right":
r = pad
elif ha == "center":
r = .5 * pad
elif label_direction == "bottom":
pad = max([h for (w, h, d) in whd_list])
if va == "bottom":
r = pad
elif va == "center":
r =.5 * pad
elif va == "baseline":
max_ascent = max([(h-d) for (w, h, d) in whd_list])
max_descent = max([d for (w, h, d) in whd_list])
r = max_ascent
pad = max_ascent + max_descent
elif label_direction == "top":
pad = max([h for (w, h, d) in whd_list])
if va == "top":
r = pad
elif va == "center":
r =.5 * pad
elif va == "baseline":
max_ascent = max([(h-d) for (w, h, d) in whd_list])
max_descent = max([d for (w, h, d) in whd_list])
r = max_descent
pad = max_ascent + max_descent
#tick_pad = renderer.points_to_pixels(self.get_pad())
# r : offset
# pad : total height of the ticklabels. This will be used to
# calculate the pad for the axislabel.
return r, pad
_default_alignments = dict(left=("center", "right"),
right=("center", "left"),
bottom=("baseline", "center"),
top=("baseline", "center"))
# set_default_alignments(self, d)
_default_angles = dict(left=90,
right=-90,
bottom=0,
top=180)
def draw(self, renderer):
if not self.get_visible():
self._axislabel_pad = self._get_external_pad()
return
r, total_width = self._get_ticklabels_offsets(renderer,
self._axis_direction)
#self._set_external_pad(r+self._get_external_pad())
pad = self._get_external_pad() + \
renderer.points_to_pixels(self.get_pad())
self._set_offset_radius(r+pad)
#self._set_offset_radius(r)
for (x, y), a, l in self._locs_angles_labels:
if not l.strip(): continue
self._set_ref_angle(a) #+ add_angle
self.set_x(x)
self.set_y(y)
self.set_text(l)
LabelBase.draw(self, renderer)
self._axislabel_pad = total_width \
+ pad # the value saved will be used to draw axislabel.
def set_locs_angles_labels(self, locs_angles_labels):
self._locs_angles_labels = locs_angles_labels
def get_window_extents(self, renderer):
if not self.get_visible():
self._axislabel_pad = self._get_external_pad()
return []
bboxes = []
r, total_width = self._get_ticklabels_offsets(renderer,
self._axis_direction)
pad = self._get_external_pad() + \
renderer.points_to_pixels(self.get_pad())
self._set_offset_radius(r+pad)
for (x, y), a, l in self._locs_angles_labels:
self._set_ref_angle(a) #+ add_angle
self.set_x(x)
self.set_y(y)
self.set_text(l)
bb = LabelBase.get_window_extent(self, renderer)
bboxes.append(bb)
self._axislabel_pad = total_width \
+ pad # the value saved will be used to draw axislabel.
return bboxes
def get_texts_widths_heights_descents(self, renderer):
"""
return a list of width, height, descent for ticklabels.
"""
whd_list = []
for (x, y), a, l in self._locs_angles_labels:
if not l.strip(): continue
clean_line, ismath = self.is_math_text(l)
whd = renderer.get_text_width_height_descent(
clean_line, self._fontproperties, ismath=ismath)
whd_list.append(whd)
return whd_list
def test_ticklabels():
import matplotlib.pyplot as plt
fig = plt.figure(1)
fig.clf()
ax = fig.add_subplot(111)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.plot([0.2, 0.4], [0.5, 0.5], "o")
ticks = Ticks(ticksize=10, axis=ax.xaxis)
ax.add_artist(ticks)
locs_angles_labels = [((0.2, 0.5), -90, "0.2"),
((0.4, 0.5), -120, "0.4")]
tick_locs_angles = [(xy, a+180) for xy, a, l in locs_angles_labels]
ticks.set_locs_angles(tick_locs_angles)
ax.plot([0.5], [0.5], ",")
axislabel = AxisLabel(0.5, 0.5, "Test")
axislabel._set_offset_radius(20)
axislabel._set_ref_angle(0)
axislabel.set_axis_direction("bottom")
#axislabel._text_follow_ref_angle = True
#axislabel.set(va="center", ha="right")
ax.add_artist(axislabel)
if 1:
ticklabels = TickLabels(axis_direction="left")
ticklabels._locs_angles_labels = locs_angles_labels
#ticklabels.set_rotation(90)
ticklabels.set_pad(10)
ax.add_artist(ticklabels)
ax.set_xlim(0, 1); ax.set_ylim(0, 1)
plt.draw()
class GridlinesCollection(LineCollection):
def __init__(self, *kl, **kwargs):
"""
*which* : "major" or "minor"
*axis* : "both", "x" or "y"
"""
self._which = kwargs.pop("which", "major")
self._axis = kwargs.pop("axis", "both")
super(GridlinesCollection, self).__init__(*kl, **kwargs)
self.set_grid_helper(None)
def set_which(self, which):
self._which = which
def set_axis(self, axis):
self._axis = axis
def set_grid_helper(self, grid_helper):
self._grid_helper = grid_helper
def draw(self, renderer):
if self._grid_helper is not None:
self._grid_helper.update_lim(self.axes)
gl = self._grid_helper.get_gridlines(self._which, self._axis)
if gl:
self.set_segments([np.transpose(l) for l in gl])
else:
self.set_segments([])
super(GridlinesCollection, self).draw(renderer)
class AxisArtist(martist.Artist):
"""
An artist which draws axis (a line along which the n-th axes coord
is constant) line, ticks, ticklabels, and axis label.
"""
ZORDER=2.5
# LABELPAD : as property
def _set_labelpad(self, v):
return self.label.set_pad(v)
def _get_labelpad(self):
return self.label.get_pad()
LABELPAD = property(_get_labelpad, _set_labelpad)
def __init__(self, axes,
helper,
offset=None,
axis_direction="bottom",
**kw):
"""
*axes* : axes
*helper* : an AxisArtistHelper instance.
"""
#axes is also used to follow the axis attribute (tick color, etc).
super(AxisArtist, self).__init__(**kw)
self.axes = axes
self._axis_artist_helper = helper
if offset is None:
offset = (0, 0)
self.dpi_transform = Affine2D()
self.offset_transform = ScaledTranslation(offset[0], offset[1],
self.dpi_transform)
self._label_visible = True
self._majortick_visible = True
self._majorticklabel_visible = True
self._minortick_visible = True
self._minorticklabel_visible = True
#if self._axis_artist_helper._loc in ["left", "right"]:
if axis_direction in ["left", "right"]:
axis_name = "ytick"
self.axis = axes.yaxis
else:
axis_name = "xtick"
self.axis = axes.xaxis
self._axisline_style = None
self._axis_direction = axis_direction
self._init_line()
self._init_ticks(axis_name, **kw)
self._init_offsetText(axis_direction)
self._init_label()
self.set_zorder(self.ZORDER)
self._rotate_label_along_line = False
# axis direction
self._tick_add_angle = 180.
self._ticklabel_add_angle = 0.
self._axislabel_add_angle = 0.
self.set_axis_direction(axis_direction)
# axis direction
def set_axis_direction(self, axis_direction):
"""
        Adjust the direction, text angle, and text alignment of the
        ticklabels and labels following the matplotlib convention for
        rectangular axes.
The *axis_direction* must be one of [left, right, bottom,
top].
===================== ========== ========= ========== ==========
property left bottom right top
===================== ========== ========= ========== ==========
ticklabels location "-" "+" "+" "-"
axislabel location "-" "+" "+" "-"
ticklabels angle 90 0 -90 180
ticklabel va center baseline center baseline
ticklabel ha right center right center
axislabel angle 180 0 0 180
axislabel va center top center bottom
axislabel ha right center right center
===================== ========== ========= ========== ==========
Note that the direction "+" and "-" are relative to the direction of
the increasing coordinate. Also, the text angles are actually
relative to (90 + angle of the direction to the ticklabel),
which gives 0 for bottom axis.
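
        For example, a sketch (the axis and direction chosen here are
        illustrative only):

            ax.axis["left"].set_axis_direction("top")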
"""
if axis_direction not in ["left", "right", "top", "bottom"]:
            raise ValueError('direction must be one of "left", "right", "top", "bottom"')
self._axis_direction = axis_direction
if axis_direction in ["left", "top"]:
#self._set_tick_direction("+")
self.set_ticklabel_direction("-")
self.set_axislabel_direction("-")
else:
#self._set_tick_direction("-")
self.set_ticklabel_direction("+")
self.set_axislabel_direction("+")
self.major_ticklabels.set_axis_direction(axis_direction)
self.label.set_axis_direction(axis_direction)
# def _set_tick_direction(self, d):
# if d not in ["+", "-"]:
# raise ValueError('direction must be on of "in", "out"')
# if d == "+":
# self._tick_add_angle = 0 #get_helper()._extremes=0, 10
# else:
# self._tick_add_angle = 180 #get_helper()._extremes=0, 10
def set_ticklabel_direction(self, tick_direction):
"""
Adjust the direction of the ticklabel.
ACCEPTS: [ "+" | "-" ]
Note that the label_direction '+' and '-' are relative to the
direction of the increasing coordinate.
"""
if tick_direction not in ["+", "-"]:
raise ValueError('direction must be one of "+", "-"')
if tick_direction == "-":
self._ticklabel_add_angle = 180
else:
self._ticklabel_add_angle = 0
def invert_ticklabel_direction(self):
self._ticklabel_add_angle = (self._ticklabel_add_angle + 180) % 360
self.major_ticklabels.invert_axis_direction()
self.minor_ticklabels.invert_axis_direction()
# def invert_ticks_direction(self):
# self.major_ticks.set_tick_out(not self.major_ticks.get_tick_out())
# self.minor_ticks.set_tick_out(not self.minor_ticks.get_tick_out())
def set_axislabel_direction(self, label_direction):
"""
Adjust the direction of the axislabel.
ACCEPTS: [ "+" | "-" ]
Note that the label_direction '+' and '-' are relative to the
direction of the increasing coordinate.
"""
if label_direction not in ["+", "-"]:
raise ValueError('direction must be one of "+", "-"')
if label_direction == "-":
self._axislabel_add_angle = 180
else:
self._axislabel_add_angle = 0
def get_transform(self):
return self.axes.transAxes + self.offset_transform
def get_helper(self):
"""
Return axis artist helper instance.
"""
return self._axis_artist_helper
def set_axisline_style(self, axisline_style=None, **kw):
"""
Set the axisline style.
*axisline_style* can be a string with axisline style name with optional
comma-separated attributes. Alternatively, the attrs can
be provided as keywords.
set_arrowstyle("->,size=1.5")
set_arrowstyle("->", size=1.5)
Old attrs simply are forgotten.
        Without argument (or with axisline_style=None), return
        available styles as a list of strings.
"""
        if axisline_style is None:
return AxislineStyle.pprint_styles()
if isinstance(axisline_style, AxislineStyle._Base):
self._axisline_style = axisline_style
else:
self._axisline_style = AxislineStyle(axisline_style, **kw)
self._init_line()
def get_axisline_style(self):
"""
return the current axisline style.
"""
return self._axisline_style
def _init_line(self):
"""
Initialize the *line* artist that is responsible to draw the axis line.
"""
tran = self._axis_artist_helper.get_line_transform(self.axes) \
+ self.offset_transform
axisline_style = self.get_axisline_style()
if axisline_style is None:
self.line = BezierPath(self._axis_artist_helper.get_line(self.axes),
color=rcParams['axes.edgecolor'],
linewidth=rcParams['axes.linewidth'],
transform=tran)
else:
self.line = axisline_style(self, transform=tran)
def _draw_line(self, renderer):
self.line.set_path(self._axis_artist_helper.get_line(self.axes))
if self.get_axisline_style() is not None:
self.line.set_line_mutation_scale(self.major_ticklabels.get_size())
self.line.draw(renderer)
def _init_ticks(self, axis_name, **kw):
trans=self._axis_artist_helper.get_tick_transform(self.axes) \
+ self.offset_transform
major_tick_size = kw.get("major_tick_size",
rcParams['%s.major.size'%axis_name])
major_tick_pad = kw.get("major_tick_pad",
rcParams['%s.major.pad'%axis_name])
minor_tick_size = kw.get("minor_tick_size",
rcParams['%s.minor.size'%axis_name])
minor_tick_pad = kw.get("minor_tick_pad",
rcParams['%s.minor.pad'%axis_name])
self.major_ticks = Ticks(major_tick_size,
axis=self.axis,
transform=trans)
self.minor_ticks = Ticks(minor_tick_size,
axis=self.axis,
transform=trans)
if axis_name == "xaxis":
size = rcParams['xtick.labelsize']
else:
size = rcParams['ytick.labelsize']
fontprops = font_manager.FontProperties(size=size)
self.major_ticklabels = TickLabels(size=size, axis=self.axis,
axis_direction=self._axis_direction)
self.minor_ticklabels = TickLabels(size=size, axis=self.axis,
axis_direction=self._axis_direction)
self.major_ticklabels.set(figure = self.axes.figure,
transform=trans,
fontproperties=fontprops)
self.major_ticklabels.set_pad(major_tick_pad)
self.minor_ticklabels.set(figure = self.axes.figure,
transform=trans,
fontproperties=fontprops)
self.minor_ticklabels.set_pad(minor_tick_pad)
def _get_tick_info(self, tick_iter):
"""
return ticks_loc_angle, ticklabels_loc_angle_label
ticks_loc_angle : list of locs and angles for ticks
        ticklabels_loc_angle_label : list of locs, angles and labels for ticklabels
"""
ticks_loc_angle = []
ticklabels_loc_angle_label = []
tick_add_angle = self._tick_add_angle
ticklabel_add_angle = self._ticklabel_add_angle
for loc, angle_normal, angle_tangent, label in tick_iter:
angle_label = angle_tangent - 90
angle_label += ticklabel_add_angle
if np.cos((angle_label - angle_normal)/180.*np.pi) < 0.:
angle_tick = angle_normal
else:
angle_tick = angle_normal + 180
ticks_loc_angle.append([loc, angle_tick])
ticklabels_loc_angle_label.append([loc, angle_label, label])
return ticks_loc_angle, ticklabels_loc_angle_label
def _update_ticks(self, renderer):
# set extra pad for major and minor ticklabels:
# use ticksize of majorticks even for minor ticks. not clear what is best.
dpi_cor = renderer.points_to_pixels(1.)
if self.major_ticks.get_visible() and self.major_ticks.get_tick_out():
self.major_ticklabels._set_external_pad(self.major_ticks._ticksize*dpi_cor)
self.minor_ticklabels._set_external_pad(self.major_ticks._ticksize*dpi_cor)
else:
self.major_ticklabels._set_external_pad(0)
self.minor_ticklabels._set_external_pad(0)
majortick_iter, minortick_iter = \
self._axis_artist_helper.get_tick_iterators(self.axes)
tick_loc_angle, ticklabel_loc_angle_label \
= self._get_tick_info(majortick_iter)
self.major_ticks.set_locs_angles(tick_loc_angle)
self.major_ticklabels.set_locs_angles_labels(ticklabel_loc_angle_label)
#self.major_ticks.draw(renderer)
#self.major_ticklabels.draw(renderer)
# minor ticks
tick_loc_angle, ticklabel_loc_angle_label \
= self._get_tick_info(minortick_iter)
self.minor_ticks.set_locs_angles(tick_loc_angle)
self.minor_ticklabels.set_locs_angles_labels(ticklabel_loc_angle_label)
#self.minor_ticks.draw(renderer)
#self.minor_ticklabels.draw(renderer)
#if (self.major_ticklabels.get_visible() or self.minor_ticklabels.get_visible()):
# self._draw_offsetText(renderer)
return self.major_ticklabels.get_window_extents(renderer)
def _draw_ticks(self, renderer):
extents = self._update_ticks(renderer)
self.major_ticks.draw(renderer)
self.major_ticklabels.draw(renderer)
self.minor_ticks.draw(renderer)
self.minor_ticklabels.draw(renderer)
if (self.major_ticklabels.get_visible() or self.minor_ticklabels.get_visible()):
self._draw_offsetText(renderer)
return extents
def _draw_ticks2(self, renderer):
# set extra pad for major and minor ticklabels:
# use ticksize of majorticks even for minor ticks. not clear what is best.
dpi_cor = renderer.points_to_pixels(1.)
if self.major_ticks.get_visible() and self.major_ticks.get_tick_out():
self.major_ticklabels._set_external_pad(self.major_ticks._ticksize*dpi_cor)
self.minor_ticklabels._set_external_pad(self.major_ticks._ticksize*dpi_cor)
else:
self.major_ticklabels._set_external_pad(0)
self.minor_ticklabels._set_external_pad(0)
majortick_iter, minortick_iter = \
self._axis_artist_helper.get_tick_iterators(self.axes)
tick_loc_angle, ticklabel_loc_angle_label \
= self._get_tick_info(majortick_iter)
self.major_ticks.set_locs_angles(tick_loc_angle)
self.major_ticklabels.set_locs_angles_labels(ticklabel_loc_angle_label)
self.major_ticks.draw(renderer)
self.major_ticklabels.draw(renderer)
# minor ticks
tick_loc_angle, ticklabel_loc_angle_label \
= self._get_tick_info(minortick_iter)
self.minor_ticks.set_locs_angles(tick_loc_angle)
self.minor_ticklabels.set_locs_angles_labels(ticklabel_loc_angle_label)
self.minor_ticks.draw(renderer)
self.minor_ticklabels.draw(renderer)
if (self.major_ticklabels.get_visible() or self.minor_ticklabels.get_visible()):
self._draw_offsetText(renderer)
return self.major_ticklabels.get_window_extents(renderer)
_offsetText_pos = dict(left=(0, 1, "bottom", "right"),
right=(1, 1, "bottom", "left"),
bottom=(1, 0, "top", "right"),
top=(1, 1, "bottom", "right"))
def _init_offsetText(self, direction):
x,y,va,ha = self._offsetText_pos[direction]
self.offsetText = mtext.Annotation("",
xy=(x,y), xycoords="axes fraction",
xytext=(0,0), textcoords="offset points",
#fontproperties = fp,
color = rcParams['xtick.color'],
verticalalignment=va,
horizontalalignment=ha,
)
self.offsetText.set_transform(IdentityTransform())
self.axes._set_artist_props(self.offsetText)
def _update_offsetText(self):
self.offsetText.set_text( self.axis.major.formatter.get_offset() )
self.offsetText.set_size(self.major_ticklabels.get_size())
offset = self.major_ticklabels.get_pad() + self.major_ticklabels.get_size() + 2.
self.offsetText.xyann= (0, offset)
def _draw_offsetText(self, renderer):
self._update_offsetText()
self.offsetText.draw(renderer)
def _init_label(self, **kw):
# x in axes coords, y in display coords (to be updated at draw
# time by _update_label_positions)
labelsize = kw.get("labelsize",
rcParams['axes.labelsize'])
#labelcolor = kw.get("labelcolor",
# rcParams['axes.labelcolor'])
fontprops = font_manager.FontProperties(
size=labelsize,
weight=rcParams['axes.labelweight'])
textprops = dict(fontproperties = fontprops)
#color = labelcolor)
tr = self._axis_artist_helper.get_axislabel_transform(self.axes) \
+ self.offset_transform
self.label = AxisLabel(0, 0, "__from_axes__",
color = "auto", #rcParams['axes.labelcolor'],
fontproperties=fontprops,
axis=self.axis,
transform=tr,
axis_direction=self._axis_direction,
)
self.label.set_figure(self.axes.figure)
labelpad = kw.get("labelpad", 5)
self.label.set_pad(labelpad)
def _update_label(self, renderer):
if not self.label.get_visible():
return
fontprops = font_manager.FontProperties(
size=rcParams['axes.labelsize'],
weight=rcParams['axes.labelweight'])
#pad_points = self.major_tick_pad
#print self._ticklabel_add_angle - self._axislabel_add_angle
#if abs(self._ticklabel_add_angle - self._axislabel_add_angle)%360 > 90:
if self._ticklabel_add_angle != self._axislabel_add_angle:
if (self.major_ticks.get_visible() and not self.major_ticks.get_tick_out()) \
or \
(self.minor_ticks.get_visible() and not self.major_ticks.get_tick_out()):
axislabel_pad = self.major_ticks._ticksize
else:
axislabel_pad = 0
else:
axislabel_pad = max([self.major_ticklabels._axislabel_pad,
self.minor_ticklabels._axislabel_pad])
#label_offset = axislabel_pad + self.LABELPAD
#self.label._set_offset_radius(label_offset)
self.label._set_external_pad(axislabel_pad)
xy, angle_tangent = self._axis_artist_helper.get_axislabel_pos_angle(self.axes)
if xy is None: return
angle_label = angle_tangent - 90
x, y = xy
self.label._set_ref_angle(angle_label+self._axislabel_add_angle)
self.label.set(x=x, y=y)
def _draw_label(self, renderer):
self._update_label(renderer)
self.label.draw(renderer)
def _draw_label2(self, renderer):
if not self.label.get_visible():
return
fontprops = font_manager.FontProperties(
size=rcParams['axes.labelsize'],
weight=rcParams['axes.labelweight'])
#pad_points = self.major_tick_pad
#print self._ticklabel_add_angle - self._axislabel_add_angle
#if abs(self._ticklabel_add_angle - self._axislabel_add_angle)%360 > 90:
if self._ticklabel_add_angle != self._axislabel_add_angle:
if (self.major_ticks.get_visible() and not self.major_ticks.get_tick_out()) \
or \
(self.minor_ticks.get_visible() and not self.major_ticks.get_tick_out()):
axislabel_pad = self.major_ticks._ticksize
else:
axislabel_pad = 0
else:
axislabel_pad = max([self.major_ticklabels._axislabel_pad,
self.minor_ticklabels._axislabel_pad])
#label_offset = axislabel_pad + self.LABELPAD
#self.label._set_offset_radius(label_offset)
self.label._set_external_pad(axislabel_pad)
xy, angle_tangent = self._axis_artist_helper.get_axislabel_pos_angle(self.axes)
if xy is None: return
angle_label = angle_tangent - 90
x, y = xy
self.label._set_ref_angle(angle_label+self._axislabel_add_angle)
self.label.set(x=x, y=y)
self.label.draw(renderer)
def set_label(self, s):
self.label.set_text(s)
def get_tightbbox(self, renderer):
if not self.get_visible(): return
self._axis_artist_helper.update_lim(self.axes)
dpi_cor = renderer.points_to_pixels(1.)
self.dpi_transform.clear().scale(dpi_cor, dpi_cor)
bb = []
self._update_ticks(renderer)
#if self.major_ticklabels.get_visible():
bb.extend(self.major_ticklabels.get_window_extents(renderer))
#if self.minor_ticklabels.get_visible():
bb.extend(self.minor_ticklabels.get_window_extents(renderer))
self._update_label(renderer)
#if self.label.get_visible():
bb.append(self.label.get_window_extent(renderer))
bb.append(self.offsetText.get_window_extent(renderer))
bb = [b for b in bb if b and (b.width!=0 or b.height!=0)]
if bb:
_bbox = Bbox.union(bb)
return _bbox
else:
return None
#self._draw_line(renderer)
#self._draw_ticks(renderer)
#self._draw_offsetText(renderer)
#self._draw_label(renderer)
@allow_rasterization
def draw(self, renderer):
'Draw the axis lines, tick lines and labels'
if not self.get_visible(): return
renderer.open_group(__name__)
self._axis_artist_helper.update_lim(self.axes)
dpi_cor = renderer.points_to_pixels(1.)
self.dpi_transform.clear().scale(dpi_cor, dpi_cor)
self._draw_ticks(renderer)
self._draw_line(renderer)
#self._draw_offsetText(renderer)
self._draw_label(renderer)
renderer.close_group(__name__)
#def get_ticklabel_extents(self, renderer):
# pass
def toggle(self, all=None, ticks=None, ticklabels=None, label=None):
"""
Toggle visibility of ticks, ticklabels, and (axis) label.
To turn all off, ::
axis.toggle(all=False)
To turn all off but ticks on ::
axis.toggle(all=False, ticks=True)
To turn all on but (axis) label off ::
          axis.toggle(all=True, label=False)
"""
if all:
_ticks, _ticklabels, _label = True, True, True
elif all is not None:
_ticks, _ticklabels, _label = False, False, False
else:
_ticks, _ticklabels, _label = None, None, None
if ticks is not None:
_ticks = ticks
if ticklabels is not None:
_ticklabels = ticklabels
if label is not None:
_label = label
if _ticks is not None:
self.major_ticks.set_visible(_ticks)
self.minor_ticks.set_visible(_ticks)
if _ticklabels is not None:
self.major_ticklabels.set_visible(_ticklabels)
self.minor_ticklabels.set_visible(_ticklabels)
if _label is not None:
self.label.set_visible(_label)
def test_axis_artist():
global axisline
#self._axislines[loc] = new_fixed_axis(loc=loc, axes=axes)
from mpl_toolkits.axisartist import AxisArtistHelperRectlinear
fig = plt.figure(1)
fig.clf()
ax=fig.add_subplot(111)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
if 1:
_helper = AxisArtistHelperRectlinear.Fixed(ax, loc="left")
axisline = AxisArtist(ax, _helper, offset=None, axis_direction="left")
ax.add_artist(axisline)
_helper = AxisArtistHelperRectlinear.Fixed(ax, loc="right")
axisline = AxisArtist(ax, _helper, offset=None, axis_direction="right")
ax.add_artist(axisline)
_helper = AxisArtistHelperRectlinear.Fixed(ax, loc="bottom")
axisline = AxisArtist(ax, _helper, offset=None, axis_direction="bottom")
axisline.set_label("TTT")
#axisline.label.set_visible(False)
ax.add_artist(axisline)
#axisline.major_ticklabels.set_axis_direction("bottom")
axisline.major_ticks.set_tick_out(False)
ax.set_ylabel("Test")
axisline.label.set_pad(5)
plt.draw()
def test_axis_artist2():
global axisline
#self._axislines[loc] = new_fixed_axis(loc=loc, axes=axes)
from mpl_toolkits.axislines import AxisArtistHelperRectlinear
fig = plt.figure(1)
fig.clf()
ax=fig.add_subplot(111)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
_helper = AxisArtistHelperRectlinear.Fixed(ax, loc="bottom")
axisline = AxisArtist(ax, _helper, offset=None, axis_direction="bottom")
axisline.set_label("TTT")
ax.add_artist(axisline)
#axisline.major_ticklabels.set_axis_direction("bottom")
axisline.major_ticks.set_tick_out(False)
ax.set_ylabel("Test")
plt.draw()
if __name__ == "__main__":
#test_labelbase()
#test_ticklabels()
test_axis_artist()
#test_axis_artist2()
# DONE
# *. ticks, ticklabels, axislabels
# *. workon axisartist
# TODO
| mit |
cwu2011/scikit-learn | examples/linear_model/plot_lasso_coordinate_descent_path.py | 254 | 2639 | """
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positve elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
# Display results
plt.figure(1)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_enet), coefs_enet.T, linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_positive_lasso), coefs_positive_lasso.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_enet), coefs_enet.T)
l2 = plt.plot(-np.log10(alphas_positive_enet), coefs_positive_enet.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
plt.axis('tight')
plt.show()
| bsd-3-clause |
cxmo/project-beta | code/dataprep_script.py | 4 | 1758 |
""" The following script will apply a 3mm Gaussian filter on all the data spatially
and will save each smoothed run into the data folder as 'smoothed_run_i', where
0 <= i <= 7 is the index of the run.
"""
#Import libraries
import numpy as np
import scipy
import scipy.ndimage
from scipy.ndimage.filters import gaussian_filter
import nibabel as nb
import matplotlib.pyplot as plt
import utils.data_loading as dl
#All file strings corresponding to BOLD data for subject 4
files = ['../data/task001_run001.bold_dico.nii.gz', '../data/task001_run002.bold_dico.nii.gz',
'../data/task001_run003.bold_dico.nii.gz', '../data/task001_run004.bold_dico.nii.gz',
'../data/task001_run005.bold_dico.nii.gz', '../data/task001_run006.bold_dico.nii.gz',
'../data/task001_run007.bold_dico.nii.gz', '../data/task001_run008.bold_dico.nii.gz']
all_data = []
for index, filename in enumerate(files):
new_data = dl.load_data(filename) #load_data function drops first 4 for us
num_vols = new_data.shape[-1]
if index != 0 and index != 7:
new_num_vols = num_vols - 4
new_data = new_data[:,:,:,:new_num_vols] #Drop last 4 volumes for middle runs
all_data.append(new_data)
#Smooth each run volume by volume and save the result to the data folder
for index, run in enumerate(all_data):
num_vols = np.shape(run)[-1]
run_i_smoothed = []
for time in range(num_vols):
smoothed = dl.smooth_gauss(run, 3, time)
smoothed.shape = (132, 175, 48, 1)
run_i_smoothed.append(smoothed)
run_i_smoothed = np.concatenate(run_i_smoothed, axis = 3)
np.save('../data/smoothed_run_' + str(index), run_i_smoothed) #save in data folder
print('finished run' + str(index))
run_i_smoothed = None #Save memory space
| bsd-3-clause |
waynenilsen/statsmodels | statsmodels/sandbox/examples/try_quantile_regression1.py | 33 | 1188 | '''Example to illustrate Quantile Regression
Author: Josef Perktold
polynomial regression with systematic deviations above
'''
import numpy as np
from statsmodels.compat.python import zip
from scipy import stats
import statsmodels.api as sm
from statsmodels.regression.quantile_regression import QuantReg
sige = 0.1
nobs, k_vars = 500, 3
x = np.random.uniform(-1, 1, size=nobs)
x.sort()
exog = np.vander(x, k_vars+1)[:,::-1]
mix = 0.1 * stats.norm.pdf(x[:,None], loc=np.linspace(-0.5, 0.75, 4), scale=0.01).sum(1)
y = exog.sum(1) + mix + sige * (np.random.randn(nobs)/2 + 1)**3
p = 0.5
res_qr = QuantReg(y, exog).fit(p)
res_qr2 = QuantReg(y, exog).fit(0.1)
res_qr3 = QuantReg(y, exog).fit(0.75)
res_ols = sm.OLS(y, exog).fit()
params = [res_ols.params, res_qr2.params, res_qr.params, res_qr3.params]
labels = ['ols', 'qr 0.1', 'qr 0.5', 'qr 0.75']
import matplotlib.pyplot as plt
plt.figure()
plt.plot(x, y, '.', alpha=0.5)
for lab, beta in zip(labels, params):
print('%-8s'%lab, np.round(beta, 4))
fitted = np.dot(exog, beta)
lw = 2
plt.plot(x, fitted, lw=lw, label=lab)
plt.legend()
plt.title('Quantile Regression')
plt.show()
| bsd-3-clause |
hsiaoyi0504/scikit-learn | examples/classification/plot_lda.py | 164 | 2224 | """
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.lda import LDA
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LDA(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LDA(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="LDA with shrinkage", color='r')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="LDA", color='g')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('LDA vs. shrinkage LDA (1 discriminative feature)')
plt.show()
| bsd-3-clause |
bjackman/lisa | libs/utils/perf_analysis.py | 3 | 6952 | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pylab as pl
import re
import sys
import trappy
import logging
# Regexp to match an rt-app generated logfile
TASK_NAME_RE = re.compile('.*\/rt-app-(.+)-[0-9]+.log')
class PerfAnalysis(object):
def __init__(self, datadir, tasks=None):
# Dataframe of all tasks performance data
self.perf_data = {}
# Folder containing all rt-app data
self.datadir = None
# Setup logging
self._log = logging.getLogger('PerfAnalysis')
# Load performance data generated by rt-app workloads
self.__loadRTAData(datadir, tasks)
# Keep track of the datadir from where data have been loaded
if len(self.perf_data) == 0:
raise ValueError('No performance data found on folder [{0:s}]'\
.format(datadir))
self.datadir = datadir
def __taskNameFromLog(self, logfile):
tname_match = re.search(TASK_NAME_RE, logfile)
if tname_match is None:
raise ValueError('The logfile [{0:s}] is not from rt-app'\
.format(logfile))
return tname_match.group(1)
def __logfileFromTaskName(self, taskname):
for logfile in glob.glob(
'{0:s}/rt-app-{1:s}.log'.format(self.datadir, taskname)):
return logfile
raise ValueError('No rt-app logfile found for task [{0:s}]'\
.format(taskname))
def tasks(self):
"""
Return the list of tasks for which performance data have been loaded
"""
if self.datadir is None:
raise ValueError("rt-app performance data not (yet) loaded")
return self.perf_data.keys()
def logfile(self, task):
"""
Return the logfile for the specified task
"""
if task not in self.perf_data:
raise ValueError('No logfile loaded for task [{0:s}]'\
.format(task))
return self.perf_data[task]['logfile']
def df(self, task):
"""
Return the PANDAS dataframe with the performance data for the
specified task
"""
if self.datadir is None:
raise ValueError("rt-app performance data not (yet) loaded")
if task not in self.perf_data:
raise ValueError('No dataframe loaded for task [{0:s}]'\
.format(task))
return self.perf_data[task]['df']
def __loadRTAData(self, datadir, tasks):
"""
        Load performance data of an rt-app workload
"""
if tasks is None:
# Lookup for all rt-app logfile into the specified datadir
for logfile in glob.glob('{0:s}/rt-app-*.log'.format(datadir)):
task_name = self.__taskNameFromLog(logfile)
self.perf_data[task_name] = {}
self.perf_data[task_name]['logfile'] = logfile
self._log.debug('Found rt-app logfile for task [%s]', task_name)
else:
# Lookup for specified rt-app task logfile into specified datadir
for task in tasks:
logfile = self.__logfileFromTaskName(task)
                self.perf_data[task] = {}
                self.perf_data[task]['logfile'] = logfile
                self._log.debug('Found rt-app logfile for task [%s]', task)
# Load all the found logfile into a dataset
for task in self.perf_data.keys():
self._log.debug('Loading dataframe for task [%s]...', task)
df = pd.read_table(self.logfile(task),
sep='\s+',
skiprows=1,
header=0,
usecols=[1,2,3,4,7,8,9,10],
names=[
'Cycles', 'Run' ,'Period', 'Timestamp',
'Slack', 'CRun', 'CPeriod', 'WKPLatency'
])
# Normalize time to [s] with origin on the first event
start_time = df['Timestamp'][0]/1e6
df['Time'] = df['Timestamp']/1e6 - start_time
df.set_index(['Time'], inplace=True)
# Add performance metrics column, performance is defined as:
# slack
# perf = -------------
# period - run
df['PerfIndex'] = df['Slack'] / (df['CPeriod'] - df['CRun'])
# Keep track of the loaded dataframe
self.perf_data[task]['df'] = df
def plotPerf(self, task, title=None):
"""
Plot the Latency/Slack and Performance data for the specified task
"""
# Grid
gs = gridspec.GridSpec(2, 2, height_ratios=[4,1], width_ratios=[3,1]);
gs.update(wspace=0.1, hspace=0.1);
# Figure
plt.figure(figsize=(16, 2*6));
if title:
plt.suptitle(title, y=.97, fontsize=16,
horizontalalignment='center');
# Plot: Slack and Latency
axes = plt.subplot(gs[0,0]);
axes.set_title('Task [{0:s}] (start) Latency and (completion) Slack'\
.format(task));
data = self.df(task)[['Slack', 'WKPLatency']]
data.plot(ax=axes, drawstyle='steps-post', style=['b', 'g']);
# axes.set_xlim(x_min, x_max);
axes.xaxis.set_visible(False);
# Plot: Performance
axes = plt.subplot(gs[1,0]);
axes.set_title('Task [{0:s}] Performance Index'.format(task));
data = self.df(task)[['PerfIndex',]]
data.plot(ax=axes, drawstyle='steps-post');
axes.set_ylim(0, 2);
# axes.set_xlim(x_min, x_max);
# Plot: Slack Histogram
axes = plt.subplot(gs[0:2,1]);
data = self.df(task)[['PerfIndex',]]
data.hist(bins=30, ax=axes, alpha=0.4);
# axes.set_xlim(x_min, x_max);
pindex_avg = data.mean()[0];
pindex_std = data.std()[0];
self._log.info('PerfIndex, Task [%s] avg: %.2f, std: %.2f',
task, pindex_avg, pindex_std)
axes.axvline(pindex_avg, color='b', linestyle='--', linewidth=2);
# Save generated plots into datadir
figname = '{}/task_perf_{}.png'.format(self.datadir, task)
pl.savefig(figname, bbox_inches='tight')
| apache-2.0 |
jpmpentwater/cvxpy | examples/expr_trees/1D_convolution.py | 12 | 1453 | #!/usr/bin/env python
from cvxpy import *
import numpy as np
import random
from math import pi, sqrt, exp
def gauss(n=11,sigma=1):
r = range(-int(n/2),int(n/2)+1)
return [1 / (sigma * sqrt(2*pi)) * exp(-float(x)**2/(2*sigma**2)) for x in r]
np.random.seed(5)
random.seed(5)
DENSITY = 0.008
n = 1000
x = Variable(n)
# Create sparse signal.
signal = np.zeros(n)
nnz = 0
for i in range(n):
if random.random() < DENSITY:
signal[i] = random.uniform(0, 100)
nnz += 1
# Gaussian kernel.
m = 1001
kernel = gauss(m, m/10)
# Noisy signal.
std = 1
noise = np.random.normal(scale=std, size=n+m-1)
noisy_signal = conv(kernel, signal) #+ noise
gamma = Parameter(sign="positive")
fit = norm(conv(kernel, x) - noisy_signal, 2)
regularization = norm(x, 1)
constraints = [x >= 0]
gamma.value = 0.06
prob = Problem(Minimize(fit), constraints)
solver_options = {"NORMALIZE": True, "MAX_ITERS": 2500,
"EPS":1e-3}
result = prob.solve(solver=SCS,
verbose=True,
NORMALIZE=True,
MAX_ITERS=2500)
# Get problem matrix.
data, dims = prob.get_problem_data(solver=SCS)
# Plot result and fit.
import matplotlib.pyplot as plt
plt.plot(range(n), signal, label="true signal")
plt.plot(range(n), np.asarray(noisy_signal.value[:n, 0]), label="noisy convolution")
plt.plot(range(n), np.asarray(x.value[:,0]), label="recovered signal")
plt.legend(loc='upper right')
plt.show()
| gpl-3.0 |
shyamalschandra/scikit-learn | examples/decomposition/plot_image_denoising.py | 181 | 5819 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of the Lena image, using first an online :ref:`DictionaryLearning` fit and then
various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the result of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is in addition closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import lena
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
###############################################################################
# Load Lena image and extract patches
lena = lena() / 256.0
# downsample for higher speed
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena /= 4.0
height, width = lena.shape
# Distort the right half of the image
print('Distorting image...')
distorted = lena.copy()
distorted[:, height // 2:] += 0.075 * np.random.randn(width, height // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :height // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from Lena patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray, interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, lena, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, height // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = lena.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(
patches, (width, height // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], lena,
title + ' (time: %.1fs)' % dt)
plt.show()
| bsd-3-clause |
shangwuhencc/scikit-learn | examples/applications/face_recognition.py | 191 | 5513 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset::
precision recall f1-score support
Ariel Sharon 0.67 0.92 0.77 13
Colin Powell 0.75 0.78 0.76 60
Donald Rumsfeld 0.78 0.67 0.72 27
George W Bush 0.86 0.86 0.86 146
Gerhard Schroeder 0.76 0.76 0.76 25
Hugo Chavez 0.67 0.67 0.67 15
Tony Blair 0.81 0.69 0.75 36
avg / total 0.80 0.80 0.80 322
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the 2D data matrix directly (as relative pixel
# positions info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split into a training set and a test set using a stratified k fold
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
| bsd-3-clause |
latticelabs/Mitty | setup.py | 1 | 2920 | from setuptools import setup, find_packages
__version__ = eval(open('mitty/version.py').read().split('=')[1])
setup(
name='mitty',
version=__version__,
description='Simulator for genomic data',
author='Seven Bridges Genomics',
author_email='kaushik.ghose@sbgenomics.com',
packages=find_packages(include=['mitty*']),
include_package_data=True,
entry_points={
# Register the built in plugins
'mitty.plugins.sfs': ['double_exp = mitty.plugins.site_frequency.double_exp'],
'mitty.plugins.variants': ['snp = mitty.plugins.variants.snp_plugin',
'delete = mitty.plugins.variants.delete_plugin',
'uniformdel = mitty.plugins.variants.uniform_deletions',
'uniformins = mitty.plugins.variants.uniform_insertions',
'insert = mitty.plugins.variants.insert_plugin',
#'inversion = mitty.plugins.variants.inversion_plugin',
#'low_entropy_insert = mitty.plugins.variants.low_entropy_insert_plugin'
],
'mitty.plugins.population': ['standard = mitty.plugins.population.standard',
'vn = mitty.plugins.population.vn'],
'mitty.plugins.reads': ['simple_sequential = mitty.plugins.reads.simple_sequential_plugin',
'simple_illumina = mitty.plugins.reads.simple_illumina_plugin'],
# Command line scripts
'console_scripts': ['genomes = mitty.genomes:cli',
'reads = mitty.reads:cli',
'perfectbam = mitty.benchmarking.perfectbam:cli',
'badbams = mitty.benchmarking.badbams:cli',
'alindel = mitty.benchmarking.indel_alignment_accuracy:cli',
'benchsummary = mitty.benchmarking.benchmark_summary:cli',
'vcf2pop = mitty.lib.vcf2pop:cli',
'bam2tfq = mitty.benchmarking.convert_bam_to_truth_fastq:cli',
'alindel_plot = mitty.benchmarking.indel_alignment_accuracy_plot:cli',
'misplot = mitty.benchmarking.misalignment_plot:cli',
'acubam = mitty.benchmarking.bam_accuracy:cli',
'migratedb = mitty.util.db_migrate:cli',
'plot_gc_bias = mitty.util.plot_gc_bias:cli',
'splitta = mitty.util.splitta:cli',
'kmers = mitty.util.kmers:cli',
'pybwa = mitty.util.pybwa:cli']
},
install_requires=[
'cython',
'setuptools>=11.0.0',
'numpy>=1.9.0',
'docopt>=0.6.2',
'click>=3.3',
'pysam>=0.8.1',
'h5py>=2.5.0',
'matplotlib>=1.3.0',
'scipy'
],
) | gpl-2.0 |
matthewwardrop/formulaic | benchmarks/plot.py | 1 | 1418 | import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = pd.read_csv(os.path.join(os.path.dirname(__file__), 'benchmarks.csv')).sort_values('mean')
def grouped_barplot(df, cat, subcat, val, err, subcats=None, **kwargs):
# based on https://stackoverflow.com/a/42033734
categories = df[cat].unique()
x = np.arange(len(categories))
subcats = subcats or df[subcat].unique()
offsets = (np.arange(len(subcats)) - np.arange(len(subcats)).mean()) / (len(subcats) + 1.)
width = np.diff(offsets).mean()
for i, gr in enumerate(subcats):
dfg = df[df[subcat] == gr]
plt.bar(x + offsets[i], dfg[val].values, width=width,
label="{}".format(gr), yerr=dfg[err].values, capsize=6, **kwargs)
plt.xlabel(cat)
plt.ylabel(val)
plt.xticks(x, categories)
plt.legend(title=subcat, loc='center left', bbox_to_anchor=(1, 0.5))
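# Illustrative call of grouped_barplot on a toy frame with the same columns as
# benchmarks.csv (the numbers below are made up, not real benchmark results):
#   toy = pd.DataFrame({'formula': ['y ~ x', 'y ~ x', 'y ~ x + z', 'y ~ x + z'],
#                       'tooling': ['formulaic', 'patsy', 'formulaic', 'patsy'],
#                       'mean': [1.0, 2.0, 1.5, 2.5],
#                       'stderr': [0.1, 0.1, 0.2, 0.2]})
#   grouped_barplot(toy, cat='formula', subcat='tooling', val='mean', err='stderr')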
def plot_benchmarks(toolings=None):
plt.figure(dpi=120, figsize=(10, 5))
grouped_barplot(data, cat='formula', subcat='tooling', val='mean', err='stderr', subcats=toolings, log=True)
plt.ylim(1e-2, None)
plt.grid()
plt.gca().set_axisbelow(True)
plt.ylabel("Mean Time (s)")
plt.xlabel("Formula")
plt.tight_layout()
plot_benchmarks(toolings=['formulaic', 'R', 'patsy', 'formulaic_sparse', 'R_sparse'])
plt.savefig(os.path.join(os.path.dirname(__file__), 'benchmarks.png'))
| mit |
meduz/NeuroTools | examples/matlab_vs_python/smallnet_acml.py | 3 | 4164 | # Created by Eugene M. Izhikevich, 2003 Modified by S. Fusi 2007
# Ported to Python by Eilif Muller, 2008.
#
# Notes:
#
# Requires matplotlib,ipython,numpy>=1.0.3
# On a debian/ubuntu based system:
# $ apt-get install python-matplotlib python-numpy ipython
#
# Start ipython with threaded plotting support:
# $ ipython -pylab
#
# At the resulting prompt, run the file by:
# In [1]: execfile('smallnet.py')
# Modules required
import numpy
import numpy.random as random
import acml_rng
# Bug fix for numpy version 1.0.4
numpy.lib.function_base.any = numpy.any
# For measuring performance
import time
t1 = time.time()
# Excitatory and inhibitory neuron counts
Ne = 1000
Ni = 4
N = Ne+Ni
# Synaptic couplings
Je = 250.0/Ne
Ji = 0.0
# reset depolarization (mV)
reset = 0.0
# refractory period (ms)
refr = 2.5
# Synaptic couplings (mV)
S = numpy.zeros((N,N))
S[:,:Ne] = Je*random.uniform(size=(N,Ne))
S[:,Ne:] = -Ji*random.uniform(size=(N,Ni))  # inhibitory couplings occupy the last Ni columns
# Connectivity
S[:,:Ne][random.uniform(size=(N,Ne))-0.9<=0.0]=0.0
S[:,Ne:][random.uniform(size=(N,Ni))-0.9<=0.0]=0.0
# (mV/ms) (lambda is a python keyword)
leak = 5.0
dt = 0.05
sdt = numpy.sqrt(dt)
# Statistics of the background external current
mb = 3.0; sb = 4.0
mue = mb; sigmae=sb
sigmai = 0.0
# State variable v, initial value of 0
v = numpy.zeros(N)
# Refractory period state variable
r = numpy.zeros(N)
# Spike timings in a list
firings = []
spikes = [[]]*N
print 'mu(nu=5Hz)=%f' % (mb+Ne*Je*.015-leak,)
print 'mu(nu=100Hz)=%f' % (mb+Ne*Je*.1-leak,)
# total duration of the simulation (ms)
duration = 400.0
t = numpy.arange(0.0, duration, dt)
vt = numpy.zeros_like(t)
t2 = time.time()
print 'Elapsed time is ', str(t2-t1), ' seconds.'
t1 = time.time()
for i,ti in enumerate(t):
# time for a strong external input
if ti>150.0:
mue = 6.5
sigmae = 7.5
# time to restore the initial statistics of the external current
if ti>300.0:
mue = mb
sigmae = sb
Iext = acml_rng.normal(1.0,N)
Iext[:Ne]*=sigmae
Iext[Ne:]*=sigmai
# Which neurons fired?
fired = numpy.nonzero(v>=20.0)[0]
if len(fired)>0:
# Save mean firing rate of the excitatory neurons
v[fired] = reset
r[fired] = refr
# Append spikes to the spike list
for n in fired:
# Spikes are stored by a (neuron, time) pair
# For easy plotting later
firings.append((n,ti))
# and as a list for each neuron
spikes[n].append(ti)
aux = v-dt*(leak-mue)+numpy.sum(S[:,fired],1)+sdt*Iext
else:
aux = v-dt*(leak-mue)+sdt*Iext;
# Neurons not in the refractory period
nr = numpy.nonzero(r<=0)[0]
# Bound voltages above 0.0
v[nr] = numpy.where(aux[nr]>=0.0,aux[nr],0.0)
# Progress refractory variable
nr = numpy.nonzero(r>0)[0]
r[nr]-=dt
# record the voltage trace of the zeroeth neuron
vt[i] = v[0]
t2 = time.time()
print 'Elapsed time is ', str(t2-t1), ' seconds.'
# -------------------------------------------------------------------------
# Plot everything
# -------------------------------------------------------------------------
def myplot():
global firings
t1 = time.time()
figure()
# Membrane potential trace of the zeroeth neuron
subplot(3,1,1)
vt[vt>=20.0]=65.0
plot(t,vt)
ylabel(r'$V-V_{rest}\ \left[\rm{mV}\right]$')
# Raster plot of the spikes of the network
subplot(3,1,2)
myfirings = array(firings)
myfirings_100 = myfirings[myfirings[:,0]<min(100,Ne)]
plot(myfirings_100[:,1],myfirings_100[:,0],'.')
axis([0, duration, 0, min(100,Ne)])
ylabel('Neuron index')
# Mean firing rate of the excitatory population as a function of time
subplot(3,1,3)
# 1 ms resultion of rate histogram
dx = 1.0
x = arange(0,duration,dx)
myfirings_Ne = myfirings[myfirings[:,0]<Ne]
mean_fe,x = numpy.histogram(myfirings_Ne[:,1],x)
plot(x,mean_fe/dx/Ne*1000.0,ls='steps')
ylabel('Hz')
xlabel('time [ms]')
t2 = time.time()
print 'Finished. Elapsed', str(t2-t1), ' seconds.'
#myplot()
| gpl-2.0 |
mmechelke/bayesian_xfel | bxfel/core/structure_factor.py | 1 | 18608 |
import numpy as np
import scipy
import re
import os
import hashlib
import csb
from csb.bio.io.wwpdb import StructureParser
def chunks(l, n):
""" Yield successive n-sized chunks from l.
"""
for i in xrange(0, len(l), n):
yield l[i:i+n]
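# Example (illustrative): list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]];
# the final chunk may be shorter than n.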
class ScatteringFactor(object):
"""
    Calculates the density in reciprocal space as
F(s) = sum_m f_m(s) exp(-B_m s**2 / 4) exp(i*2pi*s*r)
where f_m(s) is approximated by four Gaussian distributions
and exp(-B_m s**2 / 4) are the thermal fluctuations
g_m(s) = f_m(s) * exp(-B_m s**2 / 4) are precomputed
"""
def __init__(self, structure=None):
if structure is None:
self._atoms = list()
self._bfactor = list()
self._seq = list()
self._elements = list()
else:
self._structure = structure
# For now only non hydrogen atoms
# TODO use hydrogens as well
self._atoms = []
for chain in structure:
for residue in structure[chain]:
for atom in residue:
a = residue[atom]
if not a.name.startswith("H"):
self._atoms.append(residue[atom])
self._seq = []
self._bfactor = []
self._elements = []
for atom in self._atoms:
self._seq.append(atom.element.name)
self._elements.append(atom.element.name)
if atom._bfactor is None:
self._bfactor.append(1.)
else:
self._bfactor.append(atom._bfactor)
self._seq = np.array(self._seq)
self._elements = set(self._elements)
self._bfactor = np.clip(self._bfactor, 1., 100.)
self._atom_type_params = {}
self._read_sf(fn=os.path.expanduser("~/projects/xfel/py/xfel/core/atomsf.lib"))
@classmethod
def from_isd(cls, universe):
obj = cls()
atoms = universe.atoms
for atom in atoms:
element = str(atom.properties['element'].name)
obj._elements.append(element)
obj._atoms.append(atom)
obj._seq.append(element)
try:
obj._bfactor.append(max(1.,atom.properties['bfactor']))
except KeyError:
obj._bfactor.append(1.)
obj._seq = np.array(obj._seq)
obj._bfactor = np.array(obj._bfactor)
obj._elements = set(obj._elements)
obj._bfactor = np.clip(obj._bfactor, 1., 100.)
return obj
def _read_sf(self, fn):
"""
        Reads the coefficients for the analytical approximation
        to the scattering factors from the CCP4 database
"""
float_pattern = '[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?'
atom_pattern = '[A-Za-z]'
atom_pattern = '[A-Za-z0-9-+]+'
line_pattern = ("({0})\s+({1})"
"\s+({1})\s+({1})"
"\s+({1})\s+({1})"
"\s+({1})\s+({1})"
"\s+({1})\s+({1})").format(atom_pattern,float_pattern)
regex = re.compile(line_pattern)
with open(fn) as file_handle:
for line in file_handle:
if line.startswith("#"):
continue
m = regex.match(line)
atom_name = m.groups()[0]
a1, a2, a3, a4 = m.groups()[1], m.groups()[3], m.groups()[5], m.groups()[7]
b1, b2, b3, b4 = m.groups()[2], m.groups()[4], m.groups()[6], m.groups()[8]
c = m.groups()[9]
a = np.array([a1,a2,a3,a4],np.double)
b = np.array([b1,b2,b3,b4],np.double)
self._atom_type_params[atom_name] = (a,b,float(c))
def _calculate_gm(self, hkl):
"""
        calculates the product of the scattering factor and
debye-waller factors
"""
f = np.zeros((len(self._atoms), hkl.shape[0]))
seq = self._seq
bfactor = self._bfactor
s_tols = 0.25 * (hkl**2).sum(-1)
for atom_type in self._elements:
a,b,c = self._atom_type_params[atom_type]
indices = np.where(seq==atom_type)[0]
fx = c + np.dot(np.exp(np.outer(-s_tols,b)),a)
f[indices,:] = fx[:]
f *= np.exp(np.outer(-bfactor,s_tols))
return f
def _calculate_gm_grad(self, hkl):
"""
        calculate the gradient of the scattering factor and
debye-waller factor
"""
seq = np.array([a.element.name for a in self._atoms])
f = np.zeros((len(self._atoms), hkl.shape[0]))
dfg = np.zeros((len(self._atoms), hkl.shape[0], 3))
bfactors = np.array([a.bfactor for a in self._atoms])
bfactors = np.clip(bfactors, 1., 100.)
s_tols = 0.25 * (hkl**2).sum(-1)
for atom_type in self._elements:
a,b,c = self._atom_type_params[atom_type]
indices = np.where(seq==atom_type)[0]
bfactor = bfactors[indices]
g = np.exp(np.outer(-s_tols,b))
sf = np.dot(g, a) + c
gsf = np.sum(g * a[np.newaxis,:] * b[np.newaxis,:] * -0.5, -1)
dwf = np.exp(-np.outer(bfactor, s_tols))
gdwf = dwf * (bfactor * - 0.5)[:,np.newaxis]
grad = sf * gdwf + gsf * dwf
f[indices,:] = dwf * sf
dfg[indices,:,:] = grad[:,:,np.newaxis] * hkl
return dfg, f
def _calculate_scattering_factors(self, hkl):
"""
creates an approximation of the density in reciprocal space by
four gaussians
        returns the atomic scattering factors
"""
seq = self._seq
bfactor = self._bfactor
f = np.zeros((len(self._atoms), hkl.shape[0]))
s_tols = 0.25 * (hkl**2).sum(-1)
for atom_type in self._elements:
a,b,c = self._atom_type_params[atom_type]
indices = np.where(seq==atom_type)[0]
fx = c + np.dot(np.exp(np.outer(-s_tols,b)),a)
f[indices,:] = fx[:]
return f
    def _calculate_debyewaller_factors(self, hkl):
        """
        Debye-Waller factors exp(-B_m * s**2 / 4) for every atom at the given
        reciprocal-space points.
        """
b = np.array(self._bfactor)
s_tols = 0.25 * (hkl**2).sum(-1)
t = np.exp(np.outer(-b,s_tols))
return t
def grad_s(self, X, hkl):
"""
Gradient with respect to the reciprocal space coordinates
@param X: atomic positions
@param hkl: reciprocal space positions
"""
seq = np.array([atom.element.name for atom in self._atoms])
bfactor = np.array([atom.bfactor for atom in self._atoms])
bfactor = np.clip(bfactor, 1., 100.)
s_tols = 0.25 * (hkl**2).sum(-1)
dw_factors = np.exp(np.outer(-bfactor, s_tols))
def grad_hkl(self, X, hkl):
seq = self._seq
bfactor = self._bfactor
bfactor = np.clip(bfactor, 1., 100.)
dg = np.zeros((len(self._atoms), hkl.shape[0], hkl.shape[1]))
g = np.zeros((len(self._atoms), hkl.shape[0]))
s_tols = 0.25 * (hkl**2).sum(-1)
dw_factors = np.exp(np.outer(-bfactor, s_tols))
ddw_factors = bfactor[:,np.newaxis] * dw_factors
for atom_type in self._elements:
a,b,c = self._atom_type_params[atom_type]
indices = np.where(seq==atom_type)[0]
inner_exp = np.exp(np.outer(-s_tols,b))
sf = np.dot(inner_exp, a) + c
dsf = np.dot(inner_exp, a*b)
gx = dsf * dw_factors[indices] + sf * ddw_factors[indices]
g[indices,:] = sf[:] * dw_factors[indices]
a = np.einsum('ab,bc->abc',gx, -0.5*hkl)
dg[indices,:,:] = a
phase = np.dot((2 * np.pi * X),hkl.T)
fx= np.sum(g * np.exp(1j * phase),0)
g2 = np.einsum('ba,bc->bac',g , 2 * np.pi * 1j *X)
dfx = np.einsum("abc,ab->bc",dg + g2,np.exp(1j * phase))
return dfx, fx
def calculate_structure_factors(self, X, hkl):
"""
TODO do this calculation in chunks to save space
"""
F = np.zeros(hkl.shape[0], dtype=np.complex128)
lim = hkl.shape[0]
step = 512
for i in range(0,lim,step):
_hkl = hkl[i:i+step]
f = self._calculate_scattering_factors(_hkl)
f *= self._calculate_debyewaller_factors(_hkl)
phase = np.dot((2 * np.pi * X),_hkl.T)
F[i:i+step] = np.sum(f * np.exp(1j * phase),0)
return F
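    # Illustrative sketch (not used elsewhere in this module): direct evaluation of the
    # class docstring's formula, F(s) = sum_m g_m(s) exp(i 2 pi s.r_m), for precomputed
    # g_m; calculate_structure_factors above computes the same thing in chunks.
    @staticmethod
    def _direct_structure_factor(g, X, hkl):
        """Reference evaluation of F(s) = sum_m g_m(s) exp(i 2 pi s.r_m).
        g   : (n_atoms, n_hkl) precomputed g_m(s) = f_m(s) exp(-B_m s^2 / 4)
        X   : (n_atoms, 3) atomic positions r_m
        hkl : (n_hkl, 3) reciprocal-space vectors s
        """
        phase = np.dot(2 * np.pi * X, hkl.T)           # shape (n_atoms, n_hkl)
        return np.sum(g * np.exp(1j * phase), axis=0)  # shape (n_hkl,)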
def calculate_structure_factor_gradient(self, X, hkl):
"""
calculates the gradient of the fourier density
with respect to the atomic coordinates
"""
G = np.zeros(hkl.shape, dtype=np.complex128)
lim = hkl.shape[0]
F = np.zeros(hkl.shape[0], dtype=np.complex128)
step = 512
for i in range(0, lim, step):
_hkl = hkl[i:i+step]
dfg, f = self._calculate_gm_grad(_hkl)
phase = np.exp(1j * np.dot((2 * np.pi * X), _hkl.T))
gphase = phase[:, :, np.newaxis] *\
1j * 2 * np.pi * X[:, np.newaxis, :]
grad = dfg * phase[:, :, np.newaxis]
grad += f[:, :, np.newaxis] * gphase
F[i: i+step] = np.sum(f * phase, 0)
G[i: i+step, :] = np.sum(grad, 0)
return G, F
def calculate_structure_factor_gradient2(self, X):
"""
calculates the gradient of the fourier density
with respect to the atomic coordinates
"""
        g_m = self._calculate_scattering_factors(self._hkl)
        g_m *= self._calculate_debyewaller_factors(self._hkl)
phase = np.dot((2 * np.pi * X),self._hkl.T)
fx = (g_m *1j * 2 * np.pi * np.exp(1j * phase))
dF_dx = np.array([np.multiply.outer(s,fx_s) for s,fx_s in
zip(fx.T,self._hkl)])
return dF_dx
def calculate_intensity_gradient(self, X):
"""
calculates the gradient of the intensity with respect to the atomic coordinates dI/dx
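        Uses I(s) = |F(s)|^2 = F(s) * conj(F(s)), so the chain rule gives
            dI/dx = conj(F) * dF/dx + F * conj(dF/dx) = 2 Re{ conj(F) * dF/dx },
        which is the expression evaluated at the end of this method.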
"""
g_m = self._calculate_scattering_factors(self._hkl)
g_m *= self._calculate_debyewaller_factors(self._hkl)
phase = np.dot((2 * np.pi * X),self._hkl.T)
F = np.sum(g_m * np.exp(1j * phase),0)
fx = (g_m *1j * 2 * np.pi * np.exp(1j * phase))
dF_dx = np.array([np.multiply.outer(s,fx_s) for s,fx_s in zip(fx.T,self._hkl)])
dI_dx = np.conj(F[:,np.newaxis,np.newaxis]) * dF_dx + F[:,np.newaxis,np.newaxis] * np.conj(dF_dx)
return dI_dx
class Correlations(object):
def __init__(self, angles, nbins):
self._bin_angles(angles, nbins)
def _bin_angles(self, angles, nbins):
pass
def calculate_from_density(self, rho):
pass
class OnePhotonCorrelations(Correlations):
def _bin_angles(self, angles, nbins):
d = np.sqrt(np.sum(angles**2,-1))
lower = d.min()
upper = d.max()
axes = np.linspace(lower, upper, nbins)
indices = np.argsort(d)
bins = [[] for x in xrange(nbins)]
j = 0
for i in range(0,axes.shape[0]):
right_edge = axes[i]
print right_edge, i
while d[indices[j]] < right_edge:
bins[i-1].append(indices[j])
j += 1
bins[-1] = indices[j:].tolist()
self._axes = axes
self._bins = bins
def calculate_from_density(self, rho):
I = np.asarray([np.sum(rho.take(bin))
for bin in self._bins])
return I
class CachedScatteringFactor(ScatteringFactor):
def __init__(self, structure):
super(CachedScatteringFactor,self).__init__(structure)
self._f = None
def calculate_structure_factors(self, X, hkl):
if self._f is None:
print "calc f"
self._f = self._calculate_scattering_factors(hkl)
self._f *= self._calculate_debyewaller_factors(hkl)
else:
print "using cached f"
phase = np.dot((-2 * np.pi * X),hkl.T)
F = np.sum(self._f * np.exp(1j * phase),0)
return F
class SphericalSection(object):
def get(self,
n_points=20, radius=1.0,
polar_min=0., polar_max=np.pi,
azimut_min=0., azimut_max=2*np.pi):
theta = np.linspace(polar_min,polar_max, n_points)
phi = np.linspace(azimut_min, azimut_max, n_points)
x = np.outer(radius*np.sin(theta), np.cos(phi))
y = np.outer(radius*np.sin(theta), np.sin(phi))
z = np.outer(radius*np.cos(theta), np.ones(n_points))
return [x,y,z]
class EwaldSphereProjection(object):
def get_indices(self, wavelength, x,y,z):
"""
        projects detector points onto an Ewald sphere
x, y, z are the pixel coordinates
x, y, z are all M x N matrices, where M x N is the detector size.
It is assumed that the detector is perpendicular to the Z-axis
"""
d = np.sqrt(x**2 + y**2 + z**2)
h = 1/wavelength * (x/d)
k = 1/wavelength * (y/d)
l = 1/wavelength * (z/d)
return h,k,l
def project(self, structure_factor, angle):
pass
if __name__ == "__main__":
import matplotlib
matplotlib.interactive(True)
import time
import os
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import pylab
from pylab import *
from csb.bio.io.wwpdb import StructureParser
from csb.bio.io.wwpdb import get
from xfel.core.density import Density
#structure = get("1L2Y")
#structure = StructureParser(os.path.expanduser("~/data/pdb/caffeine2.pdb")).parse()
#fn = os.path.expanduser("~/gsh.pdb")
structure = StructureParser(os.path.expanduser("~/projects/xfel/data/GTT_short.pdb")).parse()
x = np.linspace(-1.,1.,11)
h, k, l = np.meshgrid(x,x,x)
hkl = np.vstack([item.ravel() for item in [h,k,l]]).T
hkl = np.ascontiguousarray(hkl)
bf = np.random.random()
def bfactors(hkl, bf):
return np.exp(-0.25 * bf * (hkl**2).sum(-1))
def bfactor_grad(hkl):
return np.exp(-0.25 * bf * (hkl**2).sum(-1))[:,np.newaxis] * -0.5 * hkl * bf
a = np.random.random(4,)
b = np.random.random(4,)
c = 0.3
def sf(hkl,a,b,c):
s_tols = -0.25 * (hkl**2).sum(-1)
inner_exp = np.exp(np.outer(-s_tols,b))
sf = np.dot(inner_exp, a) + c
return sf
def sf_grad(hkl, a, b, c):
s_tols = -0.25 * (hkl**2).sum(-1)
sf = np.exp(np.outer(-s_tols,b)) * a[np.newaxis,:] * b[np.newaxis,:] * 0.5
return sf.sum(-1)[:,np.newaxis] * hkl
def gm(hkl, a, b, c, bf):
s_tols = -0.25 * (hkl**2).sum(-1)
inner_exp = np.exp(np.outer(-s_tols,b))
sf = np.dot(inner_exp, a) + c
bf = np.exp(bf * s_tols)
return sf * bf
def gm_grad(hkl, a, b, c, bf):
s_tols = -0.25 * (hkl**2).sum(-1)
g = np.exp(np.outer(-s_tols,b))
sf = np.dot(g, a) + c
gsf = np.sum(g * a[np.newaxis,:] * b[np.newaxis,:] * 0.5, -1)
bb = np.exp(bf * s_tols)
gb = bb * bf * - 0.5
grad = sf * gb + gsf * bb
return grad[:,np.newaxis] * hkl
sf = ScatteringFactor(structure)
X = np.array([a.vector for a in sf._atoms])
X -= X.mean(0)
if False:
n = 10
X = X[:n]
sf._seq = sf._seq[:n]
sf._elements = ['N', 'C']
sf._atoms = sf._atoms[:n]
sf._bfactor = sf._bfactor[:n]
dgm, f1 = sf._calculate_gm_grad(hkl)
f = sf._calculate_scattering_factors(hkl)
f *= sf._calculate_debyewaller_factors(hkl)
scatter(f.real.ravel(), f1.real.ravel())
dgm2 = dgm * 0.0
eps = 1e-7
for i in range(3):
hkl[:, i] += eps
fprime = sf._calculate_scattering_factors(hkl)
fprime *= sf._calculate_debyewaller_factors(hkl)
dgm2[:, :, i] = (fprime - f)/eps
hkl[:, i] -= eps
figure()
scatter(dgm.real.ravel(), dgm2.real.ravel())
G, FF = sf.calculate_structure_factor_gradient(X, hkl)
G2 = G * 0.0
F = sf.calculate_structure_factors(X, hkl)
eps = 1e-7
for i in range(3):
hkl[:,i] += eps
G2[:,i] = (sf.calculate_structure_factors(X, hkl) - F)/eps
hkl[:,i] -= eps
figure()
scatter(G.real.ravel(), G2.real.ravel())
scatter(G.imag.ravel(), G2.imag.ravel())
figure()
scatter(F.real.ravel(), FF.real.ravel())
show()
t0 = time.time()
G, FF = sf.calculate_structure_factor_gradient(X, hkl)
print "hkl gradient: {} \n".format(time.time() - t0)
t0 = time.time()
g = sf.grad_hkl(X, hkl)
print "X gradient: {} \n".format(time.time() - t0)
raise
sf = ScatteringFactor(structure)
sf._hkl = hkl
X = np.array([a.vector for a in sf._atoms])
X -= X.mean(0)
g,g2 = sf.grad_hkl(X, hkl)
F = sf.calculate_structure_factors(X,hkl)
gi= sf.calculate_intensity_gradient(X)
raise
F = F.reshape(h.shape)
rho = np.fft.fftshift(np.abs(np.fft.ifftn(F,[250,250,250])))
grid = Density.from_voxels(np.abs(F)**2,1.)
grid.write_gaussian(os.path.expanduser("~/mr.cube"))
raise
grid = Density.from_voxels(rho,1.)
grid.write_gaussian(os.path.expanduser("~/mr2.cube"))
raise
if True:
fig = pylab.figure()
ax = fig.add_subplot(131)
xi, yi= np.mgrid[0:500:1,0:500:1]
ax.contour(xi,yi, rho.sum(0), 30)
pylab.show()
ax = fig.add_subplot(132)
xi, yi= np.mgrid[0:500:1,0:500:1]
ax.contour(xi,yi, rho.sum(1), 30)
pylab.show()
ax = fig.add_subplot(133)
xi, yi= np.mgrid[0:500:1,0:500:1]
ax.contour(xi,yi, rho.sum(2), 30)
pylab.show()
raise
from mayavi import mlab
xi, yi, zi = np.mgrid[0:500:1,0:500:1,0:500:1]
obj = mlab.contour3d(rho, contours=10, transparent=True)
mlab.show()
from mayavi import mlab
obj = mlab.contour3d(np.abs(F), contours=10, transparent=True)
mlab.show()
raise
for ii in range(0,F.shape[0],25):
fig = pylab.figure()
ax = fig.add_subplot(111)
xi, yi= np.mgrid[0:500:1,0:500:1]
ax.contour(xi,yi,rho[ii,:,:], 30)
pylab.show()
I = np.abs(F)**2
fig = pylab.figure()
ax = fig.add_subplot(111)
nx, ny, nz = I.shape
xi, yi= np.mgrid[0:nx:1,0:ny:1]
ax.contour(xi,yi, I.sum(2), 15)
| mit |
bioinformatics-centre/AsmVar | src/AsmvarVarScore/FeatureToScore2.py | 2 | 12476 | """
========================================================
Statistics of the SV calls after the AGE process
========================================================
Author: Shujia Huang & Siyang Liu
Date   : 2014-03-07
"""
import sys
import re
import os
import string
import numpy as np
import matplotlib.pyplot as plt
def DrawFig(figureFile, distance, properDepth, imProperDepth, nr, aa, bb, mscore, misprob, aveIden, inbCoe):
fig = plt.figure(num=None, figsize=(16, 30), facecolor='w', edgecolor='k')
title = ['Distance distribution', 'NRatio', 'Perfect Depth', 'Imperfect depth', '', '', '']
    ylabel = ['The position of breakpoint', 'N Ratio of variants', \
'Perfect Depth', 'Both ImPerfect Depth', 'InbreedCoefficient', \
'Map score', 'Mismapping Probability' , 'Average Identity', \
'ProperReadDepth', 'ImProperReadDepth']
al = 0.5
for i, data in enumerate ([distance, nr, aa, bb, inbCoe, mscore, misprob, aveIden, properDepth, imProperDepth ]):
plt.subplot(10,2,2 * i + 1)
#plt.title(title[i], fontsize=16)
P = data[:,0] == 1; N = data[:,0] == 2; X = data[:,0] == 3
plt.scatter(data[:,1][N], data[:,2][N], marker='o', c = 'r', alpha=al, linewidths = 0.1, label = 'Negative(%d)'%len(data[:,1][N])) # Negative
plt.scatter(data[:,1][P], data[:,2][P], marker='o', c = 'g', alpha=al, linewidths = 0.1, label = 'Positive(%d)'%len(data[:,1][P])) # Positive
plt.scatter(data[:,1][X], data[:,2][X], marker='*', c = 'Y', alpha=al, linewidths = 0.1, label = 'Positive->Negative(%d)' % len(data[:,1][X])) # Positive->Negative
plt.legend(loc='upper right')
plt.xlim(-10, 50)
if i == 9: plt.xlabel('Score', fontsize=16)
plt.ylabel(ylabel[i], fontsize=16)
plt.subplot(10, 2, 2*i + 2)
NEW = data[:,0] == 0
good = data[:,1][NEW] >= VQ_CUTOFF
bad = data[:,1][NEW] < VQ_CUTOFF
plt.scatter(data[:,1][NEW][bad], data[:,2][NEW][bad], marker='o', c = 'm', alpha=al, linewidths = 0.1, label = 'bad(%d)' % len(data[:,1][NEW][bad])) # bad
plt.scatter(data[:,1][NEW][good], data[:,2][NEW][good], marker='o', c = 'b', alpha=al, linewidths = 0.1, label = 'good(%d)' % len(data[:,1][NEW][good])) # good
plt.xlim(-3, 30)
plt.legend(loc='upper right')
if i == 9: plt.xlabel('Score', fontsize=16)
fig.savefig(figureFile + '.png')
#fig.savefig(figureFile + '.pdf')
def DrawPhredScale (figureFile, phredScal):
fig = plt.figure()
ylabel = ['Phred Scale']
for i, data in enumerate ([phredScal ]):
plt.subplot(2, 1, 2 * i + 1)
P = data[:,0] == 1; N = data[:,0] == 2; X = data[:,0] == 3
plt.scatter(data[:,1][N], data[:,2][N], marker='o', c = 'r', alpha=0.5, linewidths = 0, label = 'Negative(%d)'%len(data[:,1][N])) # Negative
plt.scatter(data[:,1][P], data[:,2][P], marker='o', c = 'g', alpha=0.5, linewidths = 0, label = 'Positive(%d)'%len(data[:,1][P])) # Positive
plt.scatter(data[:,1][X], data[:,2][X], marker='o', c = 'Y', alpha=0.5, linewidths = 0, label = 'Positive->Negative(%d)' % len(data[:,1][X])) # Positive->Negative
plt.legend(loc='upper left')
plt.ylabel(ylabel[i], fontsize=16)
plt.subplot(2, 1, 2*i + 2)
NEW = data[:,0] == 0
good = data[:,1][NEW] >= VQ_CUTOFF
bad = data[:,1][NEW] < VQ_CUTOFF
plt.scatter(data[:,1][NEW][bad] , data[:,2][NEW][bad] , marker='o', c = 'm', alpha=0.5, linewidths = 0, label = 'bad(%d)' % len(data[:,1][NEW][bad])) # bad
plt.scatter(data[:,1][NEW][good], data[:,2][NEW][good], marker='o', c = 'b', alpha=0.5, linewidths = 0, label = 'good(%d)' % len(data[:,1][NEW][good])) # good
plt.legend(loc='upper left')
plt.xlabel('Score' , fontsize=16)
plt.ylabel(ylabel[i], fontsize=16)
fig.savefig(figureFile + '.png')
#fig.savefig(figureFile + '.pdf')
def Accum (data, isBig = False):
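    """
    Accumulate per-key counts over the numerically sorted keys of 'data'
    ({key: [count, accumulator]}): by default each key sums the counts of all
    keys <= it; with isBig=True it sums the counts of all keys >= it.
    Returns a list of [key, count, cumulative_count] rows.
    """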
tmpD= data
k = sorted(tmpD.keys(), key = lambda d: float(d))
dat = []
for i in range(len(k)):
if isBig:
for j in range(i,len(k)): tmpD[k[i]][1] += tmpD[k[j]][0]
else:
for j in range(i+1): tmpD[k[i]][1] += tmpD[k[j]][0]
dat.append([float(k[i]), float(tmpD[k[i]][0]), float(tmpD[k[i]][1]) ])
return dat
def SampleFaLen (faLenFile):
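    """
    Read a two-column 'sequence-ID length' table (optionally gzip-compressed)
    and return a dict mapping sequence ID -> length (int).
    """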
if faLenFile[-3:] == '.gz': I = os.popen('gzip -dc %s' % faLenFile)
else : I = open(faLenFile)
data = {}
while 1:
lines = I.readlines (100000)
if not lines: break
for line in lines:
col = line.strip('\n').split()
data[col[0]] = string.atoi(col[1])
I.close()
return data
def LoadFaLen (faLenLstFile):
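    """
    Read a list file of 'sampleID faLengthFile' pairs and return a nested
    dict {sampleID: {sequence ID: length}} built with SampleFaLen.
    """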
data = {}
I = open (faLenLstFile)
for line in I.readlines():
if len(line.strip('\n').split()) != 2: raise ValueError('[ERROR] The format of Fa length list maybe not right. It could just be: "sample FalenghtFile", but found',line)
sampleId, fileName = line.strip('\n').split()
if sampleId not in data: data[sampleId] = {}
data[sampleId] = SampleFaLen(fileName)
I.close()
return data
def main (argv):
qFaLen = LoadFaLen(argv[1])
figPrefix = 'test'
if len(argv) > 2: figPrefix = argv[2]
if argv[0][-3:] == '.gz':
I = os.popen('gzip -dc %s' % argv[0])
else:
I = open (argv[0])
s, annotations, mark = set(), [], []
print '#Chr\tPosition\tDistance\tLeftIden\tRightIden\tAveIden\tN-Ratio\tAA'
while 1: # VCF format
lines = I.readlines(100000)
if not lines: break
for line in lines:
col = line.strip('\n').split()
if re.search(r'^#CHROM', line): col2sam = { i+9:sam for i,sam in enumerate(col[9:]) }
if re.search(r'^#', line): continue
key = col[0] + ':' + col[1]
if key in s: continue
s.add(key)
#if re.search(r'^PASS', col[6]): continue
#if not re.search(r'_TRAIN_SITE', col[7]): continue
#if not re.search(r'^PASS', col[6]): continue
isbad = False
for i, sample in enumerate (col[9:]):
if re.search(r'NULL', sample): isbad = True
if isbad: continue
fmat = { k:i for i,k in enumerate(col[8].split(':')) }
if 'VS' not in fmat or 'QR' not in fmat: continue
if 'AGE' not in fmat: continue
if len(annotations) == 0: annotations = [[] for _ in col[9:] ]
vcfinfo = { d.split('=')[0]: d.split('=')[1] for d in col[7].split(';') if len(d.split('=')) == 2 }
vq = string.atof(vcfinfo['VQ'])
inb = string.atof(vcfinfo['InbCoeff'])
if ('POSITIVE_TRAIN_SITE' in col[7]) and ('NEGATIVE_TRAIN_SITE' in col[7]):
mark.append([3, vq, inb])
elif 'POSITIVE_TRAIN_SITE' in col[7]:
mark.append([1, vq, inb])
elif 'NEGATIVE_TRAIN_SITE' in col[7]:
mark.append([2, vq, inb])
else:
mark.append([0, vq, inb])
# GT:AA:AE:FN:MIP:MS:QR:RR:VS:VT
for i, sample in enumerate (col[9:]):
sampleId = col2sam[9+i]
field = sample.split(':')
if sample == './.' or len(field) < fmat['QR'] + 1 or field[fmat['QR']].split(',')[-1] == '.' or field[fmat['AS']] == '.':
annotations[i].append([0, 0, 0, 0, 0, 0, 0, 0, 0])
continue
qr = field[fmat['QR']].split(',')[-1]
qregion = np.array(qr.split('-'))
if len(qregion) > 3: qId = qregion[0] + '-' + qregion[1]
else : qId = qregion[0]
qSta = string.atoi(qregion[-2])
qEnd = string.atoi(qregion[-1])
if sampleId not in qFaLen:
                    raise ValueError ('[ERROR] The sample name %s (in vcf) is not in the name of Fa list.' % sampleId)
if qId not in qFaLen[sampleId]:
raise ValueError ('[ERROR]', qId, 'is not been found in file', opt.qFalen, '\n')
qSta= int(qSta * 100 / qFaLen[sampleId][qId] + 0.5)
qEnd= int(qEnd * 100 / qFaLen[sampleId][qId] + 0.5)
if qSta > 100 or qEnd > 100:
raise ValueError ('[ERROR] Query size Overflow! sample: %s; scaffold: %s' % (sampleId, qId))
leg = qSta
if 100 - qEnd < qSta: leg = qEnd
nn = string.atof(sample.split(':')[fmat['NR']])
n = round(1000 * nn) / 10.0 # N ratio
alt = string.atoi(sample.split(':')[fmat['AA']].split(',')[1]) # Alternate perfect
bot = string.atoi(sample.split(':')[fmat['AA']].split(',')[3]) # Both imperfect
pro, ipr = [0,0]
ms = string.atoi(sample.split(':')[fmat['AS']]) # Mapping score
mip = string.atof(sample.split(':')[fmat['MS']]) # Mismapping probability
if sample.split(':')[fmat['AGE']] != '.':
aveI = string.atoi(sample.split(':')[fmat['AGE']].split(',')[3]) # ave_iden in AGE
else:
aveI = 0
annotations[i].append([leg, n, alt, bot, pro, ipr, ms, mip, aveI])
I.close()
print >> sys.stderr, '# Number of Positions: %d' % len(mark)
if len(mark) != len(annotations[0]):
raise ValueError ('[ERROR] The size is not match mark=%d, annotations=%d!' % (len(mark), len(annotations)))
annotations = np.array(annotations);
sampleNum = len(annotations)
data, distance, properDepth, imProperDepth, nr, aa, bb, mscore, misprob, aveIden = [],[],[],[],[],[],[],[],[],[]
inbreedCoe, phredScal = [], []
for i in range(len(annotations[0])):
anno = np.array([annotations[s][i] for s in range(sampleNum) if len(annotations[s][i][annotations[s][i]!=0]) > 0 ]) # each person in the same position
score = np.array([annotations[s][i][-3] for s in range(sampleNum) if annotations[s][i][-3] > 0 ])
msprob = np.array([annotations[s][i][-2] for s in range(sampleNum) if annotations[s][i][-3] > 0 ])
phred = -10 * np.log10(1.0 - score.sum() / np.sum(score/(1.0 - msprob))) # Phred scale
if len(anno) == 0: continue
leg, n, alt, bot, pro,ipr, ms, mip, aveI = np.median(anno, axis=0)
distance.append ([mark[i][0], mark[i][1], leg ])
properDepth.append ([mark[i][0], mark[i][1], pro ])
imProperDepth.append ([mark[i][0], mark[i][1], ipr ])
nr.append ([mark[i][0], mark[i][1], n ])
aa.append ([mark[i][0], mark[i][1], alt ])
bb.append ([mark[i][0], mark[i][1], bot ])
mscore.append ([mark[i][0], mark[i][1], ms ])
misprob.append ([mark[i][0], mark[i][1], mip ])
aveIden.append ([mark[i][0], mark[i][1], aveI])
phredScal.append ([mark[i][0], mark[i][1], phred])
inbreedCoe.append ([mark[i][0], mark[i][1], mark[i][2]])
data.append([leg, alt, pro, ipr, n, bot])
print mark[i][0], mark[i][1], mark[i][2], '\t', leg, '\t', pro, '\t', ipr,'\t', n, '\t', alt, '\t', bot
data = np.array(data)
print >> sys.stderr, '\nPosition\tALTernatePerfect\tLeftIdentity\tRightIdentity\tAveIden\tNRatio\tBothImperfect'
print >> sys.stderr, 'Means: ', data.mean(axis=0), '\nstd : ', data.std(axis=0), '\nMedian: ', np.median(data, axis=0)
print >> sys.stderr, '25 Percentile:', np.percentile(data, 25,axis=0), '\n50 Percentile:', np.percentile(data, 50,axis=0), '\n75 Percentile:', np.percentile(data, 75,axis=0)
DrawFig(figPrefix, \
np.array (distance ), \
np.array (properDepth ), \
np.array (imProperDepth), \
np.array (nr ), \
np.array (aa ), \
np.array (bb ), \
np.array (mscore ), \
np.array (misprob ), \
np.array (aveIden ), \
np.array (inbreedCoe ) )
DrawPhredScale (figPrefix + '.phred', np.array(phredScal))
if __name__ == '__main__':
VQ_CUTOFF = 3.0
main(sys.argv[1:])
| mit |
camallen/aggregation | experimental/condor/animal_EM.py | 2 | 7334 | #!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import os
import pymongo
import sys
import cPickle as pickle
import bisect
import csv
import matplotlib.pyplot as plt
import random
import math
import urllib
import matplotlib.cbook as cbook
def index(a, x):
'Locate the leftmost value exactly equal to x'
i = bisect.bisect_left(a, x)
if i != len(a) and a[i] == x:
return i
raise ValueError
if os.path.exists("/home/ggdhines"):
sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/clusteringAlg")
sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/classifier")
else:
sys.path.append("/home/greg/github/reduction/experimental/clusteringAlg")
sys.path.append("/home/greg/github/reduction/experimental/classifier")
#from divisiveDBSCAN import DivisiveDBSCAN
from divisiveDBSCAN_multi import DivisiveDBSCAN
from divisiveKmeans import DivisiveKmeans
from iterativeEM import IterativeEM
if os.path.exists("/home/ggdhines"):
base_directory = "/home/ggdhines"
else:
base_directory = "/home/greg"
client = pymongo.MongoClient()
db = client['condor_2014-11-23']
classification_collection = db["condor_classifications"]
subject_collection = db["condor_subjects"]
big_userList = []
big_subjectList = []
animal_count = 0
f = open(base_directory+"/Databases/condor_ibcc.csv","wb")
f.write("a,b,c\n")
alreadyDone = []
animals_in_image = {}
animal_index = -1
global_user_list = []
animal_to_image = []
zooniverse_list = []
condor_votes = {}
animal_votes = {}
#subject_vote = {}
results = []
to_sample_from = list(subject_collection.find({"state":"complete"}))
to_sample_from2 = list(subject_collection.find({"classification_count":1,"state":"active"}))
votes = []
sample = random.sample(to_sample_from,100)
#sample.extend(random.sample(to_sample_from2,1000))
# for subject_index,subject in enumerate(sample):
# print "== " + str(subject_index)
# zooniverse_id = subject["zooniverse_id"]
# for user_index,classification in enumerate(classification_collection.find({"subjects.zooniverse_id":zooniverse_id})):
# if "user_name" in classification:
# user = classification["user_name"]
# else:
# user = classification["user_ip"]
#
# try:
# tt = index(big_userList,user)
# except ValueError:
# bisect.insort(big_userList,user)
for subject_index,subject in enumerate(sample):
print subject_index
zooniverse_id = subject["zooniverse_id"]
annotation_list = []
user_list = []
animal_list = []
#local_users = []
for user_index,classification in enumerate(classification_collection.find({"subjects.zooniverse_id":zooniverse_id})):
try:
mark_index = [ann.keys() for ann in classification["annotations"]].index(["marks",])
markings = classification["annotations"][mark_index].values()[0]
if "user_name" in classification:
user = classification["user_name"]
else:
user = classification["user_ip"]
found_condor = False
for animal in markings.values():
scale = 1.875
x = scale*float(animal["x"])
y = scale*float(animal["y"])
animal_type = animal["animal"]
if not(animal_type in ["carcassOrScale","carcass"]):
annotation_list.append((x,y))
#print annotation_list
user_list.append(user)
animal_list.append(animal_type)
if not(user in global_user_list):
global_user_list.append(user)
#local_users.append(user)
if animal_type == "condor":
found_condor = True
except (ValueError,KeyError):
pass
    #if there were any markings on the image, use divisive kmeans to cluster the points so that each
    #cluster represents a single animal
if annotation_list != []:
user_identified,clusters = DivisiveKmeans(3).fit2(annotation_list,user_list,debug=True)
#fix split clusters if necessary
if user_identified != []:
user_identified,clusters = DivisiveKmeans(3).__fix__(user_identified,clusters,annotation_list,user_list,200)
for center,c in zip(user_identified,clusters):
animal_index += 1
#animal_votes.append([])
animal_to_image.append(zooniverse_id)
if not(zooniverse_id in animals_in_image):
animals_in_image[zooniverse_id] = [animal_index]
else:
animals_in_image[zooniverse_id].append(animal_index)
results.append((zooniverse_id,center))
for pt in c:
pt_index = annotation_list.index(pt)
user_index = global_user_list.index(user_list[pt_index])
animal_type = animal_list[annotation_list.index(pt)]
if animal_type == "condor":
votes.append((user_index,animal_index,1))
if not(animal_index in animal_votes):
animal_votes[animal_index] = [1]
else:
animal_votes[animal_index].append(1)
else:
votes.append((user_index,animal_index,0))
if not(animal_index in animal_votes):
animal_votes[animal_index] = [0]
else:
animal_votes[animal_index].append(0)
print "=====---"
#print votes
classify = IterativeEM()
classify.__classify__(votes)
most_likely = classify.__getMostLikely__()
estimates = classify.__getEstimates__()
X = []
Y = []
X2 = []
Y2 = []
#for subject_index,zooniverse_id in enumerate(big_subjectList):
for ii in range(animal_index):
x = np.mean(animal_votes[ii])
y = estimates[ii][1]
X.append(x)
Y.append(y)
if math.fabs(x-y) > 0.3:
zooniverse_id,(centerX,centerY) = results[ii]
print x,y
subject = subject_collection.find_one({"zooniverse_id":zooniverse_id})
url = subject["location"]["standard"]
slash_index = url.rfind("/")
object_id = url[slash_index+1:]
if not(os.path.isfile(base_directory+"/Databases/condors/images/"+object_id)):
urllib.urlretrieve (url, base_directory+"/Databases/condors/images/"+object_id)
image_file = cbook.get_sample_data(base_directory+"/Databases/condors/images/"+object_id)
image = plt.imread(image_file)
fig, ax = plt.subplots()
im = ax.imshow(image)
plt.plot([centerX,],[centerY,],'o')
plt.show()
# #if ((x < 0.5) and (y > 0.5)) or ((x > 0.5) and (y < 0.5)):
# subject = subject_collection.find_one({"zooniverse_id":zooniverse_id})
# print x,y
# print subject["location"]["standard"]
# #print most_likely[subject_index],estimates[subject_index],np.mean(subject_vote[zooniverse_id])
#else:
# print estimates[subject_index],0
plt.plot(X,Y,'.',color="blue")
plt.plot(X2,Y2,'.',color="red")
plt.xlim((-0.05,1.05))
plt.ylim((-0.05,1.05))
plt.show() | apache-2.0 |
Titan-C/scikit-learn | examples/linear_model/plot_ols.py | 74 | 2047 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
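# Editor's cross-check (not part of the original example): the docstring above
# says the fit minimizes the residual sum of squares; the closed-form
# least-squares solution beta = pinv([1, X]) y should therefore reproduce
# regr.intercept_ and regr.coef_ up to rounding. Variable names are new here.
A = np.c_[np.ones(len(diabetes_X_train)), diabetes_X_train]
beta = np.linalg.pinv(A).dot(diabetes_y_train)
print('Closed-form [intercept, slope]:', beta)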
# Make predictions using the testing set
diabetes_y_pred = regr.predict(diabetes_X_test)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% mean_squared_error(diabetes_y_test, diabetes_y_pred))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % r2_score(diabetes_y_test, diabetes_y_pred))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, diabetes_y_pred, color='blue', linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
sillvan/hyperspy | doc/user_guide/conf.py | 2 | 9753 | # -*- coding: utf-8 -*-
#
# HyperSpy User Guide documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 29 15:14:48 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
sys.path.append('../../')
sys.path.append(os.path.abspath('../sphinxext'))
from hyperspy import Release
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'gen_rst',
'numpydoc',
'matplotlib.sphinxext.only_directives',
'sphinx.ext.intersphinx',
'sphinx.ext.pngmath',
'sphinx.ext.autosummary',
'ipython_console_highlighting'] # , 'rst2pdf.pdfbuilder']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'HyperSpy User Guide [Draft]'
copyright = u'2011-2013, The HyperSpy Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = Release.version
# The full version, including alpha/beta/rc tags.
release = Release.version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "HyperSpy User Guide v%s" % Release.version
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'HyperSpyUserGuidedoc'
# -- Options for LaTeX output --------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'HyperSpyUserGuide.tex', u'HyperSpy User Guide',
u'The HyperSpy Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '_static/hyperspy_logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'hyperspyuserguide', u'HyperSpy User Guide Documentation',
[u'The HyperSpy Developers'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'HyperSpyUserGuide', u'HyperSpy User Guide Documentation',
u'The HyperSpy Developers', 'HyperSpyUserGuide', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'HyperSpy User Guide'
epub_author = u'The HyperSpy Developers'
epub_publisher = u'The HyperSpy Developers'
epub_copyright = u'2011-2013, The HyperSpy Developers'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'hyperspyweb': ('http://hyperspy.org/', None)}
| gpl-3.0 |
wkfwkf/statsmodels | statsmodels/distributions/mixture_rvs.py | 27 | 9592 | from statsmodels.compat.python import range
import numpy as np
def _make_index(prob,size):
"""
Returns a boolean index for given probabilities.
Notes
---------
prob = [.75,.25] means that there is a 75% chance of the first column
being True and a 25% chance of the second column being True. The
columns are mutually exclusive.
"""
rv = np.random.uniform(size=(size,1))
cumprob = np.cumsum(prob)
return np.logical_and(np.r_[0,cumprob[:-1]] <= rv, rv < cumprob)
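def _demo_make_index(size=10000):
    # Editor's illustration (not part of the original module): with
    # prob=[.75, .25] the two columns of the returned mask should be True
    # roughly 75% / 25% of the time and never both at once.
    idx = _make_index([.75, .25], size)
    return idx.mean(axis=0)  # approximately array([0.75, 0.25])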
def mixture_rvs(prob, size, dist, kwargs=None):
"""
Sample from a mixture of distributions.
Parameters
----------
prob : array-like
Probability of sampling from each distribution in dist
size : int
The length of the returned sample.
dist : array-like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
Say we want 5000 random variables from mixture of normals with two
distributions norm(-1,.5) and norm(1,.5) and we want to sample from the
first with probability .75 and the second with probability .25.
>>> from scipy import stats
>>> prob = [.75,.25]
>>> Y = mixture_rvs(prob, 5000, dist=[stats.norm, stats.norm], kwargs =
(dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
idx = _make_index(prob,size)
sample = np.empty(size)
for i in range(len(prob)):
sample_idx = idx[...,i]
sample_size = sample_idx.sum()
loc = kwargs[i].get('loc',0)
scale = kwargs[i].get('scale',1)
args = kwargs[i].get('args',())
sample[sample_idx] = dist[i].rvs(*args, **dict(loc=loc,scale=scale,
size=sample_size))
return sample
class MixtureDistribution(object):
'''univariate mixture distribution
    for simple case for now (unbounded support)
does not yet inherit from scipy.stats.distributions
adding pdf to mixture_rvs, some restrictions on broadcasting
Currently it does not hold any state, all arguments included in each method.
'''
#def __init__(self, prob, size, dist, kwargs=None):
def rvs(self, prob, size, dist, kwargs=None):
return mixture_rvs(prob, size, dist, kwargs=kwargs)
def pdf(self, x, prob, dist, kwargs=None):
"""
pdf a mixture of distributions.
Parameters
----------
prob : array-like
Probability of sampling from each distribution in dist
dist : array-like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
        Say we want the pdf of a mixture of two normals, norm(-1,.5) and
        norm(1,.5), where the first is weighted with probability .75 and the
        second with probability .25.
>>> from scipy import stats
>>> prob = [.75,.25]
>>> Y = mixture.pdf(x, prob, dist=[stats.norm, stats.norm], kwargs =
(dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
for i in range(len(prob)):
loc = kwargs[i].get('loc',0)
scale = kwargs[i].get('scale',1)
args = kwargs[i].get('args',())
if i == 0: #assume all broadcast the same as the first dist
pdf_ = prob[i] * dist[i].pdf(x, *args, loc=loc, scale=scale)
else:
pdf_ += prob[i] * dist[i].pdf(x, *args, loc=loc, scale=scale)
return pdf_
def cdf(self, x, prob, dist, kwargs=None):
"""
cdf of a mixture of distributions.
Parameters
----------
        x : array-like
            The points at which to evaluate the cdf.
        prob : array-like
            Probability of sampling from each distribution in dist
dist : array-like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
        Say we want the cdf of a mixture of two normals, norm(-1,.5) and
        norm(1,.5), where the first is weighted with probability .75 and the
        second with probability .25.
>>> from scipy import stats
>>> prob = [.75,.25]
        >>> Y = mixture.cdf(x, prob, dist=[stats.norm, stats.norm], kwargs =
(dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
for i in range(len(prob)):
loc = kwargs[i].get('loc',0)
scale = kwargs[i].get('scale',1)
args = kwargs[i].get('args',())
if i == 0: #assume all broadcast the same as the first dist
cdf_ = prob[i] * dist[i].cdf(x, *args, loc=loc, scale=scale)
else:
cdf_ += prob[i] * dist[i].cdf(x, *args, loc=loc, scale=scale)
return cdf_
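def _demo_mixture_sanity():
    # Editor's sketch (not part of the original module; assumes scipy is
    # available, as in the __main__ block below): the mixture pdf should
    # integrate to ~1 and the cdf should approach 1 for large x.
    from scipy import stats
    mix = MixtureDistribution()
    grid = np.linspace(-8, 8, 2001)
    kwds = (dict(loc=-1, scale=.5), dict(loc=1, scale=.5))
    p = mix.pdf(grid, [.75, .25], dist=[stats.norm, stats.norm], kwargs=kwds)
    c = mix.cdf(grid, [.75, .25], dist=[stats.norm, stats.norm], kwargs=kwds)
    return np.trapz(p, grid), c[-1]  # both should be close to 1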
def mv_mixture_rvs(prob, size, dist, nvars, **kwargs):
"""
Sample from a mixture of multivariate distributions.
Parameters
----------
prob : array-like
Probability of sampling from each distribution in dist
size : int
The length of the returned sample.
dist : array-like
An iterable of distributions instances with callable method rvs.
    nvars : int
dimension of the multivariate distribution, could be inferred instead
kwargs : tuple of dicts, optional
ignored
Examples
--------
Say we want 2000 random variables from mixture of normals with two
multivariate normal distributions, and we want to sample from the
first with probability .4 and the second with probability .6.
import statsmodels.sandbox.distributions.mv_normal as mvd
cov3 = np.array([[ 1. , 0.5 , 0.75],
[ 0.5 , 1.5 , 0.6 ],
[ 0.75, 0.6 , 2. ]])
mu = np.array([-1, 0.0, 2.0])
mu2 = np.array([4, 2.0, 2.0])
mvn3 = mvd.MVNormal(mu, cov3)
mvn32 = mvd.MVNormal(mu2, cov3/2., 4)
rvs = mix.mv_mixture_rvs([0.4, 0.6], 2000, [mvn3, mvn32], 3)
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
idx = _make_index(prob,size)
sample = np.empty((size, nvars))
for i in range(len(prob)):
sample_idx = idx[...,i]
sample_size = sample_idx.sum()
#loc = kwargs[i].get('loc',0)
#scale = kwargs[i].get('scale',1)
#args = kwargs[i].get('args',())
# use int to avoid numpy bug with np.random.multivariate_normal
sample[sample_idx] = dist[i].rvs(size=int(sample_size))
return sample
if __name__ == '__main__':
from scipy import stats
obs_dist = mixture_rvs([.25,.75], size=10000, dist=[stats.norm, stats.beta],
kwargs=(dict(loc=-1,scale=.5),dict(loc=1,scale=1,args=(1,.5))))
nobs = 10000
mix = MixtureDistribution()
## mrvs = mixture_rvs([1/3.,2/3.], size=nobs, dist=[stats.norm, stats.norm],
## kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.75)))
mix_kwds = (dict(loc=-1,scale=.25),dict(loc=1,scale=.75))
mrvs = mix.rvs([1/3.,2/3.], size=nobs, dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
grid = np.linspace(-4,4, 100)
mpdf = mix.pdf(grid, [1/3.,2/3.], dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
mcdf = mix.cdf(grid, [1/3.,2/3.], dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
doplot = 1
if doplot:
import matplotlib.pyplot as plt
plt.figure()
plt.hist(mrvs, bins=50, normed=True, color='red')
plt.title('histogram of sample and pdf')
plt.plot(grid, mpdf, lw=2, color='black')
plt.figure()
plt.hist(mrvs, bins=50, normed=True, cumulative=True, color='red')
plt.title('histogram of sample and pdf')
plt.plot(grid, mcdf, lw=2, color='black')
plt.show()
| bsd-3-clause |
btabibian/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 102 | 2319 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes that
features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
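# Editor's note (not in the original example): a quick numeric summary of the
# recovered supports; the MultiTaskLasso support should be concentrated on the
# first n_relevant_features columns, while the per-task Lasso support is more
# scattered. The variable names below are illustrative only.
support_lasso = np.mean(coef_lasso_ != 0, axis=0)
support_mtl = np.mean(coef_multi_task_lasso_ != 0, axis=0)
print("Fraction of tasks selecting each feature (Lasso):", support_lasso[:8])
print("Fraction of tasks selecting each feature (MultiTaskLasso):", support_mtl[:8])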
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
lw = 2
plt.plot(coef[:, feature_to_plot], color='seagreen', linewidth=lw,
label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], color='cornflowerblue', linewidth=lw,
label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot], color='gold', linewidth=lw,
label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
prheenan/prhUtil | python/IgorUtil.py | 2 | 8803 | # force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
# import the patrick-specific utilities
import GenUtilities as pGenUtil
import PlotUtilities as pPlotUtil
import CheckpointUtilities as pCheckUtil
from scipy.signal import savgol_filter
DEF_FILTER_CONST = 0.005 # 0.5%
BASE_GROUP = "/Volumes/group/4Patrick/"
SUBDIR_BINARIES = "PRH_AFM_Databases/BinaryFilesTimeSeparationForce/"
def getDatabaseFolder():
"""
Returns the location of the database binary folder location
Args:
None
Returns:
Where the database is, as a string.
"""
# XXX TODO: right now assumes mac-style mounting...
return BASE_GROUP + SUBDIR_BINARIES
def getDatabaseFile(fileName,extension=".hdf"):
"""
Returns the absolute path to a previously-saved file with the given filename
Path is *not* guaranteed to exist, if the file hasn't been saved already.
Args:
fileName: the name of the file (usually according to the "TraceData"
table, field "FileTimSepFor")
        extension: the required extension
Returns:
Where the file is located, an absolute path. Doesn't guarantee the file
*does* exist, just that *if* it does, it would be there.
"""
fileWithExt = pGenUtil.ensureEnds(fileName,extension)
return getDatabaseFolder() + fileWithExt
def DemoDir():
'''
:return: the absolute path to the demo directory
'''
return BASE_GROUP + "DemoData/IgorDemos/"
# all demo directories should have an input and output directory
def GetDemoInOut(demoName,baseDir=DemoDir(),raiseOnError=True):
"""
Returns the demo input and output directories, given a path baseDir and
name demoName. Recquires files to exist at "<baseDir><demoName>". If
encountering an error (e.g. permissions, something isn't mounted), raises
an error.
Args:
demoName: The name of the demo. Assumed to be the subdir under "basedir"
we want to use
baseDir: the base directory. Input and output directories are
"<baseDir><demoName>Input/" and "<baseDir><demoName>Output/", resp.
raiseOnError : if true, raises an error on an OS. otherwise, just
prints a warning that something went wrong.
Returns:
tuple of <inputDir>,<outputDir>
"""
fullBase = baseDir + demoName
inputV = pGenUtil.getSanitaryPath(fullBase + "/Input/")
outputV = pGenUtil.getSanitaryPath(fullBase + "/Output/")
try:
pGenUtil.ensureDirExists(inputV)
pGenUtil.ensureDirExists(outputV)
except OSError as e:
if (raiseOnError):
raise(e)
print("Warning, couldn't open demo directories based in " + fullBase +
". Most likely, not connected to JILA network")
return inputV,outputV
def DemoJilaOrLocal(demoName,localPath):
"""
Looks for the demo dir in the default (jila-hosted) space. If nothing is
found, looks in the paths specified by localpath (where it puts input
and output directories according to its name)
Args:
demoName: see GetDemoInOut
localPath: equivalent of baseDir in GetDemoInOut. Where we put the input and Output directories for the unit test if JILA can't be found.
Returns:
tuple of <inputDir>,<outputDir>
"""
inDir,outDir = GetDemoInOut(demoName,raiseOnError=False)
if (not pGenUtil.dirExists(inDir)):
print("Warning: Couldn't connect to JILA's Network. Using local data.")
        # get "sanitary paths" which are OS-independent (in theory..)
localPath = pGenUtil.ensureEnds(localPath,"/")
inDir = pGenUtil.getSanitaryPath(localPath)
outDir = pGenUtil.getSanitaryPath(localPath + "Output" + demoName +"/")
pGenUtil.ensureDirExists(outDir)
if (not pGenUtil.dirExists(inDir)):
# whoops...
raise IOError("Demo Directory {:s} not found anywhere.".\
format(inDir))
return inDir,outDir
# read a txt or similarly formatted file
def readIgorWave(mFile,skip_header=3,skip_footer=1,comments="X "):
data = np.genfromtxt(mFile,comments=comments,skip_header=skip_header,
skip_footer=skip_footer)
return data
def savitskyFilter(inData,nSmooth = None,degree=2):
if (nSmooth is None):
nSmooth = int(len(inData)/200)
# POST: have an nSmooth
if (nSmooth % 2 == 0):
# must be odd
nSmooth += 1
# get the filtered version of the data
return savgol_filter(inData,nSmooth,degree)
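def _demo_savitskyFilter(n=1000):
    # Editor's sketch (illustrative only): smoothing a noisy sine with the
    # default window (~0.5% of the trace, forced to an odd length) keeps the
    # array length while reducing point-to-point noise.
    x = np.linspace(0, 4 * np.pi, n)
    noisy = np.sin(x) + 0.1 * np.random.randn(n)
    smooth = savitskyFilter(noisy)
    return noisy.shape == smooth.shape  # True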
def SplitIntoApproachAndRetract(sep,force,sepToSplit=None):
'''
Given a full force/sep curve, returns the approach/retract
    according to before/after sepToSplit, cutting out the surface (assumed
    at minimum separation)
:param sep: the separation, units not important. minimum is surface
:param force: the force, units not important
    :param sepToSplit: the separation where we think the surface is. same units
as sep
'''
# find where sep is closest to sepToSplit before/after minIdx (surface)
if (sepToSplit is None):
sepToSplit = np.min(sep)
surfIdx = np.argmin(sep)
sepAppr = sep[:surfIdx]
sepRetr = sep[surfIdx:]
apprIdx = np.argmin(np.abs(sepAppr-sepToSplit))
retrIdx = surfIdx + np.argmin(np.abs(sepRetr-sepToSplit))
forceAppr = force[:apprIdx]
forceRetr = force[retrIdx:]
sepAppr = sep[:apprIdx]
sepRetr = sep[retrIdx:]
return sepAppr,sepRetr,forceAppr,forceRetr
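def _demo_SplitIntoApproachAndRetract(n=200):
    # Editor's sketch (synthetic data, illustrative only): a V-shaped
    # separation ramp split at sepToSplit=2 should yield an approach that
    # ends near that separation and a retract that starts near it.
    sep = np.abs(np.linspace(-5, 5, n))
    force = np.zeros_like(sep)
    sepAppr, sepRetr, forceAppr, forceRetr = SplitIntoApproachAndRetract(sep, force, sepToSplit=2.0)
    return sepAppr[-1], sepRetr[0]  # both approximately 2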
def NormalizeSepForce(sep,force,surfIdx=None,normalizeSep=True,
normalizeFor=True,sensibleUnits=True):
if (sensibleUnits):
sepUnits = sep * 1e9
forceUnits = force * 1e12
else:
sepUnits = sep
forceUnits= force
if (surfIdx is None):
surfIdx = np.argmin(sep)
if (normalizeSep):
sepUnits -= sepUnits[surfIdx]
if (normalizeFor):
# reverse: sort low to high
sortIdx = np.argsort(sep)[::-1]
# get the percentage of points we want
percent = 0.05
nPoints = int(percent*sortIdx.size)
idxForMedian = sortIdx[:nPoints]
# get the median force at these indices
forceMedUnits = np.median(forceUnits[idxForMedian])
# correct the force
forceUnits -= forceMedUnits
# multiply it by -1 (flip)
forceUnits *= -1
return sepUnits,forceUnits
# plot a force extension curve with approach and retract
def PlotFec(sep,force,surfIdx = None,normalizeSep=True,normalizeFor=True,
filterN=None,sensibleUnits=True):
"""
Plot a force extension curve
:param sep: The separation in meters
:param force: The force in meters
:param surfIdx: The index between approach and retract. if not present,
intuits approximate index from minmmum Sep
:param normalizeSep: If true, then zeros sep to its minimum
    :param normalizeFor: If true, then zeros force to the median-filtered last
    5% of data, by separation (presumably, already detached)
    :param filterN: Plots the raw data in grey, and filters
    the force to the number of points given. If None, assumes default % of curve
:param sensibleUnits: Plots in nm and pN, defaults to true
"""
if (surfIdx is None):
surfIdx = np.argmin(sep)
sepUnits,forceUnits = NormalizeSepForce(sep,force,surfIdx,normalizeSep,
normalizeFor,sensibleUnits)
if (filterN is None):
filterN = int(np.ceil(DEF_FILTER_CONST*sepUnits.size))
# POST: go ahead and normalize/color
sepAppr = sepUnits[:surfIdx]
sepRetr = sepUnits[surfIdx:]
forceAppr = forceUnits[:surfIdx]
forceRetr = forceUnits[surfIdx:]
PlotFilteredSepForce(sepAppr,forceAppr,filterN=filterN,color='r',
label="Approach")
PlotFilteredSepForce(sepRetr,forceRetr,filterN=filterN,color='b',
label="Retract")
plt.xlim([min(sepUnits),max(sepUnits)])
pPlotUtil.lazyLabel("Separation [nm]","Force [pN]","Force Extension Curve")
return sepUnits,forceUnits
def filterForce(force,filterN=None):
if (filterN is None):
filterN = int(np.ceil(DEF_FILTER_CONST*force.size))
return savitskyFilter(force,filterN)
def PlotFilteredSepForce(sep,force,filterN=None,labelRaw=None,
linewidthFilt=2.0,color='r',**kwargs):
forceFilt =filterForce(force,filterN)
plt.plot(sep,forceFilt,color=color,lw=linewidthFilt,**kwargs)
# plot the raw data as grey
plt.plot(sep,force,color='k',label=labelRaw,alpha=0.3)
return forceFilt
| gpl-2.0 |
mathhun/scipy_2015_sklearn_tutorial | notebooks/figures/plot_rbf_svm_parameters.py | 19 | 2018 | import matplotlib.pyplot as plt
import numpy as np
from sklearn.svm import SVC
from sklearn.datasets import make_blobs
from .plot_2d_separator import plot_2d_separator
def make_handcrafted_dataset():
# a carefully hand-designed dataset lol
X, y = make_blobs(centers=2, random_state=4, n_samples=30)
y[np.array([7, 27])] = 0
mask = np.ones(len(X), dtype=np.bool)
mask[np.array([0, 1, 5, 26])] = 0
X, y = X[mask], y[mask]
return X, y
def plot_rbf_svm_parameters():
X, y = make_handcrafted_dataset()
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
for ax, C in zip(axes, [1e0, 5, 10, 100]):
ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])
svm = SVC(kernel='rbf', C=C).fit(X, y)
plot_2d_separator(svm, X, ax=ax, eps=.5)
ax.set_title("C = %f" % C)
fig, axes = plt.subplots(1, 4, figsize=(15, 3))
for ax, gamma in zip(axes, [0.1, .5, 1, 10]):
ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])
svm = SVC(gamma=gamma, kernel='rbf', C=1).fit(X, y)
plot_2d_separator(svm, X, ax=ax, eps=.5)
ax.set_title("gamma = %f" % gamma)
def plot_svm(log_C, log_gamma):
X, y = make_handcrafted_dataset()
C = 10. ** log_C
gamma = 10. ** log_gamma
svm = SVC(kernel='rbf', C=C, gamma=gamma).fit(X, y)
ax = plt.gca()
plot_2d_separator(svm, X, ax=ax, eps=.5)
# plot data
ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])
# plot support vectors
sv = svm.support_vectors_
ax.scatter(sv[:, 0], sv[:, 1], s=230, facecolors='none', zorder=10, linewidth=3)
ax.set_title("C = %.4f gamma = %.4f" % (C, gamma))
def plot_svm_interactive():
from IPython.html.widgets import interactive, FloatSlider
C_slider = FloatSlider(min=-3, max=3, step=.1, value=0, readout=False)
gamma_slider = FloatSlider(min=-2, max=2, step=.1, value=0, readout=False)
return interactive(plot_svm, log_C=C_slider, log_gamma=gamma_slider)
| cc0-1.0 |
bcaine/maddux | maddux/environment.py | 1 | 6599 | """
Our experiment environment.
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.animation as animation
GRAVITY = -9.81
class Environment:
def __init__(self, dimensions=None, dynamic_objects=None,
static_objects=None, robot=None):
"""An environment to run experiments in
:param dimensions: (Optional) The dimensions of env
:type dimensions: 1x3 numpy.array or None
:param dynamic_objects: (Optional) A list of objects that can move
:type dynamic_objects: list of maddux.objects.DynamicObject or None
:param static_objects: (Optional) A list of stationary objects
:type static_objects: list of maddux.objects.StaticObject or None
:param robot: (Optional) A robot to simulate
:type robot: maddux.robot.Arm or None
:rtype: None
"""
if dimensions is not None:
self.dimensions = np.array(dimensions)
else:
self.dimensions = np.array([10.0, 10.0, 100.0])
self.dynamic_objects = dynamic_objects if dynamic_objects else []
self.static_objects = static_objects if static_objects else []
self.robot = robot
def run(self, duration):
"""Run for a certain duration
:param duration: duration to run environment in seconds
:type duration: integer
:rtype: None
"""
duration_ms = int(duration * 1000)
for _ in xrange(duration_ms):
map(lambda obj: obj.step(), self.dynamic_objects)
if self.collision():
break
def animate(self, duration=None, save_path=None):
"""Animates the running of the program
:param duration: (Optional) Duration of animation in seconds
:type duration: int or None
:param save_path: (Optional) Path to save mp4 in instead of displaying
:type save_path: String or None
:rtype: None
"""
fps = 15
dynamic_iter_per_frame = 10 * fps
if duration is None:
if self.robot is None:
# Sensible Default
frames = fps * 5
else:
frames = len(self.robot.qs)
else:
frames = int(fps * duration)
def update(i):
ax.clear()
for _ in xrange(dynamic_iter_per_frame):
map(lambda obj: obj.step(), self.dynamic_objects)
# Check for collisions
self.collision()
if self.robot is not None:
next_q = self.robot.qs[i]
self.robot.update_angles(next_q)
self.plot(ax=ax, show=False)
fig = plt.figure(figsize=(8, 8))
ax = Axes3D(fig)
self.plot(ax=ax, show=False)
# If we don't assign its return to something, it doesn't run.
# Seems like really weird behavior..
ani = animation.FuncAnimation(fig, update, frames=frames, blit=False)
if save_path is None:
plt.show()
else:
Writer = animation.writers['ffmpeg']
writer = Writer(
fps=fps, metadata=dict(
artist='Maddux'), bitrate=1800)
ani.save(save_path, writer=writer)
def hypothetical_landing_position(self):
"""Find the position that the ball would land (or hit a wall)
:returns: Position (x, y, z) of hypothetical landing position of a
thrown object based on end effector velocity.
:rtype: numpy.ndarray or None
"""
pos = self.robot.end_effector_position().copy()
# Only need linear velocity
v = self.robot.end_effector_velocity()[0:3]
for t in np.linspace(0, 15, 5000):
# Check if it hit a target
for static in self.static_objects:
if static.is_hit(pos):
return pos.copy()
# Or a wall
for i in range(len(pos)):
in_negative_space = pos[i] <= 0
past_boundary = pos[i] >= self.dimensions[i]
if in_negative_space or past_boundary:
return pos.copy()
# Otherwise step forward
v[2] += t * GRAVITY
pos += t * v
# If we never hit anything (which is completely impossible (TM))
# return None
return None
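    def _landing_sketch(self, p0, v0, dt=0.001):
        """Editor's sketch, not used by the class: a standard fixed-timestep
        Euler integration of a free projectile under gravity, for comparison
        with hypothetical_landing_position above. `p0`/`v0` are an initial
        position and linear velocity (copies, illustrative only).
        :rtype: numpy.ndarray
        """
        p = np.array(p0, dtype=float)
        v = np.array(v0, dtype=float)
        while p[2] > 0:
            v[2] += GRAVITY * dt  # gravity acts on the vertical component
            p += v * dt           # advance the position by one fixed step
        return p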
def collision(self):
"""Check if any dynamic objects collide with any static
objects or walls.
:return: Whether there was a collision
:rtype: bool
"""
for dynamic in self.dynamic_objects:
if dynamic.attached:
continue
for static in self.static_objects:
if static.is_hit(dynamic.position):
dynamic.attach()
return True
for i in range(len(dynamic.position)):
in_negative_space = dynamic.position[i] <= 0
past_boundary = (dynamic.position[i] >=
self.dimensions[i])
if in_negative_space or past_boundary:
dynamic.attach()
return True
return False
def plot(self, ax=None, show=True):
"""Plot throw trajectory and ball
:param ax: Current axis if a figure already exists
:type ax: matplotlib.axes
:param show: (Default: True) Whether to show the figure
:type show: bool
:rtype: None
"""
if ax is None:
fig = plt.figure(figsize=(12, 12))
ax = Axes3D(fig)
# Set the limits to be environment ranges
ax.set_xlim([0, self.dimensions[0]])
ax.set_ylim([0, self.dimensions[1]])
if self.dynamic_objects:
zmax = max([o.positions[:, 2].max()
for o in self.dynamic_objects])
else:
zmax = 10
ax.set_zlim([0, max(10, zmax)])
# And set our labels
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
for dynamic in self.dynamic_objects:
# Plot Trajectory
ax.plot(dynamic.positions[:, 0], dynamic.positions[:, 1],
dynamic.positions[:, 2], 'r--', label='Trajectory')
# Plot objects
map(lambda obj: obj.plot(ax), self.dynamic_objects)
map(lambda obj: obj.plot(ax), self.static_objects)
if self.robot:
self.robot.plot(ax)
if show:
plt.show()
| mit |
matpalm/malmomo | viz_advantage_surface.py | 1 | 3160 | #!/usr/bin/env python
# hacktasic viz of the quadratic surface of advantage around the max output
# for a couple of clear block on right / left / center cases
import agents
import argparse
import base_network
import Image
import numpy as np
import models
import sys
import tensorflow as tf
import replay_memory
import util
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
from matplotlib import cm
np.set_printoptions(precision=5, threshold=10000, suppress=True, linewidth=10000)
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--width', type=int, default=160, help="render width")
parser.add_argument('--height', type=int, default=120, help="render height")
agents.add_opts(parser)
models.add_opts(parser)
replay_memory.add_opts(parser)
util.add_opts(parser)
opts = parser.parse_args()
#opts.ckpt_dir = "runs/14/d/ckpts" # last known good
print >>sys.stderr, "OPTS", opts
# init our rl_agent
agent_cstr = eval("agents.NafAgent")
agent = agent_cstr(opts)
an = agent.network
# prepare three plots; one for each of block on left, in center, or on right
fig = plt.figure(figsize=plt.figaspect(0.3))
plt.title(opts.ckpt_dir)
R = np.arange(-1, 1.25, 0.25)
X, Y = np.meshgrid(R, R)
for plot_idx, (img_file, desc) in enumerate([("runs/14/d/imgs/ep_00007/e0000.png", "on left"),
("runs/14/d/imgs/ep_00007/e0019.png", "center"),
("runs/14/d/imgs/ep_00007/e0034.png", "on right")]):
print "calculating for", desc, "..."
# slurp in bitmap
img = Image.open(img_file)
img = np.array(img)[:,:,:3]
# collect q-value for all x, y values in one hit
all_x_y_pairs = np.stack(zip(np.ravel(X), np.ravel(Y)))
img_repeated = [img] * all_x_y_pairs.shape[0]
q_values = agent.sess.run(an.q_value,
feed_dict={an.input_state: img_repeated,
an.input_action: all_x_y_pairs,
base_network.FLIP_HORIZONTALLY: False})
Z = q_values.reshape(X.shape)
# plot as surface
ax = fig.add_subplot(1,3,plot_idx+1, projection='3d')
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, color='b', cmap=cm.coolwarm, linewidth=1)
ax.set_title(desc)
ax.set_xlabel("turn")
ax.set_ylabel("move")
ax.set_zlabel("q")
# include single vertical line where q was maximised (according to output_action)
output = agent.sess.run(an.output_action,
feed_dict={an.input_state: [img],
base_network.FLIP_HORIZONTALLY: False})
turn, move = np.squeeze(output)
q_value = agent.sess.run(an.q_value,
feed_dict={an.input_state: [img],
an.input_action: [[turn, move]],
base_network.FLIP_HORIZONTALLY: False})
print "turn", turn, "move", move, "=> q", np.squeeze(q_value), "Zmin=", np.min(Z), "Zmax=", np.max(Z)
ax.plot([turn, turn], [move, move], [np.min(Z), np.max(Z)], linewidth=5)
# render
plt.savefig("/tmp/test.png")
plt.show()
| mit |
AVGInnovationLabs/DoNotSnap | train.py | 1 | 4886 | import cv2
import sys
import pickle
import numpy as np
import matplotlib.pyplot as plt
from AffineInvariantFeatures import AffineInvariant
from TemplateMatcher import TemplateMatch, Templates
from PIL import Image
from itertools import izip_longest
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report, roc_curve, auc, confusion_matrix, accuracy_score
from sklearn.pipeline import Pipeline
from sklearn.tree import export_graphviz, DecisionTreeClassifier
def line_count(filename):
with open(filename) as data:
return sum(1 for line in data)
def read_image(filename):
return np.array(Image.open(filename.strip('\n')).convert('L'), np.uint8)
def read_file(filename, limit=0):
n = 0
lines = line_count(filename)
with open(filename) as data:
while True:
line = next(data, None)
if not line or (limit and n >= limit):
break
n += 1
print '\r%s %d/%d' % (filename, n, limit or lines),
try:
yield read_image(line)
except:
continue
def get_templates():
return np.array(list(read_file('templates.txt')))
def get_images(limit=0):
positive = read_file('positive.txt', limit / 2 if limit else 0)
negative = read_file('negative.txt', limit / 2 if limit else 0)
for p, n in izip_longest(positive, negative):
if p is not None:
yield (1, p)
if n is not None:
yield (0, n)
def get_dataset(limit):
return map(np.asarray, zip(*get_images(limit)))
def plot_roc(fpr, tpr, roc_auc):
# Plot all ROC curves
plt.figure()
plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Affine Invariant SURF + Decision Tree Classifier')
plt.legend(loc='lower right')
plt.show()
def plot_importance(feature_count, importances, indices):
plt.figure()
plt.title('Feature importances')
plt.bar(range(feature_count), importances[indices], color='r', align='center')
plt.xticks(range(feature_count), indices)
plt.xlim([-1, feature_count])
plt.show()
def main(name, dataset_size):
templates = get_templates()
print 'templates: %d' % len(templates)
labels, samples = get_dataset(dataset_size)
print 'samples: %d' % len(samples)
extractor = cv2.FeatureDetector_create('SURF')
detector = cv2.DescriptorExtractor_create('SURF')
print 'applying affine invariant transform'
affine = AffineInvariant(extractor, detector)
templates = affine.transform(templates)
samples = affine.transform(samples)
model = Pipeline([
('match', TemplateMatch(Templates(templates))), # XXX: hack to bypass cloning error
# ('reduce_dim', PCA(n_components = 12 * 6))
])
samples = model.fit_transform(samples)
rng = np.random.RandomState()
X_train, X_test, y_train, y_test = train_test_split(samples, labels, test_size=0.5, random_state=rng)
print 'train: %d, test: %d' % (len(X_train), len(X_test))
params = dict(
min_samples_split = [5, 6, 7, 8, 9, 10],
min_samples_leaf = [3, 4, 5, 6, 7],
max_leaf_nodes = [10, 9, 8, 7, 6],
class_weight = [{1: w} for w in [10, 8, 4, 2, 1]]
)
tree = DecisionTreeClassifier(max_depth=4, random_state=rng)
cvmodel = GridSearchCV(tree, params, cv=10, n_jobs=cv2.getNumberOfCPUs())
cvmodel.fit(X_train, y_train)
print 'grid scores'
for params, mean_score, scores in cvmodel.grid_scores_:
print '%0.3f (+/-%0.03f) for %r' % (mean_score, scores.std() * 2, params)
print 'best parameters'
print cvmodel.best_params_
importances = cvmodel.best_estimator_.feature_importances_
indices = np.argsort(importances)[::-1]
plot_importance(6, importances, indices)
y_pred = cvmodel.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print 'accuracy: %f' % accuracy
print classification_report(y_test, y_pred)
print confusion_matrix(y_test, y_pred)
y_score = cvmodel.predict_proba(X_test)[:, 1]
fpr, tpr, _ = roc_curve(y_test, y_score)
roc_auc = auc(fpr, tpr)
plot_roc(fpr, tpr, roc_auc)
export_graphviz(cvmodel.best_estimator_, out_file=name + '.dot', class_names=['background', 'badge'], filled=True, rounded=True, special_characters=True)
pickle.dump(dict(params=params, pipe=model, model=cvmodel.best_estimator_), open(name + '.pkl', 'wb'))
if __name__ == '__main__':
name = sys.argv[1] if len(sys.argv) >= 2 else 'classifier'
dataset_size = int(sys.argv[2]) if len(sys.argv) >= 3 else 0
main(name, dataset_size)
| gpl-3.0 |
hstau/covar-cryo | covariance/rotatefill.py | 1 | 1524 | '''function [out] = imrotateFill(inp, angle)
% function [out] = imrotateFill(inp)
% Rotates a 2D image counterclockwise by angle in degrees
% Output image has the same dimension as input.
% Undefined regions are filled in by repeating the original image
% Note: input images must be square
%
% Copyright (c) UWM, Peter Schwander Mar. 20, 2014
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
version = 'imrotateFill, V0.9';
Ported to python. Hstau Liao Oct. 2016
'''
import numpy as np
import logging,sys
import math
from scipy.ndimage.interpolation import rotate
import matplotlib.pyplot as plt
def op(input, angle, visual=False):
nPix = input.shape[0]
inpRep = np.tile(input, (3, 3))
outRep = rotate(inpRep, angle, reshape=False)
out = outRep[nPix:2 * nPix, nPix:2 * nPix]
if visual:
plt.subplot(2, 2, 1)
plt.imshow(input,cmap = plt.get_cmap('gray'))
plt.title('Input')
plt.subplot(2, 2, 2)
plt.imshow(out, cmap=plt.get_cmap('gray'))
plt.title('Output')
plt.subplot(2, 2, 3)
plt.imshow(inpRep, cmap=plt.get_cmap('gray'))
plt.title('Input 3x3')
plt.subplot(2, 2, 4)
plt.imshow(outRep, cmap=plt.get_cmap('gray'))
plt.title('Output 3x3')
plt.show()
return out
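def _demo_shape_preserved(n=6, angle=30.0):
    # Editor's sketch (illustrative only): rotating an n x n image keeps the
    # n x n shape because the 3n x 3n tiled copy is cropped back to its
    # central tile, as described in the docstring above.
    out = op(np.ones((n, n)), angle)
    return out.shape  # (n, n)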
if __name__ == '__main__':
# tested using a 6x6 image
img = np.loadtxt(sys.argv[1])
ang = float(sys.argv[2]) # in degrees
visual = bool(sys.argv[3])
result = op(img,ang,visual)
| gpl-2.0 |
mne-tools/mne-tools.github.io | 0.13/_downloads/plot_compute_raw_data_spectrum.py | 8 | 3431 | """
==================================================
Compute the power spectral density of raw data
==================================================
This script shows how to compute the power spectral density (PSD)
of measurements on a raw dataset. It also shows the effect of applying SSP
to the data to reduce ECG and EOG artifacts.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io, read_proj, read_selection
from mne.datasets import sample
from mne.time_frequency import psd_multitaper
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
proj_fname = data_path + '/MEG/sample/sample_audvis_eog-proj.fif'
tmin, tmax = 0, 60 # use the first 60s of data
# Setup for reading the raw data (to save memory, crop before loading)
raw = io.read_raw_fif(raw_fname).crop(tmin, tmax).load_data()
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# Add SSP projection vectors to reduce EOG and ECG artifacts
projs = read_proj(proj_fname)
raw.add_proj(projs, remove_existing=True)
fmin, fmax = 2, 300 # look at frequencies between 2 and 300Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
# Let's first check out all channel types
raw.plot_psd(area_mode='range', tmax=10.0, show=False)
# Now let's focus on a smaller subset:
# Pick MEG magnetometers in the Left-temporal region
selection = read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg='mag', eeg=False, eog=False,
stim=False, exclude='bads', selection=selection)
# Let's just look at the first few channels for demonstration purposes
picks = picks[:4]
plt.figure()
ax = plt.axes()
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_jobs=1, proj=False, ax=ax, color=(0, 0, 1), picks=picks,
show=False)
# And now do the same with SSP applied
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_jobs=1, proj=True, ax=ax, color=(0, 1, 0), picks=picks,
show=False)
# And now do the same with SSP + notch filtering
# Pick all channels for notch since the SSP projection mixes channels together
raw.notch_filter(np.arange(60, 241, 60), n_jobs=1)
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_jobs=1, proj=True, ax=ax, color=(1, 0, 0), picks=picks,
show=False)
ax.set_title('Four left-temporal magnetometers')
plt.legend(['Without SSP', 'With SSP', 'SSP + Notch'])
# Alternatively, you may also create PSDs from Raw objects with ``psd_*``
f, ax = plt.subplots()
psds, freqs = psd_multitaper(raw, low_bias=True, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax, proj=True, picks=picks,
n_jobs=1)
psds = 10 * np.log10(psds)
psds_mean = psds.mean(0)
psds_std = psds.std(0)
ax.plot(freqs, psds_mean, color='k')
ax.fill_between(freqs, psds_mean - psds_std, psds_mean + psds_std,
color='k', alpha=.5)
ax.set(title='Multitaper PSD', xlabel='Frequency',
ylabel='Power Spectral Density (dB)')
plt.show()
| bsd-3-clause |
ashhher3/seaborn | seaborn/tests/test_utils.py | 11 | 11338 | """Tests for plotting utilities."""
import warnings
import tempfile
import shutil
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import nose
import nose.tools as nt
from nose.tools import assert_equal, raises
import numpy.testing as npt
import pandas.util.testing as pdt
from distutils.version import LooseVersion
pandas_has_categoricals = LooseVersion(pd.__version__) >= "0.15"
from pandas.util.testing import network
try:
from bs4 import BeautifulSoup
except ImportError:
BeautifulSoup = None
from . import PlotTestCase
from .. import utils, rcmod
from ..utils import get_dataset_names, load_dataset
a_norm = np.random.randn(100)
def test_pmf_hist_basics():
"""Test the function to return barplot args for pmf hist."""
out = utils.pmf_hist(a_norm)
assert_equal(len(out), 3)
x, h, w = out
assert_equal(len(x), len(h))
# Test simple case
a = np.arange(10)
x, h, w = utils.pmf_hist(a, 10)
nose.tools.assert_true(np.all(h == h[0]))
def test_pmf_hist_widths():
"""Test histogram width is correct."""
x, h, w = utils.pmf_hist(a_norm)
assert_equal(x[1] - x[0], w)
def test_pmf_hist_normalization():
"""Test that output data behaves like a PMF."""
x, h, w = utils.pmf_hist(a_norm)
nose.tools.assert_almost_equal(sum(h), 1)
nose.tools.assert_less_equal(h.max(), 1)
def test_pmf_hist_bins():
"""Test bin specification."""
x, h, w = utils.pmf_hist(a_norm, 20)
assert_equal(len(x), 20)
def test_ci_to_errsize():
"""Test behavior of ci_to_errsize."""
cis = [[.5, .5],
[1.25, 1.5]]
heights = [1, 1.5]
actual_errsize = np.array([[.5, 1],
[.25, 0]])
test_errsize = utils.ci_to_errsize(cis, heights)
npt.assert_array_equal(actual_errsize, test_errsize)
def test_desaturate():
"""Test color desaturation."""
out1 = utils.desaturate("red", .5)
assert_equal(out1, (.75, .25, .25))
out2 = utils.desaturate("#00FF00", .5)
assert_equal(out2, (.25, .75, .25))
out3 = utils.desaturate((0, 0, 1), .5)
assert_equal(out3, (.25, .25, .75))
out4 = utils.desaturate("red", .5)
assert_equal(out4, (.75, .25, .25))
@raises(ValueError)
def test_desaturation_prop():
"""Test that pct outside of [0, 1] raises exception."""
utils.desaturate("blue", 50)
def test_saturate():
"""Test performance of saturation function."""
out = utils.saturate((.75, .25, .25))
assert_equal(out, (1, 0, 0))
def test_iqr():
"""Test the IQR function."""
a = np.arange(5)
iqr = utils.iqr(a)
assert_equal(iqr, 2)
class TestSpineUtils(PlotTestCase):
sides = ["left", "right", "bottom", "top"]
outer_sides = ["top", "right"]
inner_sides = ["left", "bottom"]
offset = 10
original_position = ("outward", 0)
offset_position = ("outward", offset)
def test_despine(self):
f, ax = plt.subplots()
for side in self.sides:
nt.assert_true(ax.spines[side].get_visible())
utils.despine()
for side in self.outer_sides:
nt.assert_true(~ax.spines[side].get_visible())
for side in self.inner_sides:
nt.assert_true(ax.spines[side].get_visible())
utils.despine(**dict(zip(self.sides, [True] * 4)))
for side in self.sides:
nt.assert_true(~ax.spines[side].get_visible())
def test_despine_specific_axes(self):
f, (ax1, ax2) = plt.subplots(2, 1)
utils.despine(ax=ax2)
for side in self.sides:
nt.assert_true(ax1.spines[side].get_visible())
for side in self.outer_sides:
nt.assert_true(~ax2.spines[side].get_visible())
for side in self.inner_sides:
nt.assert_true(ax2.spines[side].get_visible())
def test_despine_with_offset(self):
f, ax = plt.subplots()
for side in self.sides:
nt.assert_equal(ax.spines[side].get_position(),
self.original_position)
utils.despine(ax=ax, offset=self.offset)
for side in self.sides:
is_visible = ax.spines[side].get_visible()
new_position = ax.spines[side].get_position()
if is_visible:
nt.assert_equal(new_position, self.offset_position)
else:
nt.assert_equal(new_position, self.original_position)
def test_despine_with_offset_specific_axes(self):
f, (ax1, ax2) = plt.subplots(2, 1)
utils.despine(offset=self.offset, ax=ax2)
for side in self.sides:
nt.assert_equal(ax1.spines[side].get_position(),
self.original_position)
if ax2.spines[side].get_visible():
nt.assert_equal(ax2.spines[side].get_position(),
self.offset_position)
else:
nt.assert_equal(ax2.spines[side].get_position(),
self.original_position)
def test_despine_trim_spines(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_xlim(.75, 3.25)
utils.despine(trim=True)
for side in self.inner_sides:
bounds = ax.spines[side].get_bounds()
nt.assert_equal(bounds, (1, 3))
def test_despine_trim_inverted(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_ylim(.85, 3.15)
ax.invert_yaxis()
utils.despine(trim=True)
for side in self.inner_sides:
bounds = ax.spines[side].get_bounds()
nt.assert_equal(bounds, (1, 3))
def test_despine_trim_noticks(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_yticks([])
utils.despine(trim=True)
nt.assert_equal(ax.get_yticks().size, 0)
def test_offset_spines_warns(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
f, ax = plt.subplots()
utils.offset_spines(offset=self.offset)
nt.assert_true('deprecated' in str(w[0].message))
nt.assert_true(issubclass(w[0].category, UserWarning))
def test_offset_spines(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
f, ax = plt.subplots()
for side in self.sides:
nt.assert_equal(ax.spines[side].get_position(),
self.original_position)
utils.offset_spines(offset=self.offset)
for side in self.sides:
nt.assert_equal(ax.spines[side].get_position(),
self.offset_position)
def test_offset_spines_specific_axes(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
f, (ax1, ax2) = plt.subplots(2, 1)
utils.offset_spines(offset=self.offset, ax=ax2)
for side in self.sides:
nt.assert_equal(ax1.spines[side].get_position(),
self.original_position)
nt.assert_equal(ax2.spines[side].get_position(),
self.offset_position)
def test_ticklabels_overlap():
rcmod.set()
f, ax = plt.subplots(figsize=(2, 2))
f.tight_layout() # This gets the Agg renderer working
assert not utils.axis_ticklabels_overlap(ax.get_xticklabels())
big_strings = "abcdefgh", "ijklmnop"
ax.set_xlim(-.5, 1.5)
ax.set_xticks([0, 1])
ax.set_xticklabels(big_strings)
assert utils.axis_ticklabels_overlap(ax.get_xticklabels())
x, y = utils.axes_ticklabels_overlap(ax)
assert x
assert not y
def test_categorical_order():
x = ["a", "c", "c", "b", "a", "d"]
y = [3, 2, 5, 1, 4]
order = ["a", "b", "c", "d"]
out = utils.categorical_order(x)
nt.assert_equal(out, ["a", "c", "b", "d"])
out = utils.categorical_order(x, order)
nt.assert_equal(out, order)
out = utils.categorical_order(x, ["b", "a"])
nt.assert_equal(out, ["b", "a"])
out = utils.categorical_order(np.array(x))
nt.assert_equal(out, ["a", "c", "b", "d"])
out = utils.categorical_order(pd.Series(x))
nt.assert_equal(out, ["a", "c", "b", "d"])
out = utils.categorical_order(y)
nt.assert_equal(out, [1, 2, 3, 4, 5])
out = utils.categorical_order(np.array(y))
nt.assert_equal(out, [1, 2, 3, 4, 5])
out = utils.categorical_order(pd.Series(y))
nt.assert_equal(out, [1, 2, 3, 4, 5])
if pandas_has_categoricals:
x = pd.Categorical(x, order)
out = utils.categorical_order(x)
nt.assert_equal(out, list(x.categories))
x = pd.Series(x)
out = utils.categorical_order(x)
nt.assert_equal(out, list(x.cat.categories))
out = utils.categorical_order(x, ["b", "a"])
nt.assert_equal(out, ["b", "a"])
x = ["a", np.nan, "c", "c", "b", "a", "d"]
out = utils.categorical_order(x)
nt.assert_equal(out, ["a", "c", "b", "d"])
if LooseVersion(pd.__version__) >= "0.15":
def check_load_dataset(name):
ds = load_dataset(name, cache=False)
assert(isinstance(ds, pd.DataFrame))
def check_load_cached_dataset(name):
# Test the cacheing using a temporary file.
# With Python 3.2+, we could use the tempfile.TemporaryDirectory()
# context manager instead of this try...finally statement
tmpdir = tempfile.mkdtemp()
try:
# download and cache
ds = load_dataset(name, cache=True, data_home=tmpdir)
# use cached version
ds2 = load_dataset(name, cache=True, data_home=tmpdir)
pdt.assert_frame_equal(ds, ds2)
finally:
shutil.rmtree(tmpdir)
@network(url="https://github.com/mwaskom/seaborn-data")
def test_get_dataset_names():
if not BeautifulSoup:
raise nose.SkipTest("No BeautifulSoup available for parsing html")
names = get_dataset_names()
assert(len(names) > 0)
assert(u"titanic" in names)
@network(url="https://github.com/mwaskom/seaborn-data")
def test_load_datasets():
if not BeautifulSoup:
raise nose.SkipTest("No BeautifulSoup available for parsing html")
# Heavy test to verify that we can load all available datasets
for name in get_dataset_names():
# unfortunately @network somehow obscures this generator so it
# does not get in effect, so we need to call explicitly
# yield check_load_dataset, name
check_load_dataset(name)
@network(url="https://github.com/mwaskom/seaborn-data")
def test_load_cached_datasets():
if not BeautifulSoup:
raise nose.SkipTest("No BeautifulSoup available for parsing html")
# Heavy test to verify that we can load all available datasets
for name in get_dataset_names():
# unfortunately @network somehow obscures this generator so it
# does not get in effect, so we need to call explicitly
# yield check_load_dataset, name
check_load_cached_dataset(name)
| bsd-3-clause |
parekhmitchell/Machine-Learning | Machine Learning A-Z Template Folder/Part 2 - Regression/Section 8 - Decision Tree Regression/regression_template.py | 22 | 1424 | # Regression Template
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
# Splitting the dataset into the Training set and Test set
"""from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)"""
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
# Fitting the Regression Model to the dataset
# Create your regressor here
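# For example (a sketch only, assuming scikit-learn's DecisionTreeRegressor to
# match this section's title; any estimator exposing fit/predict fits here):
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state = 0)  # deterministic splits
regressor.fit(X, y)                                  # learn position level -> salary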
# Predicting a new result
y_pred = regressor.predict([[6.5]])
# Visualising the Regression results
plt.scatter(X, y, color = 'red')
plt.plot(X, regressor.predict(X), color = 'blue')
plt.title('Truth or Bluff (Regression Model)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# Visualising the Regression results (for higher resolution and smoother curve)
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color = 'red')
plt.plot(X_grid, regressor.predict(X_grid), color = 'blue')
plt.title('Truth or Bluff (Regression Model)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
| mit |
jhuapl-boss/intern | examples/dvid/general_test.py | 1 | 3757 | import intern
from intern.remote.dvid import DVIDRemote
from intern.resource.dvid.resource import DataInstanceResource
from intern.resource.dvid.resource import RepositoryResource
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
########### NOTE ###########
# This test requires an accessible DVID instance
# DVID Data fetch:
dvid = DVIDRemote({"protocol": "http", "host": "localhost:8001",})
DATA_INSTANCE = "ex_EM"
ALIAS = "Test_alias"
########### Test Project API ###########
## Create DataInstanceResource and force the creation of a RepositoryResource
instance_setup_em = DataInstanceResource(
DATA_INSTANCE, None, "uint8blk", ALIAS, "Example channel.", datatype="uint8"
)
# Get the channel and create a project
instance_actual_repo = dvid.create_project(instance_setup_em)
print("Repo UUID:" + instance_actual_repo)
# Create an instance within given repo(UUID)
instance_setup_anno = DataInstanceResource(
DATA_INSTANCE + "_the_second",
instance_actual_repo,
"uint8blk",
ALIAS,
"Example channel.",
datatype="uint8",
)
instance_actual_anno_uuid = dvid.create_project(instance_setup_anno)
print("Data Instance UUID: {}".format(instance_actual_anno_uuid))
# Create a dummy repo with the Repository Resource for deletion
instance_setup_em_delete = RepositoryResource(None, "Test_for_deletion")
instance_actual_em_delete_uuid = dvid.create_project(instance_setup_em_delete)
instance_actual_em_delete = dvid.delete_project(instance_setup_em_delete)
print("Successfully deleted Repo project: {}".format(instance_actual_em_delete_uuid))
# Delete the data instance of a repo
instance_setup_em_delete = DataInstanceResource(
DATA_INSTANCE, None, "uint8blk", ALIAS, "Example channel.", datatype="uint8"
)
instance_actual_em_delete_uuid = dvid.create_project(instance_setup_em_delete)
dvid.delete_project(dvid.get_instance(instance_actual_em_delete_uuid, DATA_INSTANCE))
print(
"Successfully deleted data instance project: {}".format(
instance_actual_em_delete_uuid
)
)
########### Test Versioning API ###########
# Set up a new project with a channel
instance_setup_merge = DataInstanceResource(
DATA_INSTANCE + "_the_second",
None,
"uint8blk",
"Mege_repo",
"Example channel.",
datatype="uint8",
)
chan_actual_parent1 = dvid.create_project(instance_setup_merge)
print("\nParent1 UUID: " + chan_actual_parent1)
commit_1 = dvid.commit(chan_actual_parent1, note="Test the commit")
branch_1 = dvid.branch(chan_actual_parent1, note="Test the versioning system once")
branch_2 = dvid.branch(chan_actual_parent1, note="Test the versioning system twice")
print("Created branches {} and {} from Parent1".format(branch_1, branch_2))
########### Test Metadat API ###########
# Set up a new project with a channel
print(dvid.get_info(instance_setup_merge))
print(dvid.get_server_info())
print(dvid.get_server_compiled_types())
dvid.server_reload_metadata()
########### Test Voluming API ###########
#
# Prepare the data
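# NOTE: "<somedir>/*.png" below is a placeholder, not a real path; point it at an
# actual PNG on disk before running (PIL's Image.open does not expand wildcards).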
img = Image.open("<somedir>/*.png")
data_tile = np.asarray(img)
print(data_tile.shape)
data_tile = np.expand_dims(data_tile, axis=0)
data_tile = data_tile.copy(order="C")
# Create the project
instance_setup_up = DataInstanceResource(
DATA_INSTANCE + "_the_second",
None,
"imagetile",
"Upload Test",
"Example channel.",
datatype="uint8",
)
chan_actual_up = dvid.create_project(instance_setup_up)
# Create the cutout
dvid.create_cutout(instance_setup_up, 0, [0, 454], [0, 480], [0, 1], data_tile)
print("Create cutout successful")
# Get the cutout
got_cutout = dvid.get_cutout(instance_setup_up, 0, [0, 454], [0, 480], [0, 1])
# Check for equality
if (got_cutout == data_tile).all():
print("Both tiles equate")
| apache-2.0 |
Og192/Python | machine-learning-algorithms/memoryNN/memNN_ExactTest.py | 2 | 7973 | import numpy as np
import tensorflow as tf
import os
import matplotlib.pyplot as plt
corpusSize = 1977#2358
testDataSize = 49
testMaxLength = 82
batchSize = 1
vectorLength = 50
sentMaxLength = 82
hopNumber = 3
classNumber = 4
num_epoches = 2000
weightDecay = 0.001
trainDatasetPath = "/home/laboratory/memoryCorpus/train/"
testDatasetPath = "/home/laboratory/memoryCorpus/test/"
resultOutput = '/home/laboratory/memoryCorpus/result/'
if not os.path.exists(resultOutput):
os.makedirs(resultOutput)
def loadData(datasetPath, shape, sentMaxLength):
print("load " + datasetPath)
datasets = np.loadtxt(datasetPath, np.float)
datasets = np.reshape(datasets, shape)
return datasets
atten = np.loadtxt("atten", np.float,delimiter= ',').reshape((1, 2 * vectorLength))
atten_b = -0.002059555053710938
flinearLayer_W = np.loadtxt("linearLayer_W", np.float,delimiter= ',').reshape((vectorLength, vectorLength))
flinearLayer_b = np.loadtxt("linearLayer_b", np.float,delimiter= ',').reshape((vectorLength, 1))
fsoftmaxLayer_W = np.loadtxt("softmaxLayer_W", np.float,delimiter= ',').reshape((classNumber, vectorLength))
fsoftmaxLayer_b = np.loadtxt("softmaxLayer_b", np.float,delimiter= ',').reshape((classNumber, 1))
def shuffleDatasets(datasets, orders):
shuffleDatasets = np.zeros(datasets.shape)
index = 0
for i in orders:
shuffleDatasets[index] = datasets[i]
index += 1
del datasets
return shuffleDatasets
def generateData(datasetPath,corpusSize, sentMaxLength):
contxtWordsDir = datasetPath + 'contxtWords'
aspectWordsDir = datasetPath + 'aspectWords'
labelsDir = datasetPath + 'labels'
positionsDir = datasetPath + 'positions'
sentLengthsDir = datasetPath + 'sentLengths'
maskDir = datasetPath + 'mask'
contxtWords = loadData(contxtWordsDir, (corpusSize, vectorLength, sentMaxLength), sentMaxLength)
aspectWords = loadData(aspectWordsDir, (corpusSize, vectorLength, 1), sentMaxLength)
labels = loadData(labelsDir, (corpusSize, classNumber, 1), sentMaxLength)
position = loadData(positionsDir, (corpusSize, 1, sentMaxLength), sentMaxLength)
sentLength = loadData(sentLengthsDir, (corpusSize, 1, 1), sentMaxLength)
mask = loadData(maskDir, (corpusSize, 1, sentMaxLength), sentMaxLength)
return (contxtWords, aspectWords, labels, position, sentLength, mask)
def plot(loss_list):
plt.cla()
plt.plot(loss_list)
plt.draw()
plt.pause(0.0001)
contxtWords_placeholder = tf.placeholder(tf.float32, [vectorLength, None], name="contxtWords")#
aspectWords_placeholder = tf.placeholder(tf.float32, [vectorLength, 1], name="aspectWords")
labels_placeholder = tf.placeholder(tf.float32, [classNumber, 1], name="labels")
position_placeholder = tf.placeholder(tf.float32, [1, None], name="position")#
sentLength_placeholder = tf.placeholder(tf.float32, [1, 1], name="sentLength")
mask_placeholder = tf.placeholder(tf.float32, [1, None], name="mask")
attention_W = tf.Variable(atten, dtype = tf.float32, name="attention_W")
attention_b = tf.Variable(atten_b, dtype = tf.float32, name="attention_b")
linearLayer_W = tf.Variable(flinearLayer_W , dtype=tf.float32, name="linearLayer_W")
linearLayer_b = tf.Variable(flinearLayer_b , dtype = tf.float32, name="linearLayer_b")
softmaxLayer_W = tf.Variable(fsoftmaxLayer_W, dtype= tf.float32, name="softmaxLayer_W")
softmaxLayer_b = tf.Variable(fsoftmaxLayer_b, dtype= tf.float32, name="softmaxLayer_b")
vaspect = aspectWords_placeholder
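# Each hop below: (1) builds a location-weighted memory Mi from the context
# word vectors, (2) scores every memory column against the current aspect
# vector with a one-layer attention (attention_W / attention_b) followed by a
# masked softmax, and (3) sets the new aspect vector to the attended memory
# plus a linear transform of the previous one. hopNumber such hops are unrolled.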
for i in range(hopNumber):
Vi = 1.0 - position_placeholder / sentLength_placeholder - (hopNumber / vectorLength) * (1.0 - 2.0 * (position_placeholder / sentLength_placeholder))
Mi = Vi * contxtWords_placeholder
expanded_vaspect = vaspect
for j in range(sentMaxLength - 1):
expanded_vaspect = tf.concat(1, [expanded_vaspect, vaspect])
attentionInputs = tf.concat(0, [Mi, expanded_vaspect])
gi = tf.tanh(tf.matmul(attention_W, attentionInputs) + attention_b) + mask_placeholder
alpha = tf.nn.softmax(gi)
linearLayerOut = tf.matmul(linearLayer_W, vaspect) + linearLayer_b
vaspect = tf.reduce_sum(alpha * Mi, 1, True) + linearLayerOut
linearLayerOut = tf.matmul(softmaxLayer_W, vaspect) + softmaxLayer_b
# regu = tf.reduce_sum(attention_W * attention_W)
# regu += tf.reduce_sum(attention_b * attention_b)
# regu += tf.reduce_sum(linearLayer_W * linearLayer_W)
# regu += tf.reduce_sum(linearLayer_b * linearLayer_b)
# regu += tf.reduce_sum(softmaxLayer_W * softmaxLayer_W)
# regu += tf.reduce_sum(softmaxLayer_b * softmaxLayer_b)
# regu = weightDecay * regu
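# Subtracting the max logit before the softmax / cross-entropy below is the
# standard numerical-stability trick; it does not change the probabilities.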
calssification = tf.nn.softmax(linearLayerOut - tf.reduce_max(linearLayerOut), dim=0)
total_loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(linearLayerOut - tf.reduce_max(linearLayerOut), labels_placeholder, dim=0))
ada = tf.train.AdagradOptimizer(0.01)# 0.3 for hopNumber = 1
train_step = ada.minimize(total_loss)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
loss_list = []
contxtWords, aspectWords, labels, position, sentLength, mask = generateData(trainDatasetPath, corpusSize, sentMaxLength)
contxtWordsT,aspectWordsT,labelsT,positionT,sentLengthT, maskT = generateData(testDatasetPath, testDataSize, testMaxLength)
for epoch_idx in range(num_epoches):
results = []
sum_loss= 0.0
print("New data, epoch", epoch_idx)
orders = np.arange(corpusSize)
np.random.shuffle(orders)
contxtWords = shuffleDatasets(contxtWords, orders)
aspectWords = shuffleDatasets(aspectWords, orders)
labels = shuffleDatasets(labels, orders)
position = shuffleDatasets(position, orders)
sentLength = shuffleDatasets(sentLength, orders)
mask = shuffleDatasets(mask, orders)
count = 0
correct = 0
for i in range(corpusSize):
_calssification, _total_loss, _train_step, _attention_W = sess.run(
[calssification, total_loss, train_step, attention_W],
feed_dict=
{
contxtWords_placeholder:contxtWords[i],
aspectWords_placeholder:aspectWords[i],
labels_placeholder :labels[i],
position_placeholder :position[i],
sentLength_placeholder :sentLength[i],
mask_placeholder :mask[i]
}
)
sum_loss += _total_loss
if np.argmax(_calssification.reshape(4)) == np.argmax(labels[i]):
correct += 1.0
count += 1
# print(_attention_W)
# print(sentLength[i])
print("Iteration", epoch_idx, "Loss", sum_loss / (corpusSize * 2), "train_step", _train_step, "Accuracy: ", float(correct / count))
loss_list.append(sum_loss / (corpusSize * 2))
plot(loss_list)
count = 0
correct = 0
for i in range(testDataSize):
_calssification = sess.run(
calssification,
feed_dict=
{
contxtWords_placeholder:contxtWordsT[i],
aspectWords_placeholder:aspectWordsT[i],
labels_placeholder :labelsT[i],
position_placeholder :positionT[i],
sentLength_placeholder :sentLengthT[i],
mask_placeholder :maskT[i]
}
)
results.append(_calssification.reshape(4))
if np.argmax(_calssification.reshape(4)) == np.argmax(labelsT[i]):
correct += 1.0
count += 1
print("test Accuracy: ", float(correct / count))
        np.savetxt(resultOutput + "predict_" + str(epoch_idx) + ".txt", np.asarray(results, dtype=np.float32), fmt='%.5f',delimiter=' ')
| gpl-2.0 |
moreati/numpy | numpy/lib/npyio.py | 35 | 71412 | from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields,
flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def __dir__(self):
"""
Enables dir(bagobj) to list the files in an NpzFile.
This also enables tab-completion in an interpreter or IPython.
"""
return object.__getattribute__(self, '_obj').keys()
def zipfile_factory(*args, **kwargs):
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ``.npy`` extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
Allow loading pickled data. Default: True
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
Python 2 when using Python 3.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False, allow_pickle=True,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
self.allow_pickle = allow_pickle
self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes,
allow_pickle=self.allow_pickle,
pickle_kwargs=self.pickle_kwargs)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ``.npy`` extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
Parameters
----------
file : file-like object or string
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
allow_pickle : bool, optional
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
arrays will fail.
Default: True
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
is True, pickle will try to map the old Python 2 names to the new names
used in Python 3.
encoding : str, optional
What encoding to use when reading Python 2 strings. Only useful when
loading Python 2 generated pickled files on Python 3, which includes
npy/npz files containing object arrays. Values other than 'latin1',
'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
data. Default: 'ASCII'
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
ValueError
The file contains an object array, but allow_pickle=False given.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
else:
fid = file
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of Numpy arrays is loaded
# in. Pickle does not pass on the encoding information to
# Numpy. The unpickling code in numpy.core.multiarray is
# written to assume that unicode data appearing where binary
# should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
#
# Other encoding values can corrupt binary data, and we
# purposefully disallow them. For the same reason, the errors=
# argument is not exposed, as values other than 'strict'
# result can similarly silently corrupt numerical data.
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
if sys.version_info[0] >= 3:
pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = {}
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Try a pickle
if not allow_pickle:
raise ValueError("allow_pickle=False, but file does not contain "
"non-pickled data")
try:
return pickle.load(fid, **pickle_kwargs)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for disallowing
pickles include security (loading pickled data can execute arbitrary
code) and portability (pickled objects may not be loadable on different
Python installations, for example if the stored objects require libraries
that are not available, and not all pickled data is compatible between
Python 2 and Python 3).
Default: True
fix_imports : bool, optional
Only useful in forcing objects in object arrays on Python 3 to be
pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
will try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see the module docstring
of `numpy.lib.format` or the Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
if sys.version_info[0] >= 3:
pickle_kwargs = dict(fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = None
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `numpy.lib.format` or the
Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
    file : str
        File name of the ``.npz`` file; the ``.npz`` extension is appended
        to the file name if it is not already there.
    args : Arguments, optional
        Arrays to save to the file. Since it is not possible for Python to
        know their names outside `savez_compressed`, they are saved with
        names "arr_0", "arr_1", and so on.
    kwds : Keyword arguments, optional
        Arrays to save to the file. Arrays will be saved in the file with
        the keyword names.
See Also
--------
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
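    Examples
    --------
    Save two arrays into a compressed archive and read them back:
    >>> test_array = np.random.rand(3, 2)
    >>> test_vector = np.random.rand(4)
    >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
    >>> loaded = np.load('/tmp/123.npz')
    >>> np.array_equal(test_array, loaded['a'])
    True
    >>> np.array_equal(test_vector, loaded['b'])
    True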
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val),
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
def floatconv(x):
        x = x.lower()  # lower-case so an upper-case '0X' hex prefix is also recognized
if b'0x' in x:
return float.fromhex(asstr(x))
return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.longdouble):
return np.longdouble
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, np.complex):
return lambda x: complex(asstr(x))
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str or sequence, optional
The characters or list of characters used to indicate the start of a
comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
.. versionadded:: 1.10.0
The strings produced by the Python float.hex method can be used as
input for floats.
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
if comments is not None:
if isinstance(comments, (basestring, bytes)):
comments = [asbytes(comments)]
else:
comments = [asbytes(comment) for comment in comments]
# Compile regex for comments beforehand
comments = (re.escape(comment) for comment in comments)
regex_comments = re.compile(asbytes('|').join(comments))
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
import gzip
fh = iter(gzip.GzipFile(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
elif sys.version_info[0] == 2:
fh = iter(open(fname, 'U'))
else:
fh = iter(open(fname))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
def split_line(line):
"""Chop off comments, strip, and split at delimiter.
Note that although the file is opened as text, this function
returns bytes.
"""
line = asbytes(line)
if comments is not None:
line = regex_comments.split(asbytes(line), maxsplit=1)[0]
line = line.strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
% line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
           `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'write'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
try:
fh.write(asbytes(format % tuple(row) + newline))
except TypeError:
raise TypeError("Mismatch between array dtype ('%s') and "
"format specifier ('%s')"
% (str(X.dtype), format))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None, deletechars=None,
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file, str, list of str, generator
File, filename, list, or generator to read. If the filename
        extension is `.gz` or `.bz2`, the file is first decompressed. Note
that generators must return byte strings in Python 3k. The strings
in a list or produced by a generator are treated as lines.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names have an underscore appended:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] Numpy User Guide, section `I/O with Numpy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
---------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, basestring):
if sys.version_info[0] == 2:
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
else:
fhd = iter(np.lib._datasource.open(fname, 'rb'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, list of strings, "
"or generator. Got %s instead." % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = (
asbytes('').join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
#miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
mdtype = list(zip(names, [np.bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
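# Editor's illustrative sketch (not part of the original source): a doctest-style
# example of how the `missing_values`/`filling_values` parameters documented
# above interact; empty fields are treated as missing and replaced by the
# filling value (exact repr spacing may differ).
#
# >>> from io import BytesIO
# >>> data = BytesIO(b"1,,3\n4,5,")
# >>> np.genfromtxt(data, delimiter=",", filling_values=-999)
# array([[   1., -999.,    3.],
#        [   4.,    5., -999.]])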
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
# Set default kwargs for genfromtxt as relevant to csv import.
kwargs.setdefault("case_sensitive", "lower")
kwargs.setdefault("names", True)
kwargs.setdefault("delimiter", ",")
kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
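# Editor's illustrative sketch (not in the original source): with the defaults
# set above, the header row supplies the field names and they are lower-cased.
#
# >>> from io import BytesIO
# >>> r = recfromcsv(BytesIO(b"Name,Value\nalpha,10\nbeta,20"))
# >>> r.value
# array([10, 20])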
| bsd-3-clause |
leesavide/pythonista-docs | Documentation/matplotlib/examples/old_animation/histogram_tkagg.py | 3 | 1847 | """
This example shows how to use a path patch to draw a bunch of
rectangles for an animated histogram
"""
import numpy as np
import matplotlib
matplotlib.use('TkAgg') # do this before importing pylab
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.path as path
fig, ax = plt.subplots()
# histogram our data with numpy
data = np.random.randn(1000)
n, bins = np.histogram(data, 100)
# get the corners of the rectangles for the histogram
left = np.array(bins[:-1])
right = np.array(bins[1:])
bottom = np.zeros(len(left))
top = bottom + n
nrects = len(left)
# here comes the tricky part -- we have to set up the vertex and path
# codes arrays using moveto, lineto and closepoly
# for each rect: 1 for the MOVETO, 3 for the LINETO, 1 for the
# CLOSEPOLY; the vert for the closepoly is ignored but we still need
# it to keep the codes aligned with the vertices
nverts = nrects*(1+3+1)
verts = np.zeros((nverts, 2))
codes = np.ones(nverts, int) * path.Path.LINETO
codes[0::5] = path.Path.MOVETO
codes[4::5] = path.Path.CLOSEPOLY
verts[0::5,0] = left
verts[0::5,1] = bottom
verts[1::5,0] = left
verts[1::5,1] = top
verts[2::5,0] = right
verts[2::5,1] = top
verts[3::5,0] = right
verts[3::5,1] = bottom
barpath = path.Path(verts, codes)
patch = patches.PathPatch(barpath, facecolor='green', edgecolor='yellow', alpha=0.5)
ax.add_patch(patch)
ax.set_xlim(left[0], right[-1])
ax.set_ylim(bottom.min(), top.max())
def animate():
if animate.cnt>=100:
return
animate.cnt += 1
# simulate new data coming in
data = np.random.randn(1000)
n, bins = np.histogram(data, 100)
top = bottom + n
verts[1::5,1] = top
verts[2::5,1] = top
fig.canvas.draw()
fig.canvas.manager.window.after(100, animate)
animate.cnt = 0
fig.canvas.manager.window.after(100, animate)
plt.show()
| apache-2.0 |
lin-credible/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 338 | 4324 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initialization strategies to make
the algorithm's convergence robust, as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of runs (with randomly generated datasets) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
| bsd-3-clause |
zhuangjun1981/retinotopic_mapping | retinotopic_mapping/tools/PlottingTools.py | 1 | 15373 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 31 11:07:20 2014
@author: junz
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import matplotlib.colors as col
import scipy.ndimage as ni
import ImageAnalysis as ia
try:
import skimage.external.tifffile as tf
except ImportError:
import tifffile as tf
try: import cv2
except ImportError as e: print e
def get_rgb(colorStr):
"""
get R,G,B int value from a hex color string
"""
return int(colorStr[1:3], 16), int(colorStr[3:5], 16), int(colorStr[5:7], 16)
def get_color_str(R, G, B):
"""
get hex color string from R,G,B value (integer with uint8 format)
"""
    if not (isinstance(R, (int, long)) and isinstance(G, (int, long)) and isinstance(B, (int, long))):
raise TypeError, 'Input R, G and B should be integer!'
if not ((0 <= R <= 255) and (0 <= G <= 255) and (
0 <= B <= 255)): raise ValueError, 'Input R, G and B should between 0 and 255!'
return '#' + ''.join(map(chr, (R, G, B))).encode('hex')
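# Doctest-style illustration (editor's addition): get_color_str and get_rgb
# are inverses of each other.
# >>> get_color_str(255, 0, 128)
# '#ff0080'
# >>> get_rgb('#ff0080')
# (255, 0, 128)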
def binary_2_rgba(img, foregroundColor='#ff0000', backgroundColor='#000000', foregroundAlpha=255, backgroundAlpha=0):
"""
    generate a display image in (RGBA).(np.uint8) format which can be displayed by imshow
    :param img: input image, should be a binary array (np.bool or np.(u)int)
:param foregroundColor: color for 1 in the array, RGB str, i.e. '#ff0000'
:param backgroundColor: color for 0 in the array, RGB str, i.e. '#ff00ff'
:param foregroundAlpha: alpha for 1 in the array, int, 0-255
:param backgroundAlpha: alpha for 1 in the array, int, 0-255
:return: displayImg, (RGBA).(np.uint8) format, ready for imshow
"""
if img.dtype == np.bool:
pass
elif issubclass(img.dtype.type, np.integer):
if np.amin(img) < 0 or np.amax(img) > 1: raise ValueError, 'Values of input image should be either 0 or 1.'
else:
raise TypeError, 'Data type of input image should be either np.bool or integer.'
if type(foregroundAlpha) is int:
if foregroundAlpha < 0 or foregroundAlpha > 255: raise ValueError, 'Value of foreGroundAlpha should be between 0 and 255.'
else:
raise TypeError, 'Data type of foreGroundAlpha should be integer.'
if type(backgroundAlpha) is int:
if backgroundAlpha < 0 or backgroundAlpha > 255: raise ValueError, 'Value of backGroundAlpha should be between 0 and 255.'
else:
raise TypeError, 'Data type of backGroundAlpha should be integer.'
fR, fG, fB = get_rgb(foregroundColor)
bR, bG, bB = get_rgb(backgroundColor)
displayImg = np.zeros((img.shape[0], img.shape[1], 4)).astype(np.uint8)
displayImg[img == 1] = np.array([fR, fG, fB, foregroundAlpha]).astype(np.uint8)
displayImg[img == 0] = np.array([bR, bG, bB, backgroundAlpha]).astype(np.uint8)
return displayImg
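# Illustrative sketch (editor's addition): overlay a small boolean mask on top
# of another image; the base image name below is hypothetical.
# >>> mask = np.zeros((4, 4), dtype=np.bool)
# >>> mask[1:3, 1:3] = True
# >>> overlay = binary_2_rgba(mask, foregroundColor='#00ff00', foregroundAlpha=128)
# >>> overlay.shape, overlay.dtype
# ((4, 4, 4), dtype('uint8'))
# >>> # plt.imshow(baseImg, cmap='gray'); plt.imshow(overlay, interpolation='nearest')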
def scalar_2_rgba(img, color='#ff0000'):
"""
    generate a display image in (RGBA).(np.uint8) format which can be displayed by imshow
    alpha is defined by values in the img
    :param img: input image
    :param color: color for the image, RGB str, i.e. '#ff0000'
:return: displayImg, (RGBA).(np.uint8) format, ready for imshow
"""
R, G, B = get_rgb(color)
RMatrix = (R * ia.array_nor(img.astype(np.float32))).astype(np.uint8)
GMatrix = (G * ia.array_nor(img.astype(np.float32))).astype(np.uint8)
BMatrix = (B * ia.array_nor(img.astype(np.float32))).astype(np.uint8)
alphaMatrix = (ia.array_nor(img.astype(np.float32)) * 255).astype(np.uint8)
displayImg = np.zeros((img.shape[0], img.shape[1], 4)).astype(np.uint8)
displayImg[:, :, 0] = RMatrix;
displayImg[:, :, 1] = GMatrix;
displayImg[:, :, 2] = BMatrix;
displayImg[:, :, 3] = alphaMatrix
return displayImg
def bar_graph(left,
height,
error,
errorDir='both', # 'both', 'positive' or 'negative'
width=0.1,
plotAxis=None,
lw=3,
faceColor='#000000',
edgeColor='none',
capSize=10,
label=None
):
"""
plot a single bar with error bar
"""
if not plotAxis:
f = plt.figure()
plotAxis = f.add_subplot(111)
if errorDir == 'both':
yerr = error
elif errorDir == 'positive':
yerr = [[0], [error]]
elif errorDir == 'negative':
yerr = [[error], [0]]
plotAxis.errorbar(left + width / 2,
height,
yerr=yerr,
lw=lw,
capsize=capSize,
capthick=lw,
color=edgeColor)
plotAxis.bar(left,
height,
width=width,
color=faceColor,
edgecolor=edgeColor,
lw=lw,
label=label)
return plotAxis
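# Illustrative sketch (editor's addition): two labelled bars with one-sided
# error bars on a shared axis; the values are made up.
# >>> f = plt.figure()
# >>> ax = f.add_subplot(111)
# >>> _ = bar_graph(0.0, 3.2, 0.4, errorDir='positive', plotAxis=ax,
# ...               faceColor='#888888', edgeColor='#000000', label='A')
# >>> _ = bar_graph(0.2, 2.1, 0.3, errorDir='positive', plotAxis=ax,
# ...               faceColor='#ff0000', edgeColor='#000000', label='B')
# >>> _ = ax.legend()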
def random_color(numOfColor=10):
"""
generate as list of random colors
"""
numOfColor = int(numOfColor)
colors = []
Cmatrix = (np.random.rand(numOfColor, 3) * 255).astype(np.uint8)
for i in range(numOfColor):
r = hex(Cmatrix[i][0]).split('x')[1]
if len(r) == 1:
r = '0' + r
g = hex(Cmatrix[i][1]).split('x')[1]
if len(g) == 1:
g = '0' + g
b = hex(Cmatrix[i][2]).split('x')[1]
if len(b) == 1:
b = '0' + b
colors.append('#' + r + g + b)
return colors
def show_movie(path,  # tif file path or numpy array of the movie
mode='raw', # 'raw', 'dF' or 'dFoverF'
               baselinePic=None,  # picture of baseline
baselineType='mean', # way to calculate baseline
cmap='gray'):
"""
    plot tif movie in the way defined by mode
"""
if isinstance(path, str):
rawMov = tf.imread(path)
elif isinstance(path, np.ndarray):
rawMov = path
if mode == 'raw':
mov = rawMov
else:
_, dFMov, dFoverFMov = ia.normalize_movie(rawMov,
baselinePic=baselinePic,
baselineType=baselineType)
if mode == 'dF':
mov = dFMov
elif mode == 'dFoverF':
mov = dFoverFMov
else:
raise LookupError, 'The "mode" should be "raw", "dF" or "dFoverF"!'
if isinstance(path, str):
tf.imshow(mov,
cmap=cmap,
vmax=np.amax(mov),
vmin=np.amin(mov),
title=mode + ' movie of ' + path)
elif isinstance(path, np.ndarray):
tf.imshow(mov,
cmap=cmap,
vmax=np.amax(mov),
vmin=np.amin(mov),
title=mode + ' Movie')
return mov
def standalone_color_bar(vmin, vmax, cmap, sectionNum=10):
"""
plot a stand alone color bar.
"""
a = np.array([[vmin, vmax]])
plt.figure(figsize=(0.1, 9))
img = plt.imshow(a, cmap=cmap, vmin=vmin, vmax=vmax)
plt.gca().set_visible(False)
cbar = plt.colorbar()
cbar.set_ticks(np.linspace(vmin, vmax, num=sectionNum + 1))
def alpha_blending(image, alphaData, vmin, vmax, cmap='Paired', sectionNum=10, background=-1, interpolation='nearest',
isSave=False, savePath=None):
"""
Generate image with transparency weighted by another matrix.
    Plot numpy array 'image' with colormap 'cmap', and define the transparency
    of each pixel by the value in another numpy array 'alphaData'.
All the elements in alphaData should be non-negative.
"""
if image.shape != alphaData.shape:
raise LookupError, '"image" and "alphaData" should have same shape!!'
if np.amin(alphaData) < 0:
        raise ValueError, 'All the elements in alphaData should be non-negative.'
# normalize image
image[image > vmax] = vmax
image[image < vmin] = vmin
image = (image - vmin) / (vmax - vmin)
# get colored image of image
exec ('colorImage = cm.' + cmap + '(image)')
# normalize alphadata
alphaDataNor = alphaData / np.amax(alphaData)
alphaDataNor = np.sqrt(alphaDataNor)
colorImage[:, :, 3] = alphaDataNor
# plt.figure()
# plot dummy figure for colorbar
a = np.array([[vmin, vmax]])
plt.imshow(a, cmap=cmap, vmin=vmin, vmax=vmax, alpha=0)
# plt.gca().set_visible(False)
cbar = plt.colorbar()
cbar.set_ticks(np.linspace(vmin, vmax, num=sectionNum + 1))
cbar.set_alpha(1)
cbar.draw_all()
# generate black background
b = np.array(colorImage)
b[:] = background
b[:, :, 3] = 1
plt.imshow(b, cmap='gray')
# plot map
plt.imshow(colorImage, interpolation=interpolation)
return colorImage
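# Illustrative sketch (editor's addition): show a phase map weighted by a power
# map so that weakly responding pixels fade out; the data here is random.
# >>> phase = np.random.rand(64, 64) * 2. - 1.
# >>> power = np.random.rand(64, 64)
# >>> _ = plt.figure()
# >>> rgba = alpha_blending(phase, power, vmin=-1., vmax=1., cmap='hsv')
# >>> plt.show()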
def plot_mask(mask, plotAxis=None, color='#ff0000', zoom=1, borderWidth=None, closingIteration=None):
"""
plot mask borders in a given color
"""
if not plotAxis:
f = plt.figure()
plotAxis = f.add_subplot(111)
cmap1 = col.ListedColormap(color, 'temp')
cm.register_cmap(cmap=cmap1)
if zoom != 1:
mask = ni.interpolation.zoom(mask, zoom, order=0)
mask2 = mask.astype(np.float32)
mask2[np.invert(np.isnan(mask2))] = 1.
mask2[np.isnan(mask2)] = 0.
struc = ni.generate_binary_structure(2, 2)
if borderWidth:
border = mask2 - ni.binary_erosion(mask2, struc, iterations=borderWidth).astype(np.float32)
else:
border = mask2 - ni.binary_erosion(mask2, struc).astype(np.float32)
if closingIteration:
border = ni.binary_closing(border, iterations=closingIteration).astype(np.float32)
border[border == 0] = np.nan
currfig = plotAxis.imshow(border, cmap='temp', interpolation='nearest')
return currfig
def plot_mask_borders(mask, plotAxis=None, color='#ff0000', zoom=1, borderWidth=2, closingIteration=None, **kwargs):
"""
    plot mask (ROI) borders by using the pyplot.contour function. All the 0s and NaNs in the input mask will be considered
    as background, and non-zero, non-NaN pixels will be considered part of the ROI.
"""
if not plotAxis:
f = plt.figure()
plotAxis = f.add_subplot(111)
plotingMask = np.ones(mask.shape, dtype=np.uint8)
plotingMask[np.logical_or(np.isnan(mask), mask == 0)] = 0
if zoom != 1:
plotingMask = cv2.resize(plotingMask.astype(np.float),
dsize=(int(plotingMask.shape[1] * zoom), int(plotingMask.shape[0] * zoom)))
plotingMask[plotingMask < 0.5] = 0
plotingMask[plotingMask >= 0.5] = 1
plotingMask = plotingMask.astype(np.uint8)
if closingIteration is not None:
plotingMask = ni.binary_closing(plotingMask, iterations=closingIteration).astype(np.uint8)
plotingMask = ni.binary_erosion(plotingMask, iterations=borderWidth)
currfig = plotAxis.contour(plotingMask, levels=[0.5], colors=color, linewidths=borderWidth, **kwargs)
# put y axis in decreasing order
y_lim = list(plotAxis.get_ylim())
y_lim.sort()
plotAxis.set_ylim(y_lim[::-1])
plotAxis.set_aspect('equal')
return currfig
def grid_axis(rowNum, columnNum, totalPlotNum, **kwarg):
"""
    return figure handles and axis handles for multiple subplots and figures
"""
figureNum = totalPlotNum // (rowNum * columnNum) + 1
figureHandles = []
for i in range(figureNum):
f = plt.figure(**kwarg)
figureHandles.append(f)
axisHandles = []
for i in range(totalPlotNum):
currFig = figureHandles[i // (rowNum * columnNum)]
currIndex = i % (rowNum * columnNum)
currAxis = currFig.add_subplot(rowNum, columnNum, currIndex + 1)
axisHandles.append(currAxis)
return figureHandles, axisHandles
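# Doctest-style illustration (editor's addition): 7 plots on a 2 x 3 grid spill
# over onto a second figure.
# >>> figs, axes = grid_axis(2, 3, 7, figsize=(9, 6))
# >>> (len(figs), len(axes))
# (2, 7)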
def tile_axis(f, rowNum, columnNum, topDownMargin=0.05, leftRightMargin=0.05, rowSpacing=0.05, columnSpacing=0.05):
if 2 * topDownMargin + (
(rowNum - 1) * rowSpacing) >= 1: raise ValueError, 'Top down margin or row spacing are too big!'
if 2 * leftRightMargin + (
(columnNum - 1) * columnSpacing) >= 1: raise ValueError, 'Left right margin or column spacing are too big!'
height = (1 - (2 * topDownMargin) - (rowNum - 1) * rowSpacing) / rowNum
width = (1 - (2 * leftRightMargin) - (columnNum - 1) * columnSpacing) / columnNum
xStarts = np.arange(leftRightMargin, 1 - leftRightMargin, (width + columnSpacing))
yStarts = np.arange(topDownMargin, 1 - topDownMargin, (height + rowSpacing))[::-1]
axisList = [[f.add_axes([xStart, yStart, width, height]) for xStart in xStarts] for yStart in yStarts]
return axisList
def save_figure_without_borders(f,
savePath,
removeSuperTitle=True,
**kwargs):
"""
remove borders of a figure
"""
f.gca().get_xaxis().set_visible(False)
f.gca().get_yaxis().set_visible(False)
f.gca().set_title('')
if removeSuperTitle:
f.suptitle('')
f.savefig(savePath, pad_inches=0, bbox_inches='tight', **kwargs)
def merge_normalized_images(imgList, isFilter=True, sigma=50, mergeMethod='mean', dtype=np.float32):
"""
    merge images in a list into one; for each image, local intensity variability will be removed by subtraction of a
    gaussian filtered image. Then all images will be collapsed by the mergeMethod into a single image
"""
imgList2 = []
for currImg in imgList:
imgList2.append(ia.array_nor(currImg.astype(dtype)))
if mergeMethod == 'mean':
mergedImg = np.mean(np.array(imgList2), axis=0)
elif mergeMethod == 'min':
mergedImg = np.min(np.array(imgList2), axis=0)
elif mergeMethod == 'max':
mergedImg = np.max(np.array(imgList2), axis=0)
elif mergeMethod == 'median':
mergedImg = np.median(np.array(imgList2), axis=0)
if isFilter:
mergedImgf = ni.filters.gaussian_filter(mergedImg.astype(np.float), sigma=sigma)
return ia.array_nor(mergedImg - mergedImgf).astype(dtype)
else:
return ia.array_nor(mergedImg).astype(dtype)
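# Illustrative sketch (editor's addition): collapse a stack of images into one
# normalized average with slow intensity gradients removed (sigma in pixels).
# >>> imgs = [np.random.rand(128, 128) for _ in range(5)]
# >>> merged = merge_normalized_images(imgs, isFilter=True, sigma=20, mergeMethod='mean')
# >>> merged.shape, merged.dtype
# ((128, 128), dtype('float32'))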
# def hue2RGB(hue):
# """
# get the RGB value as format as hex string from the decimal ratio of hue (from 0 to 1)
# color model as described in:
# https://en.wikipedia.org/wiki/Hue
# """
# if hue < 0: hue = 0
# if hue > 1: hue = 1
# color = colorsys.hsv_to_rgb(hue,1,1)
# color = [int(x*255) for x in color]
# return get_color_str(*color)
#
#
def hot_2_rgb(hot):
"""
    get the RGB value formatted as a hex string from the decimal ratio of the hot colormap (from 0 to 1)
"""
if hot < 0: hot = 0
if hot > 1: hot = 1
cmap_hot = plt.get_cmap('hot')
color = cmap_hot(hot)[0:3];
color = [int(x * 255) for x in color]
return get_color_str(*color)
def value_2_rgb(value, cmap):
"""
    get the RGB value formatted as a hex string from the decimal ratio of a given colormap (from 0 to 1)
"""
if value < 0: value = 0
if value > 1: value = 1
cmap = plt.get_cmap(cmap)
color = cmap(value)[0:3];
color = [int(x * 255) for x in color]
return get_color_str(*color)
if __name__ == '__main__':
plt.ioff()
print 'for debug'
| gpl-3.0 |
hugobowne/scikit-learn | examples/svm/plot_oneclass.py | 80 | 2338 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.PuBu)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='darkred')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='palevioletred')
s = 40
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white', s=s)
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='blueviolet', s=s)
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='gold', s=s)
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
PredictiveScienceLab/py-mcmc | demos/demo4.py | 2 | 4183 | """
This demo demonstrates how to use a mean function in a GP and allow the model
to discover the most important basis functions.
This model is equivalent to a Relevance Vector Machine.
Author:
Ilias Bilionis
Date:
3/20/2014
"""
import numpy as np
import GPy
import pymcmc as pm
import matplotlib.pyplot as plt
# Write a class that represents the mean you wish to use:
class PolynomialBasis(object):
"""
A simple set of polynomials.
:param degree: The degree of the polynomials.
:type degree: int
"""
def __init__(self, degree):
"""
The constructor can do anything you want. The object should be
constructed before doing anything with pymcmc in any case.
Just make sure that inside the constructor you define the ``num_output``
attribute whose value should be equal to the number of basis functions.
"""
self.degree = degree
self.num_output = degree + 1 # YOU HAVE TO DEFINE THIS ATTRIBUTE!
def __call__(self, X):
"""
Evaluate the basis functions at ``X``.
Now, you should assume that ``X`` is a 2D numpy array of size
``num_points x input_dim``. If ``input_dim`` is 1, then you still need
to consider it as a 2D array because this is the kind of data that GPy
        requires. If you want to make the function work also with 1D arrays when
        ``input_dim`` is one, use the trick below.
The output of this function should be the design matrix. That is,
it should be the matrix ``phi`` of dimensions
        ``num_points x num_output``. In other words, ``phi[i, j]`` should be
the value of basis function ``phi_j`` at ``X[i, :]``.
"""
if X.ndim == 1:
X = X[:, None] # Trick for 1D arrays
return np.hstack([X ** i for i in range(self.degree + 1)])
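# Minimal sanity check (added for illustration, not part of the original demo):
# a degree-2 basis maps each scalar input x to the row [1, x, x**2], so
# ``num_output`` is 3.
_check_basis = PolynomialBasis(2)
assert _check_basis.num_output == 3
assert np.allclose(_check_basis(np.array([[2.0]])), [[1.0, 2.0, 4.0]])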
# Pick your degree
degree = 5
# Construct your basis
poly_basis = PolynomialBasis(degree)
# Let us generate some random data to play with
# The number of input dimensions
input_dim = 1
# The number of observations
num_points = 50
# The noise level we are going to add to the observations
noise = 0.1
# Observed inputs
X = 20. * np.random.rand(num_points, 1) - 10.
# The observations we make
Y = np.sin(X) / X + noise * np.random.randn(num_points, 1) - 0.1 * X + 0.1 * X ** 3
# Let's construct a GP model with just a mean and a diagonal covariance
# This is the mean (and at the same time the kernel)
mean = pm.MeanFunction(input_dim, poly_basis, ARD=True)
# Add an RBF kernel
kernel = GPy.kern.RBF(input_dim)
# Now, let's construct the model
model = GPy.models.GPRegression(X, Y, kernel=mean + kernel)
print 'Model before training:'
print str(model)
# You may just train the model by maximizing the likelihood:
model.optimize_restarts(messages=True)
print 'Trained model:'
print str(model)
print model.add.mean.variance
# And just plot the predictions
model.plot(plot_limits=(-10, 15))
# Let us also plot the full function
x = np.linspace(-10, 15, 100)[:, None]
y = np.sin(x) / x - 0.1 * x + 0.1 * x ** 3
plt.plot(x, y, 'r', linewidth=2)
plt.legend(['Mean of GP', '5% percentile of GP', '95% percentile of GP',
'Observations', 'Real Underlying Function'], loc='best')
plt.title('Model trained by maximizing the likelihood')
plt.show()
a = raw_input('press enter to continue...')
# Or you might want to do it using MCMC:
new_mean = pm.MeanFunction(input_dim, poly_basis, ARD=True)
new_kernel = GPy.kern.RBF(input_dim)
new_model = GPy.models.GPRegression(X, Y, kernel=new_mean + new_kernel)
proposal = pm.MALAProposal(dt=0.1)
mcmc = pm.MetropolisHastings(new_model, proposal=proposal)
mcmc.sample(50000, num_thin=100, num_burn=1000, verbose=True)
print 'Model trained with MCMC:'
print str(new_model)
print new_model.add.mean.variance
# Plot everything for this too:
new_model.plot(plot_limits=(-10., 15.))
# Let us also plot the full function
plt.plot(x, y, 'r', linewidth=2)
plt.legend(['Mean of GP', '5% percentile of GP', '95% percentile of GP',
'Observations', 'Real Underlying Function'], loc='best')
plt.title('Model trained by MCMC')
plt.show()
a = raw_input('press enter to continue...')
| lgpl-3.0 |
pangwong11/jumpball | bd_analyze/nba_season_stats_analyzer.py | 1 | 5069 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as pyplot
from datetime import datetime
import os
import glob
import sys
import re
import argparse
import cv2
import random
import ast
# Argument parsing
#parser = argparse.ArgumentParser(description='Jumpball analyze')
#parser.add_argument('-s', '--season', action='store', help='Season in year', dest="year_season", required=True)
#parser.add_argument('-n', '--next-season', action='store', help='Season in year', dest="next_year_season", required=True)
#args = parser.parse_args()
#
#season = args.year_season
#next_season = args.next_year_season
#data_directory = "./nba_data"
team_stat_path = './nba_data/*.csv'
team_stat_files = glob.glob(team_stat_path)
data_types = ['Height', 'Weight', 'WL_PERC']
num_data_types = len(data_types)
def readTeamStats(file_name):
dtypes = np.dtype({ 'names' : ('team', 'Height', 'Weight', 'WL_PERC'),
'formats' : ['S10', np.float, np.float, np.float] })
data = np.loadtxt(file_name, delimiter=',', skiprows=1,
usecols=(0,2,3,4), dtype=dtypes)
#data_list = list(data)
return data
def readTeamRecord(file_name):
dtypes = np.dtype({ 'names' : ('team', 'WL_PERC'),
'formats' : ['S10', np.float] })
data = np.loadtxt(file_name, delimiter=',', skiprows=1,
usecols=(0,3), dtype=dtypes)
return data
# Iterate through each NBA team stats file and find the mean height and weight for each team
def analyzeTeamStats(season):
data_set=[]
team_labels_set = []
data_directory = ("/Users/aidan.wong/Documents/mystuff/cs454/jumpball/bd_collect/nba_data/%s/" % season)
for root, dirs, files in os.walk(data_directory):
for f in files:
if f.endswith("agg_data.csv"):
teamStats = readTeamStats(data_directory + f)
teamStats_list = zip(*teamStats)
team = teamStats_list[0][0]
ht_mean = np.array(teamStats_list[1], dtype=float).mean()
wt_mean = np.array(teamStats_list[2], dtype=float).mean()
wl_perc = teamStats_list[3][0]
data = [ht_mean, wt_mean,wl_perc]
data_set.append(data)
team_labels_set.append(team)
#print data_set
#print "--------------"
return data_set,team_labels_set
season = "2011"
next_year_season = "2012"
trainData_1, teamData_1 = analyzeTeamStats(season)
trainData_2, teamData_2 = analyzeTeamStats(next_year_season)
#print trainData_2
#print trainData_2[3]
print "------------------------"
print teamData_1
print teamData_2
#print teamData_2[3]
# This variable defines the new sample team to run K-NN with; the index should range from 0 to 29
new_team_index = 3
trainData_array = np.array(trainData_1).astype(np.float32)
newData_array = np.array(trainData_2)
#print trainData_array
#print newData_array
#print newData_array[0]
#print trainData_array
labels = np.array(np.arange(30))
#print labels
#labels = array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29])
for label, x, y in zip(teamData_1,trainData_array[:,0],trainData_array[:,1]):
pyplot.annotate(label,xy =(x,y), xytext=(-20,20),textcoords = 'offset points',
ha ='right', va = 'bottom',bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow',
alpha = 0.5),arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
for i in range(0,30):
r = lambda: random.randint(0,255)
color = '#%02X%02X%02X' % (r(),r(),r())
team_data = trainData_array[labels.ravel()==i]
pyplot.scatter(team_data[:,0],team_data[:,1],marker = 'o',s = team_data[:,2]*4500,c = color,cmap = pyplot.get_cmap('Spectral'))
#print newData_array[labels.ravel()==1]
#for i in range(1,2):
new_team_data = newData_array[labels.ravel()==new_team_index]
print "new_team_data =", new_team_data
pyplot.scatter(new_team_data[:,0],new_team_data[:,1],marker = '^',s = new_team_data[:,2]*4500,c = color,cmap = pyplot.get_cmap('Spectral'))
i = 0
for label, x, y in zip(teamData_2[:new_team_index+1],newData_array[:,0],newData_array[:,1]):
if i < new_team_index:
print i
i += 1
continue
print zip(teamData_2[:new_team_index+1],newData_array[:,0],newData_array[:,1])
print (label,x,y)
# print type(label)
pyplot.annotate(label,xy =(x,y), xytext=(-20,20),textcoords = 'offset points',
ha ='right', va = 'bottom',bbox = dict(boxstyle = 'round,pad=0.5', fc = 'blue',
alpha = 0.5),arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
knn = cv2.KNearest()
#print trainData_array
#
#
#knn.train(trainData_array,labels)
knn.train(trainData_array,np.array(labels).astype(np.float32))
ret, results, neighbours ,dist = knn.find_nearest((new_team_data).astype(np.float32), 1)
print (ret, results, neighbours, dist)
#
print "result: ", results,"\n"
print "neighbours: ", neighbours,"\n"
print "distance: ", dist
pyplot.show()
| apache-2.0 |
mozman/ezdxf | examples/text_layout_engine_usage.py | 1 | 12087 | # Copyright (c) 2021, Manfred Moitzi
# License: MIT License
import sys
from typing import Iterable
import pathlib
import random
import ezdxf
from ezdxf import zoom, print_config
from ezdxf.math import Matrix44
from ezdxf.tools import fonts
from ezdxf.tools import text_layout as tl
"""
This example shows the usage of the internal text_layout module to render
complex text layouts. The module is designed to render MText like entities,
but could be used for other tasks too. The layout engine supports a multi
column setup, each column contains paragraphs, and these paragraphs can
automatically flow across the columns. All locations are relative to each other,
absolute locations are not supported - tabulators are not supported.
The layout engine knows nothing about the content itself; it just manages
content boxes of a fixed given width and height and "glue" spaces in between.
The engine does not alter the size of the content boxes, but resizes the glue
if necessary. The actual rendering is done by a rendering object associated to
each content box.
The only text styling managed by the layout engine is underline, overline and
strike through multiple content boxes.
Features:
- layout alignment like MText: top-middle-bottom combined with left-center-right
- paragraph alignments: left, right, center, justified
- paragraph indentation: left, right, special first line
- cell alignments: top, center, bottom
- fraction cells: over, slanted, tolerance style
- columns have a fixed height or grow automatically; paragraphs which do not
  fit "flow" into the following column.
- pass through of transformation matrix to the rendering object
TODO:
- bullet- and numbered lists
- refinements to replicate MText features as well as possible
Used for:
- drawing add-on to render MTEXT with columns
- explode MTEXT into DXF primitives (TEXT, LINE)
"""
if not ezdxf.options.use_matplotlib:
print("The Matplotlib package is required.")
sys.exit(1)
# Type alias:
Content = Iterable[tl.Cell]
DIR = pathlib.Path("~/Desktop/Outbox").expanduser()
STYLE = "Style0"
FONT = "OpenSans-Regular.ttf"
COLUMN_HEIGHT: float = 12
print_config()
doc = ezdxf.new()
msp = doc.modelspace()
style = doc.styles.new(STYLE, dxfattribs={"font": FONT})
def measure_space(font):
return font.text_width(" X") - font.text_width("X")
class SizedFont:
def __init__(self, height: float):
self.height = float(height)
self.font = fonts.make_font(FONT, self.height)
self.space = measure_space(self.font)
def text_width(self, text: str):
return self.font.text_width(text)
fix_sized_fonts = [
SizedFont(0.18),
SizedFont(0.35),
SizedFont(0.50),
SizedFont(0.70),
SizedFont(1.00),
]
class FrameRenderer(tl.ContentRenderer):
"""Render object to render a frame around a content collection.
    This renderer can be used by collections which just manage content
    but do not represent content by themselves (Layout, Column, Paragraph).
"""
def __init__(self, color):
self.color = color
def render(
self,
left: float,
bottom: float,
right: float,
top: float,
m: Matrix44 = None,
) -> None:
"""Render a frame as LWPOLYLINE."""
pline = msp.add_lwpolyline(
[(left, top), (right, top), (right, bottom), (left, bottom)],
close=True,
dxfattribs={"color": self.color},
)
if m:
pline.transform(m)
def line(
self, x1: float, y1: float, x2: float, y2: float, m: Matrix44 = None
) -> None:
"""Line renderer used to create underline, overline, strike through
and fraction dividers.
"""
line = msp.add_line(
(x1, y1), (x2, y2), dxfattribs={"color": self.color}
)
if m:
line.transform(m)
class TextRenderer(tl.ContentRenderer):
"""Text content renderer."""
def __init__(self, text, attribs):
self.text = text
self.attribs = attribs
self.line_attribs = {"color": attribs["color"]}
def render(
self,
left: float,
bottom: float,
right: float,
top: float,
m: Matrix44 = None,
):
"""Create/render the text content"""
text = msp.add_text(self.text, dxfattribs=self.attribs)
text.set_pos((left, bottom), align="LEFT")
if m:
text.transform(m)
def line(
self, x1: float, y1: float, x2: float, y2: float, m: Matrix44 = None
) -> None:
"""Line renderer used to create underline, overline, strike through
and fraction dividers.
"""
line = msp.add_line((x1, y1), (x2, y2), dxfattribs=self.line_attribs)
if m:
line.transform(m)
class Word(tl.Text):
"""Represent a word as content box for the layout engine."""
def __init__(self, text: str, font: SizedFont, stroke: int = 0):
# Each content box can have individual properties:
attribs = {
"color": random.choice((1, 2, 3, 4, 6, 7, 7)),
"height": font.height,
"style": STYLE,
}
super().__init__(
# Width and height of the content are fixed given values and will
# not be changed by the layout engine:
width=font.text_width(text),
height=font.height,
stroke=stroke,
            # Each content box can have its own rendering object:
renderer=TextRenderer(text, attribs),
)
def uniform_content(count: int, size: int = 1) -> Content:
"""Create content with one text size."""
font = fix_sized_fonts[size]
for word in tl.lorem_ipsum(count):
yield Word(word, font)
yield tl.Space(font.space)
def random_sized_content(count: int) -> Content:
"""Create content with randomized text size."""
def size():
return random.choice([0, 1, 1, 1, 1, 1, 2, 3])
for word in tl.lorem_ipsum(count):
font = fix_sized_fonts[size()]
yield Word(word, font)
yield tl.Space(font.space)
def stroke_groups(words: Iterable[str]):
group = []
count = 0
stroke = 0
for word in words:
if count == 0:
if group:
yield group, stroke
count = random.randint(1, 4)
group = [word]
stroke = random.choice([0, 0, 0, 0, 1, 1, 1, 2, 2, 4])
else:
count -= 1
group.append(word)
if group:
yield group, stroke
def stroked_content(count: int, size: int = 1) -> Content:
"""Create content with one text size and groups of words with or without
strokes.
"""
font = fix_sized_fonts[size]
groups = stroke_groups(tl.lorem_ipsum(count))
for group, stroke in groups:
# strokes should span across spaces in between words:
# Spaces between words are bound to the preceding content box renderer,
# MText is more flexible, but this implementation is easy and good
# enough, otherwise spaces would need a distinct height and a rendering
        # object, neither of which is implemented for glue objects.
continue_stroke = stroke + 8 if stroke else 0
for word in group[:-1]:
yield Word(word, font=font, stroke=continue_stroke)
yield tl.Space(font.space)
# strokes end at the last word, without continue stroke:
yield Word(group[-1], font=font, stroke=stroke)
yield tl.Space(font.space)
class Fraction(tl.Fraction):
"""Represents a fraction for the layout engine, which consist of a top-
and bottom content box, divided by horizontal or slanted line.
The "tolerance style" has no line between the stacked content boxes.
This implementation is more flexible than MText, the content boxes can be
words but also fractions or cell groups.
"""
def __init__(
self, t1: str, t2: str, stacking: tl.Stacking, font: SizedFont
):
top = Word(t1, font)
bottom = Word(t2, font)
super().__init__(
top=top,
bottom=bottom,
stacking=stacking,
# Uses only the generic line renderer to render the divider line,
# the top- and bottom content boxes use their own render objects.
renderer=FrameRenderer(color=7),
)
def fraction_content() -> Content:
"""Create content with one text size and place random fractions between
words.
"""
words = list(uniform_content(120))
for word in words:
word.valign = tl.CellAlignment.BOTTOM
stacking_options = list(tl.Stacking)
font = SizedFont(0.25) # fraction font
for _ in range(10):
stacking = random.choice(stacking_options)
top = str(random.randint(1, 1000))
bottom = str(random.randint(1, 1000))
pos = random.randint(0, len(words) - 1)
if isinstance(words[pos], tl.Space):
pos += 1
words.insert(pos, Fraction(top, bottom, stacking, font))
words.insert(pos + 1, tl.Space(font.space))
return words
def create_layout(align: tl.ParagraphAlignment, content: Content) -> tl.Layout:
# Create a flow text paragraph for the content:
paragraph = tl.Paragraph(align=align)
paragraph.append_content(content)
# Start the layout engine and set default column width:
layout = tl.Layout(
        width=8,  # default column width for columns without a defined width
margins=(0.5,), # space around the layout
# The render object of collections like Layout, Column or Paragraph is
# called before the render objects of the content managed by the
# collection.
# This could be used to render a frame or a background:
renderer=FrameRenderer(color=2),
)
# Append the first column with default width and a content height of 12 drawing
# units. At least the first column has to be created by the client.
layout.append_column(height=COLUMN_HEIGHT, gutter=1)
# Append the content. The content will be distributed across the available
# columns and automatically overflow into adjacent columns if necessary.
# The layout engine creates new columns automatically if required by
# cloning the last column.
layout.append_paragraphs([paragraph])
    # Content- and total size are always up to date, only the final location
# has to be updated by calling Layout.place().
print()
print(f"Layout has {len(layout)} columns.")
print(f"Layout total width: {layout.total_width}")
print(f"Layout total height: {layout.total_height}")
for n, column in enumerate(layout, start=1):
print()
print(f" {n}. column has {len(column)} paragraph(s)")
print(f" Column total width: {column.total_width}")
print(f" Column total height: {column.total_height}")
# It is recommended to place the layout at origin (0, 0) and use a
# transformation matrix to move the layout to the final location in
# the DXF target layout - the model space in this example.
# Set final layout location in the xy-plane with alignment:
layout.place(align=tl.LayoutAlignment.BOTTOM_LEFT)
# It is possible to add content after calling place(), but place has to be
# called again before calling the render() method of the layout.
return layout
def create(content: Content, y: float) -> None:
x: float = 0
for align in list(tl.ParagraphAlignment):
# Build and place the layout at (0, 0):
layout = create_layout(align, content)
# Render and move the layout to the final location:
m = Matrix44.translate(x, y, 0)
layout.render(m)
x += layout.total_width + 2
dy: float = COLUMN_HEIGHT + 3
create(list(uniform_content(200)), 0)
create(list(random_sized_content(200)), dy)
create(list(stroked_content(200)), 2 * dy)
create(fraction_content(), 3 * dy)
# zooming needs the longest time:
zoom.extents(msp, factor=1.1)
doc.saveas(str(DIR / "text_layout.dxf"))
| mit |
vybstat/scikit-learn | examples/linear_model/plot_ard.py | 248 | 2622 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/lines_bars_and_markers/simple_plot.py | 1 | 1292 | """
===========
Simple Plot
===========
Create a simple plot.
"""
import matplotlib.pyplot as plt
import numpy as np
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
# Data for plotting
t = np.arange(0.0, 2.0, 0.01)
s = 1 + np.sin(2 * np.pi * t)
# Note that using plt.subplots below is equivalent to using
# fig = plt.figure and then ax = fig.add_subplot(111)
fig, ax = plt.subplots()
ax.plot(t, s)
ax.set(xlabel='time (s)', ylabel='voltage (mV)',
title='About as simple as it gets, folks')
ax.grid()
fig.savefig("test.png")
pltshow(plt)
| mit |
adelomana/cassandra | conditionedFitness/figurePatterns/script.sustained.py | 2 | 1771 | import pickle
import statsmodels,statsmodels.api
import matplotlib,matplotlib.pyplot
matplotlib.rcParams.update({'font.size':36,'font.family':'Arial','xtick.labelsize':28,'ytick.labelsize':28})
thePointSize=12
# 0. user defined variables
jarDir='/Users/adriandelomana/scratch/'
# sustained trajectories
selected=['clonal.2.1','engineered.1.1','engineered.2.2','mutagenized.2.2'] # should be n = 6
# 1. iterate over selected trajectories
allx=[]; ally=[]
for replicate in selected:
print(replicate)
# read jar file
jarFile=jarDir+replicate+'.pickle'
f=open(jarFile,'rb')
trajectory=pickle.load(f)
f.close()
# recover data into a format amenable to plotting
x=trajectory[0]
y=trajectory[1]
z=trajectory[2]
for a,b in zip(x,y):
allx.append(a)
ally.append(b)
# run lowess over each replicate. if lowess does not work, do correlation. if not pchip
# plot
matplotlib.pyplot.errorbar(x,y,yerr=z,fmt='o',color='black',ecolor='black',markeredgecolor='black',capsize=0,ms=thePointSize,mew=0,alpha=0.33)
# run lowess over all data
lowess = statsmodels.api.nonparametric.lowess(ally,allx,it=10)
matplotlib.pyplot.plot(lowess[:, 0], lowess[:, 1],color='red',lw=4,zorder=100)
# close figure
matplotlib.pyplot.plot([0,300],[0,0],'--',color='black')
matplotlib.pyplot.xlim([-25,325])
matplotlib.pyplot.ylim([-0.44,0.44])
matplotlib.pyplot.xticks([0,100,200,300])
matplotlib.pyplot.yticks([-0.4,-0.2,0,0.2,0.4])
matplotlib.pyplot.xlabel('Generation')
matplotlib.pyplot.ylabel('Conditioned\nFitness')
#matplotlib.pyplot.text(-20,0.3,'Sustained')
matplotlib.pyplot.text(120,-0.38,'Sustained',color='red')
matplotlib.pyplot.tight_layout(pad=0.5)
matplotlib.pyplot.savefig('figure.sustained.pdf')
| gpl-3.0 |
gnychis/grforwarder | gnuradio-examples/python/pfb/resampler.py | 7 | 4207 | #!/usr/bin/env python
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
import sys
try:
import scipy
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class mytb(gr.top_block):
def __init__(self, fs_in, fs_out, fc, N=10000):
gr.top_block.__init__(self)
rerate = float(fs_out) / float(fs_in)
print "Resampling from %f to %f by %f " %(fs_in, fs_out, rerate)
# Creating our own taps
taps = gr.firdes.low_pass_2(32, 32, 0.25, 0.1, 80)
self.src = gr.sig_source_c(fs_in, gr.GR_SIN_WAVE, fc, 1)
#self.src = gr.noise_source_c(gr.GR_GAUSSIAN, 1)
self.head = gr.head(gr.sizeof_gr_complex, N)
# A resampler with our taps
self.resamp_0 = blks2.pfb_arb_resampler_ccf(rerate, taps,
flt_size=32)
# A resampler that just needs a resampling rate.
# Filter is created for us and designed to cover
# entire bandwidth of the input signal.
# An optional atten=XX rate can be used here to
# specify the out-of-band rejection (default=80).
self.resamp_1 = blks2.pfb_arb_resampler_ccf(rerate)
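        # Sketch of the optional out-of-band rejection argument described above
        # (value illustrative; 80 dB is the stated default):
        #   self.resamp_1 = blks2.pfb_arb_resampler_ccf(rerate, atten=100)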
self.snk_in = gr.vector_sink_c()
self.snk_0 = gr.vector_sink_c()
self.snk_1 = gr.vector_sink_c()
self.connect(self.src, self.head, self.snk_in)
self.connect(self.head, self.resamp_0, self.snk_0)
self.connect(self.head, self.resamp_1, self.snk_1)
def main():
fs_in = 8000
fs_out = 20000
fc = 1000
N = 10000
tb = mytb(fs_in, fs_out, fc, N)
tb.run()
# Plot PSD of signals
nfftsize = 2048
fig1 = pylab.figure(1, figsize=(10,10), facecolor="w")
sp1 = fig1.add_subplot(2,1,1)
sp1.psd(tb.snk_in.data(), NFFT=nfftsize,
noverlap=nfftsize/4, Fs = fs_in)
sp1.set_title(("Input Signal at f_s=%.2f kHz" % (fs_in/1000.0)))
sp1.set_xlim([-fs_in/2, fs_in/2])
sp2 = fig1.add_subplot(2,1,2)
sp2.psd(tb.snk_0.data(), NFFT=nfftsize,
noverlap=nfftsize/4, Fs = fs_out,
label="With our filter")
sp2.psd(tb.snk_1.data(), NFFT=nfftsize,
noverlap=nfftsize/4, Fs = fs_out,
label="With auto-generated filter")
sp2.set_title(("Output Signals at f_s=%.2f kHz" % (fs_out/1000.0)))
sp2.set_xlim([-fs_out/2, fs_out/2])
sp2.legend()
# Plot signals in time
Ts_in = 1.0/fs_in
Ts_out = 1.0/fs_out
t_in = scipy.arange(0, len(tb.snk_in.data())*Ts_in, Ts_in)
t_out = scipy.arange(0, len(tb.snk_0.data())*Ts_out, Ts_out)
fig2 = pylab.figure(2, figsize=(10,10), facecolor="w")
sp21 = fig2.add_subplot(2,1,1)
sp21.plot(t_in, tb.snk_in.data())
sp21.set_title(("Input Signal at f_s=%.2f kHz" % (fs_in/1000.0)))
sp21.set_xlim([t_in[100], t_in[200]])
sp22 = fig2.add_subplot(2,1,2)
sp22.plot(t_out, tb.snk_0.data(),
label="With our filter")
sp22.plot(t_out, tb.snk_1.data(),
label="With auto-generated filter")
sp22.set_title(("Output Signals at f_s=%.2f kHz" % (fs_out/1000.0)))
r = float(fs_out)/float(fs_in)
    sp22.set_xlim([t_out[int(r * 100)], t_out[int(r * 200)]])
sp22.legend()
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
xubenben/data-science-from-scratch | code/clustering.py | 60 | 6438 | from __future__ import division
from linear_algebra import squared_distance, vector_mean, distance
import math, random
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
class KMeans:
"""performs k-means clustering"""
def __init__(self, k):
self.k = k # number of clusters
self.means = None # means of clusters
def classify(self, input):
"""return the index of the cluster closest to the input"""
return min(range(self.k),
key=lambda i: squared_distance(input, self.means[i]))
def train(self, inputs):
self.means = random.sample(inputs, self.k)
assignments = None
while True:
# Find new assignments
new_assignments = map(self.classify, inputs)
# If no assignments have changed, we're done.
if assignments == new_assignments:
return
# Otherwise keep the new assignments,
assignments = new_assignments
for i in range(self.k):
i_points = [p for p, a in zip(inputs, assignments) if a == i]
# avoid divide-by-zero if i_points is empty
if i_points:
self.means[i] = vector_mean(i_points)
def squared_clustering_errors(inputs, k):
"""finds the total squared error from k-means clustering the inputs"""
clusterer = KMeans(k)
clusterer.train(inputs)
means = clusterer.means
assignments = map(clusterer.classify, inputs)
return sum(squared_distance(input,means[cluster])
for input, cluster in zip(inputs, assignments))
def plot_squared_clustering_errors(plt):
ks = range(1, len(inputs) + 1)
errors = [squared_clustering_errors(inputs, k) for k in ks]
plt.plot(ks, errors)
plt.xticks(ks)
plt.xlabel("k")
plt.ylabel("total squared error")
plt.show()
#
# using clustering to recolor an image
#
def recolor_image(input_file, k=5):
    img = mpimg.imread(input_file)
pixels = [pixel for row in img for pixel in row]
clusterer = KMeans(k)
clusterer.train(pixels) # this might take a while
def recolor(pixel):
cluster = clusterer.classify(pixel) # index of the closest cluster
return clusterer.means[cluster] # mean of the closest cluster
new_img = [[recolor(pixel) for pixel in row]
for row in img]
plt.imshow(new_img)
plt.axis('off')
plt.show()
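# Minimal usage sketch for recolor_image (file name and k are illustrative):
#   recolor_image("my_image.png", k=5)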
#
# hierarchical clustering
#
def is_leaf(cluster):
"""a cluster is a leaf if it has length 1"""
return len(cluster) == 1
def get_children(cluster):
"""returns the two children of this cluster if it's a merged cluster;
raises an exception if this is a leaf cluster"""
if is_leaf(cluster):
raise TypeError("a leaf cluster has no children")
else:
return cluster[1]
def get_values(cluster):
"""returns the value in this cluster (if it's a leaf cluster)
or all the values in the leaf clusters below it (if it's not)"""
if is_leaf(cluster):
return cluster # is already a 1-tuple containing value
else:
return [value
for child in get_children(cluster)
for value in get_values(child)]
def cluster_distance(cluster1, cluster2, distance_agg=min):
"""finds the aggregate distance between elements of cluster1
and elements of cluster2"""
return distance_agg([distance(input1, input2)
for input1 in get_values(cluster1)
for input2 in get_values(cluster2)])
def get_merge_order(cluster):
if is_leaf(cluster):
return float('inf')
else:
return cluster[0] # merge_order is first element of 2-tuple
def bottom_up_cluster(inputs, distance_agg=min):
# start with every input a leaf cluster / 1-tuple
clusters = [(input,) for input in inputs]
# as long as we have more than one cluster left...
while len(clusters) > 1:
# find the two closest clusters
c1, c2 = min([(cluster1, cluster2)
for i, cluster1 in enumerate(clusters)
for cluster2 in clusters[:i]],
key=lambda (x, y): cluster_distance(x, y, distance_agg))
# remove them from the list of clusters
clusters = [c for c in clusters if c != c1 and c != c2]
# merge them, using merge_order = # of clusters left
merged_cluster = (len(clusters), [c1, c2])
# and add their merge
clusters.append(merged_cluster)
# when there's only one cluster left, return it
return clusters[0]
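# Sketch of the cluster representation built above (values illustrative):
# a leaf is a 1-tuple such as ([10, 20],); a merged cluster is a 2-tuple
# (merge_order, [child1, child2]), e.g. (0, [([10, 20],), ([12, 22],)]).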
def generate_clusters(base_cluster, num_clusters):
# start with a list with just the base cluster
clusters = [base_cluster]
# as long as we don't have enough clusters yet...
while len(clusters) < num_clusters:
# choose the last-merged of our clusters
next_cluster = min(clusters, key=get_merge_order)
# remove it from the list
clusters = [c for c in clusters if c != next_cluster]
# and add its children to the list (i.e., unmerge it)
clusters.extend(get_children(next_cluster))
# once we have enough clusters...
return clusters
if __name__ == "__main__":
inputs = [[-14,-5],[13,13],[20,23],[-19,-11],[-9,-16],[21,27],[-49,15],[26,13],[-46,5],[-34,-1],[11,15],[-49,0],[-22,-16],[19,28],[-12,-8],[-13,-19],[-41,8],[-11,-6],[-25,-9],[-18,-3]]
random.seed(0) # so you get the same results as me
clusterer = KMeans(3)
clusterer.train(inputs)
print "3-means:"
print clusterer.means
print
random.seed(0)
clusterer = KMeans(2)
clusterer.train(inputs)
print "2-means:"
print clusterer.means
print
print "errors as a function of k"
for k in range(1, len(inputs) + 1):
print k, squared_clustering_errors(inputs, k)
print
print "bottom up hierarchical clustering"
base_cluster = bottom_up_cluster(inputs)
print base_cluster
print
print "three clusters, min:"
for cluster in generate_clusters(base_cluster, 3):
print get_values(cluster)
print
print "three clusters, max:"
base_cluster = bottom_up_cluster(inputs, max)
for cluster in generate_clusters(base_cluster, 3):
print get_values(cluster)
| unlicense |
wei-Z/Python-Machine-Learning | self_practice/CH2A.py | 1 | 4506 | import numpy as np
class Perceptron(object):
"""Perceptron classifier.
Parameters
------------
eta : float
Learning rate (between 0.0 and 1.0)
n_iter : int
Passes over the training dataset.
Attributes
-----------
w_ : 1d-array
Weights after fitting.
errors_ : list
Number of misclassifications in every epoch.
"""
def __init__(self, eta=0.01, n_iter=10):
self.eta = eta
self.n_iter = n_iter
def fit(self, X, y):
"""Fit training data.
Parameters
----------
X : {array-like}, shape = [n_samples, n_features]
Training vectors, where n_samples
is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-----------
self : object
"""
self.w_ = np.zeros(1 + X.shape[1]) # return zero array, X.shape[1] =2L, return array([0., 0., 0.])
self.errors_ = []
for _ in range(self.n_iter):
errors = 0
for xi, target in zip(X, y):
update = self.eta * (target - self.predict(xi))
self.w_[1:] += update * xi
self.w_[0] += update
errors += int(update != 0.0) # if update = 0.0, errors = 0; if update unequal 0.0, errors =1.
self.errors_.append(errors)
return self
def net_input(self, X):
#"""Calculate net input"""
return np.dot(X, self.w_[1:]) + self.w_[0] # matrix multiplication
def predict(self, X):
#"""Return class label after unit step"""
return np.where(self.net_input(X) >= 0.0, 1, -1) # numpy.where(condition[, x, y])
# Return elements, either from x or y, depending on condition.
# Training a perceptron model on the Iris dataset
import pandas as pd
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)
df.tail()
import matplotlib.pyplot as plt
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)# if y == 'Iris-setosa', y = -1, otherwise if y == 'Iris-versicolor', y =1.
X = df.iloc[0:100, [0,2]].values
plt.scatter(X[:50, 0], X[:50, 1], color='red', marker='o', label='setosa')
plt.scatter(X[50:100, 0], X[50:100, 1], color='blue', marker='x', label='versicolor')
plt.xlabel('petal length')
plt.ylabel('sepal length')
plt.legend(loc='upper left')
plt.show()
ppn = Perceptron()
ppn.fit(X,y)
plt.plot(range(1, len(ppn.errors_)+1), ppn.errors_, marker='o', color="green")
plt.xlabel('Epochs')
plt.ylabel('Number of misclassifications')
plt.show()
# Implement a small convenience function to visualize the decision boundaries for 2D datasets:
from matplotlib.colors import ListedColormap
def plot_decision_regions(X, y, classifier, resolution=0.02):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
print "xx1: ", xx1
print "xx2: ", xx2
print "Z: ", Z
print "xx1.ravel(): ", xx1.ravel()
print "xx2.ravel(): ", xx2.ravel()
Z = Z.reshape(xx1.shape)
print "Z: ", Z
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
# plot class samples
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8, c=cmap(idx), marker=markers[idx], label=cl)
plot_decision_regions(X, y, classifier=ppn)
plt.xlabel('sepal length [cm]')
plt.ylabel('petal length [cm]')
plt.legend(loc = 'upper left')
plt.show()
# meshgrid, ravel, reshape, contourf, xlim, ylim
# how to use contourf and meshgrid:
x = np.arange(-5, 5, 0.1)
y = np.arange(-5, 5, 0.1)
xx, yy = np.meshgrid(x, y, sparse=True)
z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
h = plt.contourf(x,y,z)
| mit |
mganeva/mantid | qt/python/mantidqt/project/projectsaver.py | 1 | 5274 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantidqt package
#
from __future__ import (absolute_import, division, print_function, unicode_literals)
from json import dump
import os
from mantid.api import AnalysisDataService as ADS
from mantidqt.project.workspacesaver import WorkspaceSaver
from mantidqt.project.plotssaver import PlotsSaver
from mantid import logger
class ProjectSaver(object):
def __init__(self, project_file_ext):
self.project_file_ext = project_file_ext
def save_project(self, file_name, workspace_to_save=None, plots_to_save=None, interfaces_to_save=None,
project_recovery=True):
"""
The method that will actually save the project and call relevant savers for workspaces, plots, interfaces etc.
        :param file_name: String; The file name of the project file to save to
:param workspace_to_save: List; of Strings that will have workspace names in it, if None will save all
:param plots_to_save: List; of matplotlib.figure objects to save to the project file.
:param interfaces_to_save: List of Lists of Window and Encoder; the interfaces to save and the encoders to use
:param project_recovery: Bool; If the behaviour of Project Save should be altered to function correctly inside
of project recovery
:return: None; If the method cannot be completed.
"""
# Check if the file_name doesn't exist
if file_name is None:
logger.warning("Please select a valid file name")
return
# Check this isn't saving a blank project file
if (workspace_to_save is None and plots_to_save is None and interfaces_to_save is None) and project_recovery:
logger.warning("Can not save an empty project")
return
directory = os.path.dirname(file_name)
# Save workspaces to that location
if project_recovery:
workspace_saver = WorkspaceSaver(directory=directory)
workspace_saver.save_workspaces(workspaces_to_save=workspace_to_save)
saved_workspaces = workspace_saver.get_output_list()
else:
# Assume that this is project recovery so pass a list of workspace names
saved_workspaces = ADS.getObjectNames()
# Generate plots
plots_to_save_list = PlotsSaver().save_plots(plots_to_save)
# Save interfaces
if interfaces_to_save is None:
interfaces_to_save = []
interfaces = self._return_interfaces_dicts(directory=directory, interfaces_to_save=interfaces_to_save)
# Pass dicts to Project Writer
writer = ProjectWriter(workspace_names=saved_workspaces,
plots_to_save=plots_to_save_list,
interfaces_to_save=interfaces,
save_location=file_name,
project_file_ext=self.project_file_ext)
writer.write_out()
@staticmethod
def _return_interfaces_dicts(directory, interfaces_to_save):
interfaces = []
for interface, encoder in interfaces_to_save:
# Add to the dictionary encoded data with the key as the first tag in the list on the encoder attributes
try:
tag = encoder.tags[0]
encoded_dict = encoder.encode(interface, directory)
encoded_dict["tag"] = tag
interfaces.append(encoded_dict)
except Exception as e:
# Catch any exception and log it
if isinstance(e, KeyboardInterrupt):
raise
logger.warning("Project Saver: An interface could not be saver error: " + str(e))
return interfaces
class ProjectWriter(object):
def __init__(self, save_location, workspace_names, project_file_ext, plots_to_save, interfaces_to_save):
self.workspace_names = workspace_names
self.file_name = save_location
self.project_file_ext = project_file_ext
self.plots_to_save = plots_to_save
self.interfaces_to_save = interfaces_to_save
def write_out(self):
"""
Write out the project file that contains workspace names, interfaces information, plot preferences etc.
"""
# Get the JSON string versions
to_save_dict = {"workspaces": self.workspace_names, "plots": self.plots_to_save,
"interfaces": self.interfaces_to_save}
# Open file and save the string to it alongside the workspace_names
if self.project_file_ext not in os.path.basename(self.file_name):
self.file_name = self.file_name + self.project_file_ext
try:
with open(self.file_name, "w+") as f:
dump(obj=to_save_dict, fp=f)
except Exception as e:
# Catch any exception and log it
if isinstance(e, KeyboardInterrupt):
raise
logger.warning("JSON project file unable to be opened/written to")
| gpl-3.0 |
PetaVision/projects | momentLearn/scripts/recon_simple.py | 2 | 1138 | import os, sys
lib_path = os.path.abspath("/home/slundquist/workspace/PetaVision/plab/")
sys.path.append(lib_path)
from plotRecon import plotRecon
from plotReconError import plotReconError
#For plotting
#import matplotlib.pyplot as plt
outputDir = "/nh/compneuro/Data/momentLearn/output/simple_momentum_out/"
skipFrames = 10 #Only print every 20th frame
startFrames = 0
doPlotRecon = True
doPlotErr = False
errShowPlots = False
layers = [
"a1_ImageRescale",
"a4_Recon",
]
#Layers for constructing recon error
preErrLayers = [
"a1_LeftDownsample",
"a5_RightDownsample",
]
postErrLayers = [
"a3_LeftRecon",
"a7_RightRecon",
]
gtLayers = None
#gtLayers = [
# #"a25_DepthRescale",
# #"a25_DepthRescale",
# #"a25_DepthRescale",
# "a25_DepthRescale",
# "a25_DepthRescale",
# "a25_DepthRescale",
#]
preToPostScale = [
.007,
.007,
]
if(doPlotRecon):
print("Plotting reconstructions")
plotRecon(layers, outputDir, skipFrames)
if(doPlotErr):
print("Plotting reconstruction error")
plotReconError(preErrLayers, postErrLayers, preToPostScale, outputDir, errShowPlots, skipFrames, gtLayers)
| epl-1.0 |
kaichogami/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 320 | 3369 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
Example builds a swiss roll dataset and runs
hierarchical clustering on their position.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifolds. On the contrary, when imposing connectivity constraints,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
              'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
| bsd-3-clause |
gwpy/seismon | RfPrediction/StackedEnsemble_Rfamplitude_prediction.py | 2 | 11411 | # Stacked Ensemble RfAmp Prediction Model
# Multiple ML regressors are individually trained and then combined via meta-regressor.
# Hyperparameters are tuned via GridSearchCV
# coding: utf-8
from __future__ import division
import optparse
import numpy as np
import pandas as pd
import os
import sys
if not os.getenv("DISPLAY", None):
import matplotlib
matplotlib.use("agg", warn=False)
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import matplotlib.cm as cm
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.externals import joblib
from mlxtend.regressor import StackingCVRegressor
from sklearn.datasets import load_boston
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import GridSearchCV
from keras.models import Sequential, model_from_json
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD, Adagrad, Adadelta, RMSprop, Adam
from keras import losses
from keras import callbacks
from keras.utils import plot_model
import pickle
__author__ = "Nikhil Mukund <nikhil.mukund@ligo.org>, Michael Coughlin <michael.coughlin.ligo.org>"
__version__ = 1.0
__date__ = "11/26/2017"
def parse_commandline():
"""@parse the options given on the command-line.
"""
parser = optparse.OptionParser(usage=__doc__,version=__version__)
parser.add_option("-f", "--earthquakesFile", help="Seismon earthquakes file.",default ="/home/mcoughlin/Seismon/Predictions/L1O1O2_CMT_GPR/earthquakes.txt")
parser.add_option("-o", "--outputDirectory", help="output folder.",default ="/home/mcoughlin/Seismon/MLA/L1O1O2/")
parser.add_option("-r", "--runType", help="run type (original, lowlatency, cmt)", default ="lowlatency")
parser.add_option("-m", "--minMagnitude", help="Minimum earthquake magnitude.", default=5.0,type=float)
parser.add_option("-N", "--Nepoch", help="number of epochs", default =10, type=int)
parser.add_option("--doPlots", action="store_true", default=False)
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="Run verbosely. (Default: False)")
opts, args = parser.parse_args()
# show parameters
if opts.verbose:
print >> sys.stderr, ""
print >> sys.stderr, "running network_eqmon..."
print >> sys.stderr, "version: %s"%__version__
print >> sys.stderr, ""
print >> sys.stderr, "***************** PARAMETERS ********************"
for o in opts.__dict__.items():
print >> sys.stderr, o[0]+":"
print >> sys.stderr, o[1]
print >> sys.stderr, ""
return opts
'''
0: earthquake gps time
1: earthquake mag
2: p gps time
3: s gps time
4: r (2 km/s)
5: r (3.5 km/s)
6: r (5 km/s)
7: predicted ground motion (m/s)
8: lower bounding time
9: upper bounding time
10: latitude
11: longitude
12: distance
13: depth (m)
14: azimuth (deg)
15: nodalPlane1_strike
16: nodalPlane1_rake
17: nodalPlane1_dip
18: momentTensor_Mrt
19: momentTensor_Mtp
20: momentTensor_Mrp
21: momentTensor_Mtt
22: momentTensor_Mrr
23: momentTensor_Mpp
24: peak ground velocity gps time
25: peak ground velocity (m/s)
26: peak ground acceleration gps time
27: peak ground acceleration (m/s^2)
28: peak ground displacement gps time
29: peak ground displacement (m)
30: Lockloss time
31: Detector Status
'''
# Parse command line
opts = parse_commandline()
outputDirectory = os.path.join(opts.outputDirectory,opts.runType)
if not os.path.isdir(outputDirectory):
os.makedirs(outputDirectory)
data = pd.read_csv(opts.earthquakesFile,delimiter=' ',header=None)
neqs, ncols = data.shape
if ncols == 32:
fileType = "seismon"
elif ncols == 27:
fileType = "usarray"
data = data.drop(data.columns[[24]], 1)
data = data.rename(index=int, columns={25: 24, 26: 25})
else:
print("I do not understand the file type...")
exit(0)
# find magnitudes greater than minimum magnitude
index = data[1] > opts.minMagnitude
data = data[:][index]
# find depth = 0
index = np.where(data[[13]] == 0)[0]
data.iloc[index,13] = 1.0
# shuffle data
data = data.reindex(np.random.permutation(data.index))
Mag_idx = 1
Dist_idx = 12
Depth_idx = 13
Rf_Amp_idx = 25
# Mag threshold
Rf_Amp_thresh = 1e-8;
index = data[Rf_Amp_idx] > Rf_Amp_thresh
data = data[:][index]
if opts.runType == "cmt":
# Select features
FeatSet_index = [1,10,11,12,13,14,15,16,17,18,19,20,21,22,23]
elif opts.runType == "lowlatency":
    #FeatSet_index = [1,7,10,11,12,13,14,15,16,17] # these lower set parameters make sense
    FeatSet_index = [1,10,11,12,13,14] # these lower set parameters make sense
elif opts.runType == "original":
FeatSet_index = [1,12,13] # Just Mag, Dist, Depth
else:
print("--runType must be original, lowlatency, and cmt")
exit(0)
Target_index = [Rf_Amp_idx]
# Artificially increase samples
data_temp = data
copy_num = 6
noise_level = 1e-2 # 1e-2
Rfamp_orig = data_temp[Target_index];
data_orig = data_temp
def boost_samples(x_samples,y_samples,copy_num=3,noise_level=1e-2):
# Artificially increase samples
data_x_temp = x_samples
data_y_temp = y_samples
for i in range(copy_num):
data_x_temp = np.vstack((data_x_temp,data_x_temp))
data_y_temp = np.vstack((data_y_temp,data_y_temp))
data_x_orig = data_x_temp
data_y_orig = data_y_temp
x1 = data_x_temp
x2 = np.random.randn(*data_x_temp.shape)*noise_level
x_samples_boosted = x1 + np.multiply(x1,x2)
y1 = data_y_temp
y2 = np.random.randn(*data_y_temp.shape)*noise_level
y_samples_boosted = y1 + np.multiply(y1,y2)
# Shuffle samples
#IDX = np.random.permutation(y_samples_boosted.index)
IDX = np.random.permutation(np.arange(0,len(y_samples_boosted)))
x_samples_boosted = x_samples_boosted[IDX,:]
y_samples_boosted = y_samples_boosted[IDX,:]
return x_samples_boosted, y_samples_boosted
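# Sketch of how boost_samples grows the data (shapes illustrative): each loop pass
# doubles the sample count, so copy_num=2 turns (10, 6) / (10, 1) inputs into
# (40, 6) / (40, 1) outputs before noise is added and the rows are shuffled:
#   xb, yb = boost_samples(np.ones((10, 6)), np.ones((10, 1)), copy_num=2, noise_level=1e-2)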
data = data_temp
# Take Log10 of distance, depth and the target amplitude
data[[Dist_idx, Depth_idx]] = np.log10(data[[Dist_idx, Depth_idx]])
data[Target_index] = np.log10(data[Target_index])
X = np.asarray(data[FeatSet_index])
Y = np.asarray(data[Target_index])
# Normalize samples
x_scaler = preprocessing.MinMaxScaler()
#x_scaler = preprocessing.data.QuantileTransformer()
X = x_scaler.fit_transform(X)
y_scaler = preprocessing.MinMaxScaler()
#y_scaler = preprocessing.data.QuantileTransformer()
Y = y_scaler.fit_transform(Y)
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.3,random_state=42)
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.3,random_state=42)
# boost_samples + normalize + shuffle them
TUPLE1 = boost_samples(x_train,y_train,copy_num,noise_level)
TUPLE2 = boost_samples(x_val,y_val,copy_num,noise_level)
TUPLE3 = boost_samples(x_test,y_test,copy_num,noise_level)
x_train = TUPLE1[0]
y_train = TUPLE1[1]
x_val = TUPLE2[0]
y_val = TUPLE2[1]
x_test = TUPLE3[0]
y_test = TUPLE3[1]
#############################################
# Construct Stacked Ensemble Model #
#############################################
RANDOM_SEED = 42
ridge = Ridge()
lasso = Lasso()
svr_lin = SVR(kernel='linear')
svr_rbf = SVR(kernel='rbf')
lr = LinearRegression()
rf = RandomForestRegressor(random_state=RANDOM_SEED)
np.random.seed(RANDOM_SEED)
regressors = [svr_lin, svr_rbf, lr, ridge, lasso]
stack = StackingCVRegressor(regressors=regressors,
meta_regressor=rf,
use_features_in_secondary=True)
'''params = {'lasso__alpha': [0.1, 1.0, 10.0],
'ridge__alpha': [0.1, 1.0, 10.0],
'svr__C': [0.1, 1.0, 10.0],
'meta-svr__C': [0.1, 1.0, 10.0, 100.0],
'meta-svr__gamma': [0.1, 1.0, 10.0]}
params = {'lasso__alpha': [0.1, 1.0, 10.0],
'ridge__alpha': [0.1, 1.0, 10.0]}'''
model = GridSearchCV(
estimator=stack,
param_grid={
'lasso__alpha': [x/5.0 for x in range(1, 10)],
'ridge__alpha': [x/20.0 for x in range(1, 10)],
'meta-randomforestregressor__n_estimators': [10, 100]
},
cv=5,
refit=True,
verbose=10,
n_jobs=8,
)
###################################################
model.fit(x_train, y_train.ravel())
print("Best: %f using %s" % (grid.best_score_, grid.best_params_))
###################################################
y_pred = model.predict(x_test)
y_pred = np.expand_dims(y_pred,axis=1)
# Rescale Back
y_pred = 10**y_scaler.inverse_transform(y_pred)
y_test = 10**y_scaler.inverse_transform(y_test)
# Reject test samples below certain threshold
Rf_thresh = 0.5*1e-7 # 0.5*1e-6
ijk = y_test > Rf_thresh
y_test = y_test[ijk]
y_pred = y_pred[ijk]
x_test = x_test[ijk.flatten(),:]
# Add bias
#y_pred = y_pred + 0.1*y_pred
# sort results in Ascending order
y_test_sort = np.sort(y_test,axis=0)
y_pred_sort = y_pred[np.argsort(y_test,axis=0)]
## Percentage within the specified factor
Fac = 2
IDX = y_pred_sort/(y_test_sort+np.finfo(float).eps) >= 1
K = y_pred_sort[IDX]
Q = y_test_sort[IDX]
L = y_pred_sort[~IDX]
M = y_test_sort[~IDX]
Upper_indices = [i for i, x in enumerate(K <= Fac*Q) if x == True]
Lower_indices = [i for i, x in enumerate(L >= M/Fac) if x == True]
Percent_within_Fac = (len(Upper_indices) + len(Lower_indices))/len(y_pred)*100
print("Percentage captured within a factor of {} = {:.2f}".format(Fac,Percent_within_Fac))
Diff = abs(y_pred_sort - y_test_sort)
# Errorbar values
yerr_lower = y_test_sort - y_test_sort/Fac
yerr_upper = Fac*y_test_sort - y_test_sort
idx = np.arange(0,len(y_test_sort))
if opts.doPlots:
font = {'weight' : 'bold',
'size' : 15}
plt.rc('font', **font)
plt.rc('legend',**{'fontsize':15})
plt.figure(figsize=(10,8))
plt.style.use('dark_background')
#plt.style.use('ggplot')
diff_plt = plt.scatter(idx,Diff,color='lightgreen',alpha=0.1)
errorbar_plt = plt.errorbar(idx,y_test_sort,yerr=[yerr_lower,yerr_upper], alpha=0.05 ,color='lightgrey')
actual_plt = plt.scatter(idx,y_test_sort,color='#1f77b4',alpha=0.9)
idx2 = np.arange(0,len(y_pred_sort))
pred_plt = plt.scatter(idx2,y_pred_sort,color='#d62728',alpha=0.2)
plt.yscale('log')
plt.grid()
plt.ylim([1e-7, 1e-3])
#plt.ylim([0, 1])
#plt.ylabel('Rf Amplitude (m/s) \n (Normalized to 1)',fontsize=25)
plt.ylabel('Rf Amplitude (m/s) ',fontsize=25)
plt.xlabel('Samples',fontsize=25)
plt.title("Percentage captured within a factor of {} = {:.2f}".format(Fac,Percent_within_Fac))
legend_plt = plt.legend([pred_plt,actual_plt, diff_plt],['Prediction', 'Actual', 'Difference'],loc=2,markerscale=2., scatterpoints=100)
plt.autoscale(enable=True, axis='x', tight=True)
plt.grid(linestyle=':')
plt.savefig(os.path.join(outputDirectory,'performance.pdf'),bbox_inches='tight')
plt.close()
# Save Model
# serialize model & pickle
pickle.dump(model, open("%s/model.p"%outputDirectory, "wb"))
print("Saved model to disk")
'''
# Load Saved Model
# load pickle
pickle_file = open('%s/model.p'%outputDirectory, 'rb')
loaded_model_pickle = pickle.load(pickle_file)
print("Loaded model from disk")
'''
| gpl-3.0 |
hiuwo/acq4 | acq4/analysis/tools/Fitting.py | 1 | 36006 | #!/usr/bin/env python
"""
Python class wrapper for data fitting.
Includes the following external methods:
getFunctions returns the list of function names (dictionary keys)
FitRegion performs the fitting
Note that FitRegion will plot on top of the current data using MPlots routines
if the current curve and the current plot instance are passed.
"""
# January, 2009
# Paul B. Manis, Ph.D.
# UNC Chapel Hill
# Department of Otolaryngology/Head and Neck Surgery
# Supported by NIH Grants DC000425-22 and DC004551-07 to PBM.
# Copyright Paul Manis, 2009
#
"""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Additional Terms:
The author(s) would appreciate that any modifications to this program, or
corrections of errors, be reported to the principal author, Paul Manis, at
pmanis@med.unc.edu, with the subject line "PySounds Modifications".
Note: This program also relies on the TrollTech Qt libraries for the GUI.
You must obtain these libraries from TrollTech directly, under their license
to use the program.
"""
import sys
import numpy
import scipy
try:
import openopt
HAVE_OPENOPT = True
except ImportError:
HAVE_OPENOPT = False
print "There was an error importing openopt. Continuing...."
import ctypes
import numpy.random
#from numba import autojit
usingMPlot = False
if usingMPlot:
import MPlot # we include plotting as part of the fitting
def debug_trace():
    '''Set a tracepoint in the Python debugger that works with Qt'''
    from pdb import set_trace
    try:
        from PyQt4.QtCore import pyqtRemoveInputHook
        pyqtRemoveInputHook()
    except ImportError:
        pass
    set_trace()
class Fitting():
# dictionary contains:
# name of function: function call, initial parameters, iterations, plot color, then x and y for testing
    # target values, names of parameters, constant values, and derivative function if needed.
#
def __init__(self):
self.fitfuncmap = {
'exp0' : (self.exp0eval, [0.0, 20.0], 2000, 'k', [0, 100, 1.],
[1.0, 5.0], ['A0', 'tau'], None, None),
'exp1' : (self.expeval, [0.0, 0.0, 20.0], 2000, 'k', [0, 100, 1.],
[0.5, 1.0, 5.0], ['DC', 'A0', 'tau'], None, self.expevalprime),
'expsum' : (self.expsumeval, [0.0, -0.5, 200.0, -0.25, 450.0], 500000, 'k', [0, 1000, 1.],
[0.0, -1.0, 150.0, -0.25, 350.0], ['DC', 'A0', 'tau0', 'A1', 'tau1'], None, None),
'expsum2' : (self.expsumeval2, [0., -0.5, -0.250], 50000, 'k', [0, 1000, 1.],
[0., -0.5, -0.25], ['A0', 'A1'], [5., 20.], None),
'exp2' : (self.exp2eval, [0.0, -0.5, 200.0, -0.25, 450.0], 500000, 'k', [0, 1000, 1.],
[0.0, -1.0, 150.0, -0.25, 350.0], ['DC', 'A0', 'tau0', 'A1', 'tau1'], None, None),
'exppow' : (self.exppoweval, [0.0, 1.0, 100, ], 2000, 'k', [0, 100, 0.1],
[0.0, 1.0, 100.0], ['DC', 'A0', 'tau'], None, None),
'exppulse' : (self.expPulse, [3.0, 2.5, 0.2, 2.5, 2.0, 0.5], 2000, 'k', [0, 10, 0.3],
[0.0, 0., 0.75, 4., 1.5, 1.], ['DC', 't0', 'tau1', 'tau2', 'amp', 'width'], None, None),
'boltz' : (self.boltzeval, [0.0, 1.0, -50.0, -5.0], 5000, 'r', [-130., -30., 1.],
[0.00, 0.010, -100.0, 7.0], ['DC', 'A0', 'x0', 'k'], None, None),
'gauss' : (self.gausseval, [1.0, 0.0, 0.5], 2000, 'y', [-10., 10., 0.2],
[1.0, 1.0, 2.0], ['A', 'mu', 'sigma'], None, None),
'line' : (self.lineeval, [1.0, 0.0], 500, 'r', [-10., 10., 0.5],
[0.0, 2.0], ['m', 'b'], None, None),
'poly2' : (self.poly2eval, [1.0, 1.0, 0.0], 500, 'r', [0, 100, 1.],
[0.5, 1.0, 5.0], ['a', 'b', 'c'], None, None),
'poly3' : (self.poly3eval, [1.0, 1.0, 1.0, 0.0], 1000, 'r', [0., 100., 1.],
[0.5, 1.0, 5.0, 2.0], ['a', 'b', 'c', 'd'], None, None),
'poly4' : (self.poly4eval, [1.0, 1.0, 1.0, 1.0, 0.0], 1000, 'r', [0., 100., 1.],
[0.1, 0.5, 1.0, 5.0, 2.0], ['a', 'b', 'c', 'd', 'e'], None, None),
'sin' : (self.sineeval, [-1., 1.0, 4.0, 0.0], 1000, 'r', [0., 100., 0.2],
[0.0, 1.0, 9.0, 0.0], ['DC', 'A', 'f', 'phi'], None, None),
'boltz2' : (self.boltzeval2, [0.0, 0.5, -50.0, 5.0, 0.5, -20.0, 3.0], 1200, 'r',
[-100., 50., 1.], [0.0, 0.3, -45.0, 4.0, 0.7, 10.0, 12.0],
['DC', 'A1', 'x1', 'k1', 'A2', 'x2', 'k2'], None, None),
'taucurve' : (self.taucurve, [50., 300.0, 60.0, 10.0, 8.0, 65.0, 10.0], 50000, 'r',
[-150., 50., 1.], [0.0, 237.0, 60.0, 12.0, 17.0, 60.0, 14.0],
['DC', 'a1', 'v1', 'k1', 'a2', 'v2', 'k2'], None, self.taucurveder),
}
self.fitSum2Err = 0
def getFunctions(self):
return(self.fitfuncmap.keys())
def exp0eval(self, p, x, y=None, C = None, sumsq = False):
"""
Exponential function with an amplitude and 0 offset
"""
yd = p[0] * numpy.exp(-x/p[1])
if y is None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def expsumeval(self, p, x, y=None, C = None, sumsq = False, weights=None):
"""
Sum of two exponentials with independent time constants and amplitudes,
and a DC offset
"""
yd = p[0] + (p[1]* numpy.exp(-x/p[2])) + (p[3]*numpy.exp(-x/p[4]))
if y is None:
return yd
else:
yerr = y - yd
if weights is not None:
yerr = yerr * weights
if sumsq is True:
return numpy.sum(yerr**2)
else:
return yerr
def expsumeval2(self, p, x, y=None, C = None, sumsq = False, weights=None):
"""
Sum of two exponentials, with predefined time constants , allowing
only the amplitudes and DC offset to vary
"""
yd = p[0] + (p[1]* numpy.exp(-x/C[0])) + (p[2]*numpy.exp(-x/C[1]))
if y is None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def expeval(self, p, x, y=None, C = None, sumsq = False, weights=None):
"""
Exponential with offset
"""
yd = p[0] + p[1] * numpy.exp(-x/p[2])
# print yd.shape
# print y.shape
if y is None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def expevalprime(self, p, x, y=None, C = None, sumsq = False, weights=None):
"""
Derivative for exponential with offset
"""
ydp = p[1] * numpy.exp(-x/p[2])/(p[2]*p[2])
yd = p[0] + p[1] * numpy.exp(-x/p[2])
print y
if y is None:
return (yd, ydp)
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def exppoweval(self, p, x, y=None, C = None, sumsq = False, weights=None):
"""
        Single exponential function, rising to a power
"""
if C is None:
cx = 1.0
else:
cx = C[0]
yd = p[0] + p[1] * (1.0-numpy.exp(-x/p[2]))**cx
if y is None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def exp2eval(self, p, x, y=None, C = None, sumsq = False, weights=None):
"""
For fit to activation currents...
"""
yd = p[0] + (p[1] * (1.0 - numpy.exp(-x/p[2]))**2.0 ) + (p[3] * (1.0 - numpy.exp(-x/p[4])))
if y == None:
return yd
else:
if sumsq is True:
ss = numpy.sqrt(numpy.sum((y - yd)**2.0))
# if p[4] < 3.0*p[2]:
# ss = ss*1e6 # penalize them being too close
return ss
else:
return y - yd
# @autojit
def expPulse(self, p, x, y=None, C=None, sumsq = False, weights = None):
"""Exponential pulse function (rising exponential with optional variable-length
plateau followed by falling exponential)
Parameter p is [yOffset, t0, tau1, tau2, amp, width]
"""
yOffset, t0, tau1, tau2, amp, width = p
yd = numpy.empty(x.shape)
yd[x<t0] = yOffset
m1 = (x>=t0)&(x<(t0+width))
m2 = (x>=(t0+width))
x1 = x[m1]
x2 = x[m2]
yd[m1] = amp*(1-numpy.exp(-(x1-t0)/tau1))+yOffset
amp2 = amp*(1-numpy.exp(-width/tau1)) ## y-value at start of decay
yd[m2] = ((amp2)*numpy.exp(-(x2-(width+t0))/tau2))+yOffset
if y == None:
return yd
else:
if sumsq is True:
ss = numpy.sqrt(numpy.sum((y-yd)**2.0))
return ss
else:
return y-yd
def boltzeval(self,p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0] + (p[1]-p[0])/(1.0 + numpy.exp((x-p[2])/p[3]))
if y == None:
return yd
else:
if sumsq is True:
return numpy.sqrt(numpy.sum((y - yd)**2.0))
else:
return y - yd
def boltzeval2(self,p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0] + p[1]/(1 + numpy.exp((x-p[2])/p[3])) + p[4]/(1 + numpy.exp((x-p[5])/p[6]))
if y == None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def gausseval(self,p, x, y=None, C = None, sumsq = False, weights=None):
yd = (p[0]/(p[2]*numpy.sqrt(2.0*numpy.pi)))*numpy.exp(-((x - p[1])**2.0)/(2.0*(p[2]**2.0)))
if y == None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def lineeval(self, p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0]*x + p[1]
if y == None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def poly2eval(self, p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0]*x**2.0 + p[1]*x + p[2]
if y == None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def poly3eval(self, p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0]*x**3.0 + p[1]*x**2.0 + p[2]*x +p[3]
if y == None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def poly4eval(self, p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0]*x**4.0 + p[1]*x**3.0 + p[2]*x**2.0 + p[3]*x +p[4]
if y == None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def sineeval(self, p, x, y=None, C = None, sumsq = False, weights=None):
yd = p[0] + p[1]*numpy.sin((x*2.0*numpy.pi/p[2])+p[3])
if y == None:
return yd
else:
if sumsq is True:
return numpy.sum((y - yd)**2)
else:
return y - yd
def taucurve(self, p, x, y=None, C = None, sumsq=True, weights=None):
"""
HH-like description of activation/inactivation function
'DC', 'a1', 'v1', 'k1', 'a2', 'v2', 'k2'
"""
yd = p[0] + 1.0/(p[1]*numpy.exp((x+p[2])/p[3]) +p[4]*numpy.exp(-(x+p[5])/p[6]))
if y == None:
return yd
else:
if sumsq is True:
return numpy.sqrt(numpy.sum((y - yd)**2))
else:
return y - yd
def taucurveder(self, p, x):
"""
Derivative for taucurve
'DC', 'a1', 'v1', 'k1', 'a2', 'v2', 'k2'
"""
y = -(p[1]*numpy.exp((p[2] + x)/p[3])/p[3] - p[4]*numpy.exp(-(p[5] + x)/p[6])/p[6])/(p[1]*numpy.exp((p[2] + x)/p[3]) +
p[4]*numpy.exp(-(p[5] + x)/p[6]))**2.0
# print 'dy: ', y
return y
def getClipData(self, x, y, t0, t1):
"""
Return the values in y that match the x range in tx from
t0 to t1. x must be monotonic increasing or decreasing.
Allow for reverse ordering. """
it0 = (numpy.abs(x-t0)).argmin()
it1 = (numpy.abs(x-t1)).argmin()
if it0 > it1:
t = it1
it1 = it0
it0 = t
return(x[it0:it1], y[it0:it1])
def FitRegion(self, whichdata, thisaxis, tdat, ydat, t0 = None, t1 = None,
fitFunc = 'exp1', fitFuncDer = None, fitPars = None, fixedPars = None,
fitPlot = None, plotInstance = None, dataType= 'xy', method = None,
bounds=None, weights=None, constraints=()):
"""
**Arguments**
============= ===================================================
whichdata
thisaxis
tdat
ydat
t0 (optional) Minimum of time data - determined from tdat if left unspecified
t1 (optional) Maximum of time data - determined from tdat if left unspecified
fitFunc (optional) The function to fit the data to (as defined in __init__). Default is 'exp1'.
fitFuncDer (optional) default=None
fitPars (optional) Initial fit parameters. Use the values defined in self.fitfuncmap if unspecified.
fixedPars (optional) Fixed parameters to pass to the function. Default=None
fitPlot (optional) default=None
plotInstance (optional) default=None
dataType (optional) Options are ['xy', 'blocks']. Default='xy'
method (optional) Options are ['curve_fit', 'fmin', 'simplex', 'Nelder-Mead', 'bfgs', 'TNC', 'SLSQP', 'COBYLA', 'L-BFGS-B', 'openopt']. Default='leastsq'
bounds (optional) default=None
weights (optional) default=None
constraints (optional) default=()
============= ===================================================
To call with tdat and ydat as simple arrays:
        FitRegion(1, 0, tdat, ydat, fitFunc = 'exp1')
e.g., the first argument should be 1, but this axis is ignored if datatype is 'xy'
"""
self.fitSum2Err = 0.0
if t0 == t1:
if plotInstance is not None and usingMPlot:
(x, y) = plotInstance.getCoordinates()
t0 = x[0]
t1 = x[1]
if t1 is None:
t1 = numpy.max(tdat)
if t0 is None:
t0 = numpy.min(tdat)
func = self.fitfuncmap[fitFunc]
if func is None:
print "FitRegion: unknown function %s" % (fitFunc)
return
xp = []
xf = []
yf = []
yn = []
tx = []
names = func[6]
if fitPars is None:
fpars = func[1]
else:
fpars = fitPars
if method == 'simplex': # remap calls if needed for newer versions of scipy (>= 0.11)
method = 'Nelder-Mead'
if ydat.ndim == 1 or dataType == 'xy' or dataType == '2d': # check if 1-d, then "pretend" its only a 1-element block
nblock = 1
else:
nblock = ydat.shape[0] # otherwise, this is the number of traces in the block
# print 'datatype: ', dataType
# print 'nblock: ', nblock
# print 'whichdata: ', whichdata
# for block in range(nblock):
for record in whichdata:
if dataType == 'blocks':
(tx, dy) = self.getClipData(tdat[block], ydat[block][record, thisaxis, :], t0, t1)
else:
(tx, dy) = self.getClipData(tdat, ydat[record,:], t0, t1)
# print 'Fitting.py: block, type, Fit data: ', block, dataType
# print tx.shape
# print dy.shape
yn.append(names)
if not any(tx):
continue # no data in the window...
ier = 0
#
# Different optimization methods are included here. Not all have been tested fully with
# this wrapper.
#
if method is None or method == 'leastsq': # use standard leastsq, no bounds
plsq, cov, infodict, mesg, ier = scipy.optimize.leastsq(func[0], fpars,
args=(tx.astype('float64'), dy.astype('float64'), fixedPars),
full_output = 1, maxfev = func[2])
if ier > 4:
print "optimize.leastsq error flag is: %d" % (ier)
print mesg
elif method == 'curve_fit':
print fpars
print fixedPars
plsq, cov = scipy.optimize.curve_fit(func[0], tx.astype('float64'), dy.astype('float64'), p0=fpars)
ier = 0
elif method in ['fmin', 'simplex', 'Nelder-Mead', 'bfgs', 'TNC', 'SLSQP', 'COBYLA', 'L-BFGS-B']: # use standard wrapper from scipy for those routintes
res = scipy.optimize.minimize(func[0], fpars, args=(tx.astype('float64'), dy.astype('float64'), fixedPars, True),
method=method, jac=None, hess=None, hessp=None, bounds=bounds, constraints=constraints, tol=None, callback=None,
options={'maxiter': func[2], 'disp': False })
plsq = res.x
#print " method:", method
#print " bounds:", bounds
#print " result:", plsq
# next section is replaced by the code above - kept here for reference if needed...
# elif method == 'fmin' or method == 'simplex':
# plsq = scipy.optimize.fmin(func[0], fpars, args=(tx.astype('float64'), dy.astype('float64'), fixedPars, True),
# maxfun = func[2]) # , iprint=0)
# ier = 0
# elif method == 'bfgs':
# plsq, cov, infodict = scipy.optimize.fmin_l_bfgs_b(func[0], fpars, fprime=func[8],
# args=(tx.astype('float64'), dy.astype('float64'), fixedPars, True, weights),
# maxfun = func[2], bounds = bounds,
# approx_grad = True) # , disp=0, iprint=-1)
elif method == 'openopt': # use OpenOpt's routines - usually slower, but sometimes they converge better
if not HAVE_OPENOPT:
raise Exception("Requested openopt fitting method but openopt is not installed.")
if bounds is not None:
# unpack bounds
lb = [y[0] for y in bounds]
ub = [y[1] for y in bounds]
fopt = openopt.DFP(func[0], fpars, tx, dy, df = fitFuncDer, lb=lb, ub=ub)
# fopt.df = func[8]
r = fopt.solve('nlp:ralg', plot=0, iprint = 10)
plsq = r.xf
ier = 0
else:
fopt = openopt.DFP(func[0], fpars, tx, dy, df = fitFuncDer)
print func[8]
# fopt.df = func[7]
fopt.checkdf()
r = fopt.solve('nlp:ralg', plot=0, iprint = 10)
plsq = r.xf
ier = 0
else:
print 'method %s not recognized, please check Fitting.py' % (method)
return
xfit = numpy.arange(min(tx), max(tx), (max(tx)-min(tx))/100.0)
yfit = func[0](plsq, xfit, C=fixedPars)
yy = func[0](plsq, tx, C=fixedPars) # calculate function
self.fitSum2Err = numpy.sum((dy - yy)**2)
            if usingMPlot and fitPlot != None and plotInstance != None:
                self.FitPlot(xFit = xfit, yFit = yfit, fitFunc = fitFunc,
                             fitPars = plsq, fitPlot = fitPlot, plotInstance = plotInstance)
xp.append(plsq) # parameter list
xf.append(xfit) # x plot point list
yf.append(yfit) # y fit point list
# print xp
# print len(xp)
return(xp, xf, yf, yn) # includes names with yn and range of tx
def FitPlot(self, xFit = None, yFit = None, fitFunc = 'exp1',
fitPars = None, fixedPars = None, fitPlot=None, plotInstance = None,
color=None):
""" Plot the fit data onto the fitPlot with the specified "plot Instance".
if there is no xFit, or some parameters are missing, we just return.
if there is xFit, but no yFit, then we try to compute the fit with
what we have. The plot is superimposed on the specified "fitPlot" and
the color is specified by the function color in the fitPars list.
"""
if xFit is None or fitPars is None:
return
func = self.fitfuncmap[fitFunc]
if color is None:
fcolor = func[3]
else:
fcolor = color
        if yFit is None:
            yFit = [None] * len(fitPars)
            for k in range(0, len(fitPars)):
                yFit[k] = func[0](fitPars[k], xFit[k], C=fixedPars)
        if plotInstance is None or fitPlot is None:
            return(yFit)
        for k in range(0, len(fitPars)):
            plotInstance.PlotLine(fitPlot, xFit[k], yFit[k], color = fcolor)
        return(yFit)
def getFitErr(self):
""" Return the fit error for the most recent fit
"""
return(self.fitSum2Err)
def expfit(self, x, y):
""" find best fit of a single exponential function to x and y
using the chebyshev polynomial approximation.
returns (DC, A, tau) for fit.
Perform a single exponential fit to data using Chebyshev polynomial method.
Equation fit: y = a1 * exp(-x/tau) + a0
Call: [a0 a1 tau] = expfit(x,y);
Calling parameter x is the time base, y is the data to be fit.
Returned values: a0 is the offset, a1 is the amplitude, tau is the time
constant (scaled in units of x).
Relies on routines chebftd to generate polynomial coeffs, and chebint to compute the
coefficients for the integral of the data. These are now included in this
.py file source.
This version is based on the one in the pClamp manual: HOWEVER, since
I use the bounded [-1 1] form for the Chebyshev polynomials, the coefficients are different,
and the resulting equation for tau is different. I manually optimized the tau
        estimate based on fits to some simulated noisy data. (It's ok to use the whole range of d1 and d0
when the data is clean, but only the first few coeffs really hold the info when
the data is noisy.)
NOTE: The user is responsible for making sure that the passed data is appropriate,
e.g., no large noise or electronic transients, and that the time constants in the
data are adequately sampled.
To do a double exp fit with this method is possible, but more complex.
It would be computationally simpler to try breaking the data into two regions where
the fast and slow components are dominant, and fit each separately; then use that to
seed a non-linear fit (e.g., L-M) algorithm.
Final working version 4/13/99 Paul B. Manis
converted to Python 7/9/2009 Paul B. Manis. Seems functional.
"""
        n = 30  # default number of polynomial coeffs to use in fit
a = numpy.amin(x)
b = numpy.amax(x)
d0 = self.chebftd(a, b, n, x, y) # coeffs for data trace...
d1 = self.chebint(a, b, d0, n) # coeffs of integral...
tau = -numpy.mean(d1[2:3]/d0[2:3])
try:
g = numpy.exp(-x/tau)
except:
g = 0.0
dg = self.chebftd(a, b, n, x, g) # generate chebyshev polynomial for unit exponential function
# now estimate the amplitude from the ratios of the coeffs.
a1 = self.estimate(d0, dg, 1)
a0 = (d0[0]-a1*dg[0])/2.0 # get the offset here
return(a0, a1, tau)#
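    # A minimal usage sketch for expfit (hypothetical data; assumes the class is
    # instantiated as in the __main__ test block below, i.e. Fitting.Fitting()):
    #   fits = Fitting.Fitting()
    #   t = numpy.arange(0.0, 50.0, 0.1)
    #   y = 2.0 + 3.0*numpy.exp(-t/5.0)        # y = a1*exp(-x/tau) + a0
    #   a0, a1, tau = fits.expfit(t, y)        # expect roughly a0~2, a1~3, tau~5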
def estimate(self, c, d, m):
""" compute optimal estimate of parameter from arrays of data """
n = len(c)
a = sum(c[m:n]*d[m:n])/sum(d[m:n]**2.0)
return(a)
# note : the following routine is a bottleneck. It should be coded in C.
def chebftd(self, a, b, n, t, d):
""" Chebyshev fit; from Press et al, p 192.
matlab code P. Manis 21 Mar 1999
"Given a function func, lower and upper limits of the interval [a,b], and
a maximum degree, n, this routine computes the n coefficients c[1..n] such that
func(x) sum(k=1, n) of ck*Tk(y) - c0/2, where y = (x -0.5*(b+a))/(0.5*(b-a))
This routine is to be used with moderately large n (30-50) the array of c's is
subsequently truncated at the smaller value m such that cm and subsequent
terms are negligible."
This routine is modified so that we find close points in x (data array) - i.e., we find
the best Chebyshev terms to describe the data as if it is an arbitrary function.
t is the x data, d is the y data...
"""
bma = 0.5*(b-a)
bpa = 0.5*(b+a)
inc = t[1]-t[0]
f = numpy.zeros(n)
for k in range(0, n):
y = numpy.cos(numpy.pi*(k+0.5)/n)
pos = int(0.5+(y*bma+bpa)/inc)
if pos < 0:
pos = 0
if pos >= len(d)-2:
pos = len(d)-2
try:
f[k]= d[pos+1]
except:
print "error in chebftd: k = %d (len f = %d) pos = %d, len(d) = %d\n" % (k, len(f), pos, len(d))
print "you should probably make sure this doesn't happen"
fac = 2.0/n
c=numpy.zeros(n)
for j in range(0, n):
sum=0.0
for k in range(0, n):
sum = sum + f[k]*numpy.cos(numpy.pi*j*(k+0.5)/n)
c[j]=fac*sum
return(c)
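    # e.g. c = self.chebftd(a, b, 30, t, d) returns ~30 Chebyshev coefficients
    # approximating the sampled data d(t) on [a, b]; as noted in expfit above,
    # for noisy data only the first few coefficients carry useful information.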
def chebint(self, a, b, c, n):
""" Given a, b, and c[1..n] as output from chebft or chebftd, and given n,
the desired degree of approximation (length of c to be used),
this routine computes cint, the Chebyshev coefficients of the
integral of the function whose coeffs are in c. The constant of
integration is set so that the integral vanishes at a.
Coded from Press et al, 3/21/99 P. Manis (Matlab)
Python translation 7/8/2009 P. Manis
"""
sum = 0.0
fac = 1.0
con = 0.25*(b-a) # factor that normalizes the interval
cint = numpy.zeros(n)
for j in range(1,n-2):
cint[j]=con*(c[j-1]-c[j+1])/j
sum = sum + fac * cint[j]
fac = - fac
cint[n-1] = con*c[n-2]/(n-1)
sum = sum + fac*cint[n-1]
cint[0] = 2.0*sum # set constant of integration.
return(cint)
# routine to flatten an array/list.
#
def flatten(self, l, ltypes=(list, tuple)):
i = 0
while i < len(l):
while isinstance(l[i], ltypes):
if not l[i]:
l.pop(i)
if not len(l):
break
else:
l[i:i+1] = list(l[i])
i += 1
return l
# flatten()
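    # e.g. flatten([1, [2, [3, 4]], (5,)]) returns [1, 2, 3, 4, 5]
    # (note that the input list is modified in place).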
# run tests if we are "main"
if __name__ == "__main__":
# import matplotlib.pyplot as pyplot
import timeit
import Fitting
import matplotlib as MP
MP.use('Qt4Agg')
################## Do not modify the following code
# sets up matplotlib with sans-serif plotting...
import matplotlib.gridspec as GS
# import mpl_toolkits.axes_grid1.inset_locator as INSETS
# #import inset_axes, zoomed_inset_axes
# import mpl_toolkits.axes_grid1.anchored_artists as ANCHOR
# # import AnchoredSizeBar
stdFont = 'Arial'
import matplotlib.pyplot as pylab
pylab.rcParams['text.usetex'] = True
pylab.rcParams['interactive'] = False
pylab.rcParams['font.family'] = 'sans-serif'
pylab.rcParams['font.sans-serif'] = 'Arial'
pylab.rcParams['mathtext.default'] = 'sf'
pylab.rcParams['figure.facecolor'] = 'white'
# next setting allows pdf font to be readable in Adobe Illustrator
pylab.rcParams['pdf.fonttype'] = 42
pylab.rcParams['text.dvipnghack'] = True
##################### to here (matplotlib stuff - touchy!
Fits = Fitting.Fitting()
# x = numpy.arange(0, 100.0, 0.1)
# y = 5.0-2.5*numpy.exp(-x/5.0)+0.5*numpy.random.randn(len(x))
# (dc, aFit,tauFit) = Fits.expfit(x,y)
# yf = dc + aFit*numpy.exp(-x/tauFit)
# pyplot.figure(1)
# pyplot.plot(x,y,'k')
# pyplot.hold(True)
# pyplot.plot(x, yf, 'r')
# pyplot.show()
exploreError = False
if exploreError is True:
# explore the error surface for a function:
func = 'exppulse'
f = Fits.fitfuncmap[func]
p1range = numpy.arange(0.1, 5.0, 0.1)
p2range = numpy.arange(0.1, 5.0, 0.1)
err = numpy.zeros((len(p1range), len(p2range)))
x = numpy.array(numpy.arange(f[4][0], f[4][1], f[4][2]))
C = None
if func == 'expsum2':
C = f[7]
# check exchange of tau1 ([1]) and width[4]
C = None
yOffset, t0, tau1, tau2, amp, width = f[1] # get inital parameters
y0 = f[0](f[1], x, C=C)
noise = numpy.random.random(y0.shape) - 0.5
y0 += 0.0* noise
sh = err.shape
yp = numpy.zeros((sh[0], sh[1], len(y0)))
for i, p1 in enumerate(p1range):
tau1t = tau1*p1
for j, p2 in enumerate(p2range):
ampt = amp*p2
pars = (yOffset, t0, tau1t, tau2, ampt, width) # repackage
err[i,j] = f[0](pars, x, y0, C=C, sumsq = True)
yp[i,j] = f[0](pars, x, C=C, sumsq = False)
pylab.figure()
CS=pylab.contour(p1range*tau1, p2range*width, err, 25)
CB = pylab.colorbar(CS, shrink=0.8, extend='both')
pylab.figure()
for i, p1 in enumerate(p1range):
for j, p2 in enumerate(p2range):
pylab.plot(x, yp[i,j])
pylab.plot(x, y0, 'r-', linewidth=2.0)
# run tests for each type of fit, return results to compare parameters
cons = None
bnds = None
signal_to_noise = 100000.
for func in Fits.fitfuncmap:
if func != 'exppulse':
continue
print "\nFunction: %s\nTarget: " % (func),
f = Fits.fitfuncmap[func]
for k in range(0,len(f[1])):
print "%f " % (f[1][k]),
print "\nStarting: ",
for k in range(0,len(f[5])):
print "%f " % (f[5][k]),
# nstep = 500.0
# if func == 'sin':
# nstep = 100.0
x = numpy.array(numpy.arange(f[4][0], f[4][1], f[4][2]))
C = None
if func == 'expsum2':
C = f[7]
if func == 'exppulse':
C = f[7]
y = f[0](f[1], x, C=C)
yd = numpy.array(y)
noise = numpy.random.normal(0, 0.1, yd.shape)
my = numpy.amax(yd)
#yd = yd + sigmax*0.05*my*(numpy.random.random_sample(shape(yd))-0.5)
yd += noise*my/signal_to_noise
testMethod = 'SLSQP'
if func == 'taucurve':
continue
bounds=[(0., 100.), (0., 1000.), (0.0, 500.0), (0.1, 50.0),
(0., 1000), (0.0, 500.0), (0.1, 50.0)]
(fpar, xf, yf, names) = Fits.FitRegion(numpy.array([1]), 0, x, yd, fitFunc = func, bounds=bounds, method=testMethod)
elif func == 'boltz':
continue
bounds = [(-0.5,0.5), (0.0, 20.0), (-120., 0.), (-20., 0.)]
(fpar, xf, yf, names) = Fits.FitRegion(numpy.array([1]), 0, x, yd, fitFunc = func, bounds=bounds, method=testMethod)
elif func == 'exp2':
bounds=[(-0.001, 0.001), (-5.0, 0.), (1.0, 500.0), (-5.0, 0.0),
(1., 10000.)]
(fpar, xf, yf, names) = Fits.FitRegion(numpy.array([1]), 0, x, yd, fitFunc = func, bounds=bounds, method=testMethod)
elif func == 'exppulse':
# set some constraints to the fitting
            # yOffset, t0, tau1, tau2, amp, width = f[1] # order of constraints
dt = numpy.mean(numpy.diff(x))
bounds = [(-5, 5), (-15., 15.), (-2, 2.0), (2-10, 10.), (-5, 5.), (0., 5.)]
            # example for constraints:
# cons = ({'type': 'ineq', 'fun': lambda x: x[4] - 3.0*x[2]},
# {'type': 'ineq', 'fun': lambda x: - x[4] + 12*x[2]},
# {'type': 'ineq', 'fun': lambda x: x[2]},
# {'type': 'ineq', 'fun': lambda x: - x[4] + 2000},
# )
cons = ({'type': 'ineq', 'fun': lambda x: x[3] - x[2] }, # tau1 < tau2
)
C = None
tv = f[5]
initialgr = f[0](f[5], x, None )
(fpar, xf, yf, names) = Fits.FitRegion(
numpy.array([1]), 0, x, yd, fitFunc = func, fixedPars = C, constraints = cons, bounds = bounds, method=testMethod)
# print xf
# print yf
# print fpar
# print names
else:
(fpar, xf, yf, names) = Fits.FitRegion(
numpy.array([1]), 0, x, yd, fitFunc = func, fixedPars = C, constraints = cons, bounds = bnds, method=testMethod)
#print fpar
s = numpy.shape(fpar)
j = 0
outstr = ""
initstr = ""
truestr = ""
for i in range(0, len(names[j])):
# print "%f " % fpar[j][i],
outstr = outstr + ('%s = %f, ' % (names[j][i], fpar[j][i]))
initstr = initstr + '%s = %f, ' % (names[j][i], tv[i])
truestr = truestr + '%s = %f, ' % (names[j][i], f[1][i])
print( "\nTrue(%d) : %s" % (j, truestr) )
print( "FIT(%d) : %s" % (j, outstr) )
print( "init(%d) : %s" % (j, initstr) )
print( "Error: : %f" % (Fits.fitSum2Err))
        if func == 'exppulse':
pylab.figure()
pylab.plot(numpy.array(x), yd, 'ro-')
pylab.hold(True)
pylab.plot(numpy.array(x), initialgr, 'k--')
pylab.plot(xf[0], yf[0], 'b-') # fit
pylab.show()
| mit |
nilearn/nilearn_sandbox | examples/rpbi/plot_localizer_rpbi.py | 1 | 4435 | """
Massively univariate analysis of a computation task from the Localizer dataset
==============================================================================
A permuted Ordinary Least Squares algorithm is run at each voxel in
order to determine which voxels are specifically active when a healthy subject
performs a computation task as opposed to a sentence reading task.
Randomized Parcellation Based Inference [1] is also used so as to illustrate
that it conveys more sensitivity.
"""
# Author: Virgile Fritsch, <virgile.fritsch@inria.fr>, Mar. 2014
import numpy as np
from nilearn import datasets
from scipy import linalg
from nilearn.input_data import NiftiMasker
from nilearn.mass_univariate import permuted_ols
from nilearn_sandbox.mass_univariate.rpbi import randomized_parcellation_based_inference
### Load Localizer motor contrast #############################################
n_samples = 20
# localizer_dataset = datasets.fetch_localizer_calculation_task(
# n_subjects=n_samples)
localizer_dataset = datasets.fetch_localizer_contrasts(
["calculation vs sentences"],
n_subjects=n_samples)
### Mask data #################################################################
nifti_masker = NiftiMasker(
memory='nilearn_cache', memory_level=1) # cache options
fmri_masked = nifti_masker.fit_transform(localizer_dataset.cmaps)
### Perform massively univariate analysis with permuted OLS ###################
tested_var = np.ones((n_samples, 1), dtype=float) # intercept
neg_log_pvals, all_scores, h0 = permuted_ols(
tested_var, fmri_masked, model_intercept=False,
n_perm=5000, # 5,000 for the sake of time. 10,000 is recommended
two_sided_test=False, # RPBI does not perform a two-sided test
n_jobs=1) # can be changed to use more CPUs
neg_log_pvals_unmasked = nifti_masker.inverse_transform(
np.ravel(neg_log_pvals))
### Randomized Parcellation Based Inference ###################################
neg_log_pvals_rpbi, _, _ = randomized_parcellation_based_inference(
tested_var, fmri_masked,
np.asarray(nifti_masker.mask_img_.get_data()).astype(bool),
n_parcellations=30, # 30 for the sake of time, 100 is recommended
n_parcels=1000,
threshold='auto',
n_perm=5000, # 5,000 for the sake of time. 10,000 is recommended
random_state=0, memory='nilearn_cache', n_jobs=1, verbose=True)
neg_log_pvals_rpbi_unmasked = nifti_masker.inverse_transform(
neg_log_pvals_rpbi)
### Visualization #############################################################
import matplotlib.pyplot as plt
from nilearn.plotting import plot_stat_map
# Here, we should use a structural image as a background, when available.
# Various plotting parameters
z_slice = 39 # plotted slice
from nilearn.image.resampling import coord_transform
affine = neg_log_pvals_unmasked.get_affine()
_, _, k_slice = coord_transform(0, 0, z_slice,
linalg.inv(affine))
k_slice = round(k_slice)
threshold = - np.log10(0.1) # 10% corrected
vmax = min(np.amax(neg_log_pvals),
np.amax(neg_log_pvals_rpbi))
# Plot permutation p-values map
fig = plt.figure(figsize=(5, 7), facecolor='k')
display = plot_stat_map(neg_log_pvals_unmasked,
threshold=threshold, cmap=plt.cm.autumn,
display_mode='z', cut_coords=[z_slice],
figure=fig, vmax=vmax, black_bg=True)
neg_log_pvals_data = neg_log_pvals_unmasked.get_data()
neg_log_pvals_slice_data = \
neg_log_pvals_data[..., k_slice]
n_detections = (neg_log_pvals_slice_data > threshold).sum()
title = ('Negative $\log_{10}$ p-values'
'\n(Non-parametric + '
'\nmax-type correction)'
'\n%d detections') % n_detections
display.title(title, y=1.2)
# Plot RPBI p-values map
fig = plt.figure(figsize=(5, 7), facecolor='k')
display = plot_stat_map(neg_log_pvals_rpbi_unmasked,
threshold=threshold, cmap=plt.cm.autumn,
display_mode='z', cut_coords=[z_slice],
figure=fig, vmax=vmax, black_bg=True)
neg_log_pvals_rpbi_data = \
neg_log_pvals_rpbi_unmasked.get_data()
neg_log_pvals_rpbi_slice_data = \
neg_log_pvals_rpbi_data[..., k_slice]
n_detections = (neg_log_pvals_rpbi_slice_data > threshold).sum()
title = ('Negative $\log_{10}$ p-values' + '\n(RPBI)'
'\n%d detections') % n_detections
display.title(title, y=1.2)
plt.show()
| bsd-3-clause |
zutshi/S3CAMR | examples/spi/spi_plant.py | 1 | 2022 |
# Must satisfy the signature
# [t,X,D,P] = sim_function(T,X0,D0,P0,I0);
import numpy as np
from scipy.integrate import ode
import matplotlib.pyplot as PLT
PLOT = True
class SIM(object):
def __init__(self, plt, pvt_init_data):
#print I
# atol = 1e-10
rtol = 1e-5
# tt,YY,dummy_D,dummy_P
self.solver = ode(dyn).set_integrator('dopri5', rtol=rtol)
return
def sim(self, TT, X0, D, P, U, W, property_checker):
if PLOT:
num_dim_x = len(X0)
plot_data = [np.empty(0, dtype=float), np.empty((0, num_dim_x), dtype=float)]
else:
plot_data = None
Ti = TT[0]
Tf = TT[1]
T = Tf - Ti
self.solver.set_solout(solout_fun(property_checker, plot_data)) # (2)
self.solver.set_initial_value(X0, t=0.0)
self.solver.set_f_params(W)
X_ = self.solver.integrate(T)
pvf = property_checker.check(Tf, X_)
dummy_D = np.zeros(D.shape)
dummy_P = np.zeros(P.shape)
ret_t = Tf
ret_X = X_
ret_D = dummy_D
ret_P = dummy_P
if PLOT:
PLT.figure(5)
PLT.plot(plot_data[0], plot_data[1][:, 0])
return (ret_t, ret_X, ret_D, ret_P), pvf
# State Space Modeling Template
# dx/dt = Ax + Bu
# y = Cx + Du
def dyn(t, X, w):
if w > 0:
u = 1.0
elif w < 0:
u = -1.0
else:
u = 0.0
x2 = u
X_ = np.array([x2])
return X_
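# Note: as written, dyn() implements a single integrator dx/dt = u with a
# bang-bang input u = sign(w); the A, B, C, D matrices of the template above
# are not used explicitly.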
def solout_fun(property_checker, plot_data):
def solout(t, Y):
if PLOT:
plot_data[0] = np.concatenate((plot_data[0], np.array([t])))
plot_data[1] = np.concatenate((plot_data[1], np.array([Y])))
if property_checker.check(t, Y):
#violating_state[0] = (np.copy(t), np.copy(Y))
# print 'violation found:', violating_state[0]
# return -1 to stop integration
return -1
else:
return 0
return 0
return solout
| bsd-2-clause |
pvcrossi/OnlineCS | online_CS.py | 1 | 4043 | '''
Bayesian Online Compressed Sensing (2016)
Paulo V. Rossi & Yoshiyuki Kabashima
'''
from collections import namedtuple
import matplotlib.pyplot as plt
import numpy as np
from numpy.linalg import norm
from numpy.random import normal
from utils import DlnH, DDlnH, G, H, moments
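# (Assumption, following the paper's notation: H(x) is the Gaussian tail
# integral, G(x) the standard normal density, DlnH/DDlnH the first and second
# derivatives of ln H, and moments() returns the posterior mean and variance of
# each component under the sparse prior. These helpers live in utils.py, which
# is not shown here.)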
def simulation(method='standard'):
signal_length = 2000
alpha_max = 20
sigma_n_2 = 1e-1
phi = prior()
P = posterior(signal_length, phi)
x0 = generate_signal(signal_length, phi)
print('Simulation parameters:')
print('N='+str(signal_length)+', sparsity='+str(phi.rho)+
', noise='+str(sigma_n_2)+', alpha_max='+str(alpha_max))
print('Measurement model: '+method+'\n')
number_of_measurements = alpha_max*signal_length
mean_square_error = np.zeros(number_of_measurements)
for measurement in range(number_of_measurements):
P = update_posterior(P, phi, x0, signal_length, sigma_n_2, method)
mean_square_error[measurement] = reconstruction_error(P, x0)
plot_results(P, x0, mean_square_error, phi)
def prior():
phi = namedtuple('prior_distribution', ['rho', 'sigma_x_2', 'bar_x'])
phi.rho = 0.1
phi.sigma_x_2 = 1.
phi.bar_x = 0.
return phi
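# The prior above is a Bernoulli-Gaussian (spike-and-slab) model: a fraction rho
# of the components are drawn from N(bar_x, sigma_x_2) and the rest are zero,
# which is what generate_signal() below implements.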
def posterior(signal_length, phi):
P = namedtuple('posterior_distribution', ['m', 'v', 'a', 'h'])
P.m = np.zeros(signal_length)
P.v = phi.rho * phi.sigma_x_2 * np.ones(signal_length)
P.a = np.zeros(signal_length)
P.h = np.zeros(signal_length)
return P
def generate_signal (signal_length, phi):
x0 = np.zeros(signal_length)
number_of_non_zero_components = int(np.ceil(signal_length*phi.rho))
x0[:number_of_non_zero_components] = normal(loc=phi.bar_x,
scale=np.sqrt(phi.sigma_x_2),
size=number_of_non_zero_components)
return x0
def update_posterior(P, phi, x0, signal_length, sigma_n_2, method):
A_t = measurement_vector(signal_length)
P.a, P.h = update_and_project(method, A_t, x0, sigma_n_2, P)
P.m, P.v = moments(P, phi)
return P
def measurement_vector(signal_length):
A_t = normal(size=signal_length)
return A_t/norm(A_t)
def update_and_project(method, A_t, x0, sigma_n_2, P):
m, v, a, h = P.m, P.v, P.a, P.h
u0 = np.dot(A_t, x0)
if sigma_n_2 > 0:
noise = normal(scale=np.sqrt(sigma_n_2))
else:
noise = 0
y = u0 + noise
Delta = np.dot(A_t, m)
chi = np.dot(A_t**2, v)
if method == 'standard':
da, dh = update_and_project_std(y, Delta, chi, sigma_n_2, A_t, m)
elif method == '1bit':
da, dh = update_and_project_1bit(y, Delta, chi, sigma_n_2, A_t, m)
else:
raise ValueError('Measurement model not recognized. Please use "standard" or "1bit".')
return a+da, h+dh
def update_and_project_std(y, Delta, chi, sigma_n_2, A_t, m):
da = A_t**2 / (sigma_n_2 + chi)
dh = (y-Delta)*A_t / (sigma_n_2 + chi) + da*m
return da, dh
def update_and_project_1bit(y, Delta, chi, sigma_n_2, A_t, m):
y = np.sign(y)
u = y * np.dot(A_t, m)
chi_prime = chi + sigma_n_2
z = -u/np.sqrt(chi_prime)
da = -A_t**2/chi_prime * DDlnH(z)
dh = -y*A_t/np.sqrt(chi_prime) * DlnH(z) + da*m
return da, dh
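# In the 1-bit branch only sign(y) is observed, so the update is driven by the
# Gaussian tail probability H(z) evaluated at z = -y*<A, m>/sqrt(chi + sigma_n_2);
# da and dh are the increments to the accumulators (a, h) applied in
# update_and_project() above (interpretation assumes the utils functions are as
# noted near the imports).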
def reconstruction_error(P, x0):
return norm(x0 - P.m)**2 / norm(x0)**2
def plot_results(P, x0, mse_t, phi):
plt.subplots(figsize=(10,20))
plt.subplot(211)
plt.plot(np.arange(len(mse_t))/float(len(P.m)), 10*np.log10(mse_t), color='k')
plt.xlabel(r'$\alpha$')
plt.ylabel(r'mse (dB)')
plt.subplot(212)
plt.plot(P.m, color='k', lw = 0.7, label=r'$m$')
plt.scatter(range(int(len(x0)*phi.rho)), x0[:int(len(x0)*phi.rho)], \
marker='o', facecolors='none', edgecolors='r', lw=1.5, label=r'$x^0$')
plt.xlim([0,len(P.m)])
plt.xlabel(r'Vector Component')
plt.legend()
plt.show()
if __name__ == '__main__':
simulation(method='1bit')
#simulation(method='standard')
| mit |
pySTEPS/pysteps | examples/plot_optical_flow.py | 1 | 5240 | """
Optical flow
============
This tutorial offers a short overview of the optical flow routines available in
pysteps and it will cover how to compute and plot the motion field from a
sequence of radar images.
"""
from datetime import datetime
from pprint import pprint
import matplotlib.pyplot as plt
import numpy as np
from pysteps import io, motion, rcparams
from pysteps.utils import conversion, transformation
from pysteps.visualization import plot_precip_field, quiver
################################################################################
# Read the radar input images
# ---------------------------
#
# First, we will import the sequence of radar composites.
# You need the pysteps-data archive downloaded and the pystepsrc file
# configured with the data_source paths pointing to data folders.
# Selected case
date = datetime.strptime("201505151630", "%Y%m%d%H%M")
data_source = rcparams.data_sources["mch"]
###############################################################################
# Load the data from the archive
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
root_path = data_source["root_path"]
path_fmt = data_source["path_fmt"]
fn_pattern = data_source["fn_pattern"]
fn_ext = data_source["fn_ext"]
importer_name = data_source["importer"]
importer_kwargs = data_source["importer_kwargs"]
timestep = data_source["timestep"]
# Find the input files from the archive
fns = io.archive.find_by_date(
date, root_path, path_fmt, fn_pattern, fn_ext, timestep=5, num_prev_files=9
)
# Read the radar composites
importer = io.get_method(importer_name, "importer")
R, quality, metadata = io.read_timeseries(fns, importer, **importer_kwargs)
del quality # Not used
###############################################################################
# Preprocess the data
# ~~~~~~~~~~~~~~~~~~~
# Convert to mm/h
R, metadata = conversion.to_rainrate(R, metadata)
# Store the reference frame
R_ = R[-1, :, :].copy()
# Log-transform the data [dBR]
R, metadata = transformation.dB_transform(R, metadata, threshold=0.1, zerovalue=-15.0)
# Nicely print the metadata
pprint(metadata)
################################################################################
# Lucas-Kanade (LK)
# -----------------
#
# The Lucas-Kanade optical flow method implemented in pysteps is a local
# tracking approach that relies on the OpenCV package.
# Local features are tracked in a sequence of two or more radar images. The
# scheme includes a final interpolation step in order to produce a smooth
# field of motion vectors.
oflow_method = motion.get_method("LK")
V1 = oflow_method(R[-3:, :, :])
# Plot the motion field on top of the reference frame
plot_precip_field(R_, geodata=metadata, title="LK")
quiver(V1, geodata=metadata, step=25)
plt.show()
################################################################################
# Variational echo tracking (VET)
# -------------------------------
#
# This module implements the VET algorithm presented
# by Laroche and Zawadzki (1995) and used in the McGill Algorithm for
# Prediction by Lagrangian Extrapolation (MAPLE) described in
# Germann and Zawadzki (2002).
# The approach essentially consists of a global optimization routine that seeks
# at minimizing a cost function between the displaced and the reference image.
oflow_method = motion.get_method("VET")
V2 = oflow_method(R[-3:, :, :])
# Plot the motion field
plot_precip_field(R_, geodata=metadata, title="VET")
quiver(V2, geodata=metadata, step=25)
plt.show()
################################################################################
# Dynamic and adaptive radar tracking of storms (DARTS)
# -----------------------------------------------------
#
# DARTS uses a spectral approach to optical flow that is based on the discrete
# Fourier transform (DFT) of a temporal sequence of radar fields.
# The level of truncation of the DFT coefficients controls the degree of
# smoothness of the estimated motion field, allowing for an efficient
# motion estimation. DARTS requires a longer sequence of radar fields for
# estimating the motion, here we are going to use all the available 10 fields.
oflow_method = motion.get_method("DARTS")
R[~np.isfinite(R)] = metadata["zerovalue"]
V3 = oflow_method(R) # needs longer training sequence
# Plot the motion field
plot_precip_field(R_, geodata=metadata, title="DARTS")
quiver(V3, geodata=metadata, step=25)
plt.show()
################################################################################
# Anisotropic diffusion method (Proesmans et al 1994)
# ---------------------------------------------------
#
# This module implements the anisotropic diffusion method presented in Proesmans
# et al. (1994), a robust optical flow technique which employs the notion of
# inconsitency during the solution of the optical flow equations.
oflow_method = motion.get_method("proesmans")
R[~np.isfinite(R)] = metadata["zerovalue"]
V4 = oflow_method(R[-2:, :, :])
# Plot the motion field
plot_precip_field(R_, geodata=metadata, title="Proesmans")
quiver(V4, geodata=metadata, step=25)
plt.show()
# sphinx_gallery_thumbnail_number = 1
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/doc/mpl_examples/units/evans_test.py | 3 | 2335 | """
A mockup "Foo" units class which supports
conversion and different tick formatting depending on the "unit".
Here the "unit" is just a scalar conversion factor, but this example shows mpl is
entirely agnostic to what kind of units client packages use
"""
import matplotlib
from matplotlib.cbook import iterable
import matplotlib.units as units
import matplotlib.ticker as ticker
from pylab import figure, show
class Foo:
def __init__( self, val, unit=1.0 ):
self.unit = unit
self._val = val * unit
def value( self, unit ):
if unit is None: unit = self.unit
return self._val / unit
class FooConverter:
@staticmethod
def axisinfo(unit, axis):
'return the Foo AxisInfo'
if unit==1.0 or unit==2.0:
return units.AxisInfo(
majloc = ticker.IndexLocator( 8, 0 ),
majfmt = ticker.FormatStrFormatter("VAL: %s"),
label='foo',
)
else:
return None
@staticmethod
def convert(obj, unit, axis):
"""
convert obj using unit. If obj is a sequence, return the
converted sequence
"""
if units.ConversionInterface.is_numlike(obj):
return obj
if iterable(obj):
return [o.value(unit) for o in obj]
else:
return obj.value(unit)
@staticmethod
def default_units(x, axis):
'return the default unit for x or None'
if iterable(x):
for thisx in x:
return thisx.unit
else:
return x.unit
units.registry[Foo] = FooConverter()
# create some Foos
x = []
for val in range( 0, 50, 2 ):
x.append( Foo( val, 1.0 ) )
# and some arbitrary y data
y = [i for i in range( len(x) ) ]
# plot specifying units
fig = figure()
fig.suptitle("Custom units")
fig.subplots_adjust(bottom=0.2)
ax = fig.add_subplot(1,2,2)
ax.plot( x, y, 'o', xunits=2.0 )
for label in ax.get_xticklabels():
label.set_rotation(30)
label.set_ha('right')
ax.set_title("xunits = 2.0")
# plot without specifying units; will use the None branch for axisinfo
ax = fig.add_subplot(1,2,1)
ax.plot( x, y ) # uses default units
ax.set_title('default units')
for label in ax.get_xticklabels():
label.set_rotation(30)
label.set_ha('right')
show()
| gpl-2.0 |
aweimann/traitar | traitar/heatmap.py | 1 | 19822 | #!/usr/bin/env python
#adapted from Nathan Salomonis: http://code.activestate.com/recipes/578175-hierarchical-clustering-heatmap-python/
import matplotlib as mpl
#pick non-x display
mpl.use('Agg')
import matplotlib.pyplot as pylab
import scipy
import scipy.cluster.hierarchy as sch
import scipy.spatial.distance as dist
import numpy
import string
import time
import sys, os
import getopt
import numpy as np
import pandas as ps
from .PhenotypeCollection import PhenotypeCollection
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
#ignore these warnings
#/usr/lib/pymodules/python2.7/matplotlib/collections.py:548: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
# if self._edgecolors == 'face':
#/usr/lib/pymodules/python2.7/matplotlib/backends/backend_pdf.py:2184: FutureWarning: comparison to `None` will result in an elementwise object comparison in the future.
# different = bool(ours != theirs)
################# Perform the hierarchical clustering #################
def heatmap(x, row_header, column_header, primary_pt_models, color_f, row_method,
column_method, row_metric, column_metric,
filename, sample_f, secondary_pt_models):
print "\nrunning hiearchical clustering using %s for columns and %s for rows" % (column_metric,row_metric)
"""
This below code is based in large part on the protype methods:
http://old.nabble.com/How-to-plot-heatmap-with-matplotlib--td32534593.html
http://stackoverflow.com/questions/7664826/how-to-get-flat-clustering-corresponding-to-color-clusters-in-the-dendrogram-cre
x is an m by n ndarray, m observations, n genes
"""
### Define the color gradient to use based on the provided name
#if color_gradient == 'red_white_blue':
# cmap=pylab.cm.bwr
#if color_gradient == 'red_black_sky':
# cmap=RedBlackSkyBlue()
#if color_gradient == 'red_black_blue':
# cmap=RedBlackBlue()
#if color_gradient == 'red_black_green':
# cmap=RedBlackGreen()
#if color_gradient == 'yellow_black_blue':
# cmap=YellowBlackBlue()
#if color_gradient == 'seismic':
# cmap=pylab.cm.seismic
#if color_gradient == 'green_white_purple':
# cmap=pylab.cm.PiYG_r
#if color_gradient == 'coolwarm':
# cmap=pylab.cm.coolwarm
### Scale the max and min colors so that 0 is white/black
#vmin=x.min()
#vmax=x.max()
#vmax = max([vmax,abs(vmin)])
#vmin = vmax*-1
#norm = mpl.colors.Normalize(vmin/2, vmax/2) ### adjust the max and min to scale these colors
### Scale the Matplotlib window size
default_window_hight = 10.5
default_window_width = 10
fig = pylab.figure(figsize=(default_window_width,default_window_hight)) ### could use m,n to scale here
color_bar_w = 0.015 ### Sufficient size to show
## calculate positions for all elements
# ax1, placement of dendrogram 1, on the left of the heatmap
#if row_method != None: w1 =
[ax1_x, ax1_y, ax1_w, ax1_h] = [0.05,0.42,0.2,0.4] ### The second value controls the position of the matrix relative to the bottom of the view
width_between_ax1_axr = 0.004
height_between_ax1_axc = 0.004 ### distance between the top color bar axis and the matrix
# axr, placement of row side colorbar
[axr_x, axr_y, axr_w, axr_h] = [0.31,0.1,color_bar_w,0.6] ### second to last controls the width of the side color bar - 0.015 when showing
axr_x = ax1_x + ax1_w + width_between_ax1_axr
axr_y = ax1_y; axr_h = ax1_h
width_between_axr_axm = 0.004
# axc, placement of column side colorbar
[axc_x, axc_y, axc_w, axc_h] = [0.4,0.63,0.5,color_bar_w] ### last one controls the hight of the top color bar - 0.015 when showing
axc_x = axr_x + axr_w + width_between_axr_axm
axc_y = ax1_y + ax1_h + height_between_ax1_axc
height_between_axc_ax2 = 0.004
# axm, placement of heatmap for the data matrix
[axm_x, axm_y, axm_w, axm_h] = [0.4,0.9,2.5,0.5]
axm_x = axr_x + axr_w + width_between_axr_axm
axm_y = ax1_y; axm_h = ax1_h
axm_w = axc_w
# ax2, placement of dendrogram 2, on the top of the heatmap
[ax2_x, ax2_y, ax2_w, ax2_h] = [0.3,0.72,0.6,0.15] ### last one controls hight of the dendrogram
ax2_x = axr_x + axr_w + width_between_axr_axm
ax2_y = ax1_y + ax1_h + height_between_ax1_axc + axc_h + height_between_axc_ax2
ax2_w = axc_w
# placement of the phenotype legend
[axpl_x, axpl_y, axpl_w, axpl_h] = [0.78,0.84,0.05,0.13]
# placement of the sample legend
# axcb - placement of the sample legend
[axsl_x, axsl_y, axsl_w, axsl_h] = [0.05,0.29,0.05,0.09]
# axcb - placement of the color legend
[axcb_x, axcb_y, axcb_w, axcb_h] = [0.05, 0.88,0.05,0.09]
# Compute and plot top dendrogram
if not column_method is None and x.shape[1] > 1:
start_time = time.time()
d2 = dist.pdist(x.T)
D2 = dist.squareform(d2)
ax2 = fig.add_axes([ax2_x, ax2_y, ax2_w, ax2_h], frame_on=True)
Y2 = sch.linkage(D2, method=column_method, metric=column_metric) ### array-clustering metric - 'average', 'single', 'centroid', 'complete'
Z2 = sch.dendrogram(Y2)
ind2 = sch.fcluster(Y2,0.7*max(Y2[:,2]),'distance') ### This is the default behavior of dendrogram
time_diff = str(round(time.time()-start_time,1))
ax2.set_xticks([]) ### Hides ticks
ax2.set_yticks([])
#print 'Column clustering completed in %s seconds' % time_diff
else:
ind2 = ['NA']*len(column_header) ### Used for exporting the flat cluster data
# Compute and plot left dendrogram.
if not row_method is None and x.shape[0] > 1:
start_time = time.time()
x_bin = x.copy()
x_bin[x_bin > 0] = 1
d1 = dist.pdist(x_bin)
D1 = dist.squareform(d1) # full matrix
ax1 = fig.add_axes([ax1_x, ax1_y, ax1_w, ax1_h], frame_on=True) # frame_on may be False
ax1.set_xticks([]) ### Hides ticks
ax1.set_yticks([])
Y1 = sch.linkage(D1, method=row_method, metric=row_metric) ### gene-clustering metric - 'average', 'single', 'centroid', 'complete'
Z1 = sch.dendrogram(Y1, orientation='right')
ind1 = sch.fcluster(Y1,0.7*max(Y1[:,2]),'distance') ### This is the default behavior of dendrogram
time_diff = str(round(time.time()-start_time,1))
#print 'Row clustering completed in %s seconds' % time_diff
else:
ind1 = ['NA']*len(row_header) ### Used for exporting the flat cluster data
# Plot heatmap color legend
n = len(x[0]); m = len(x)
if secondary_pt_models is not None:
cmaplist = np.array([[247,247,247],[166,206,227],[178,223,138],[31,120,180]])/256.0
else:
cmaplist = np.array([[247,247,247],[31,120,180]])/256.0
cmap = mpl.colors.ListedColormap(cmaplist)
axcb = fig.add_axes([axcb_x, axcb_y, axcb_w, axcb_h], frame_on=False) # axes for colorbar
#cb = mpl.colorbar.ColorbarBase(axcb, cmap=cmap, orientation='horizontal')
bounds = numpy.linspace(0, len(cmaplist), len(cmaplist) + 1)
norm = mpl.colors.BoundaryNorm(bounds, len(cmaplist))
cb = mpl.colorbar.ColorbarBase(axcb, cmap=cmap, norm=norm, spacing='proportional', ticks=bounds, boundaries=bounds)
if secondary_pt_models is not None:
axcb.set_yticklabels(["negative", "%s positive" % primary_pt_models.get_name(), "%s positive" % secondary_pt_models.get_name(), "both predictors positive"], fontsize = 8)
axcb.yaxis.set_ticks([0.125, 0.375, 0.625, 0.875])
else:
axcb.set_yticklabels(["%s negative" % primary_pt_models.get_name(), "%s positive" % primary_pt_models.get_name()], fontsize = 8)
axcb.yaxis.set_ticks([0.25, 0.75])
axcb.set_title("Heatmap colorkey", fontsize = 10, loc = "left")
# Plot distance matrix.
axm = fig.add_axes([axm_x, axm_y, axm_w, axm_h]) # axes for the data matrix
xt = x
if not column_method is None and x.shape[1] > 1:
idx2 = Z2['leaves'] ### apply the clustering for the array-dendrograms to the actual matrix data
xt = xt[:,idx2]
ind2 = ind2[idx2] ### reorder the flat cluster to match the order of the leaves the dendrogram
pass
if not row_method is None and x.shape[0] > 1 :
idx1 = Z1['leaves'] ### apply the clustering for the gene-dendrograms to the actual matrix data
xt = xt[idx1,:] # xt is transformed x
ind1 = ind1[idx1] ### reorder the flat cluster to match the order of the leaves the dendrogram
### taken from http://stackoverflow.com/questions/2982929/plotting-results-of-hierarchical-clustering-ontop-of-a-matrix-of-data-in-python/3011894#3011894
im = axm.matshow(xt, aspect='auto', origin='lower', cmap=cmap, norm=norm) ### norm=norm added to scale coloring of expression with zero = white or black
axm.set_xticks([]) ### Hides x-ticks
axm.set_yticks([])
# Add text
new_row_header=[]
new_column_header=[]
for i in range(x.shape[0]):
margin = 0
if len(row_header) > 0 :
fontdict = {'fontsize': 7}
if len(row_header) > 30 :
fontdict = {'fontsize': 7}
margin = 0.5
if len(row_header) > 50 :
fontdict = {'fontsize': 4}
if len(row_header) > 100 :
fontdict = {'fontsize': 2}
if len(row_header) > 200:
fontdict = {'fontsize': 1}
#if len(row_header)<100: ### Don't visualize gene associations when more than 100 rows
axm.plot([-0.5, len(column_header)], [i - 0.5, i - 0.5], color = 'black', ls = '-')
if x.shape[0] > 1 and row_method is not None:
label = row_header[idx1[i]]
else:
label = row_header[i]
axm.text(x.shape[1] + 0.2, i - margin , ' ' + label, fontdict = fontdict)
new_row_header.append(label)
for i in range(x.shape[1]):
if not column_method is None and x.shape[1] > 1:
axm.plot([i-0.5, i-0.5], [-0.5, len(row_header) - 0.5], color = 'black', ls = '-')
axm.text(i-0.5, -0.5, ' '+ column_header[idx2[i]], fontdict = {'fontsize': 7}, rotation=270, verticalalignment="top") # rotation could also be degrees
new_column_header.append(column_header[idx2[i]])
else: ### When not clustering columns
axm.plot([i-0.5, i-0.5], [-0.5, len(row_header) - 0.5], color = 'black', ls = '-')
axm.text(i-0.5, -0.8, ' '+column_header[i], fontdict = {'fontsize': 7}, rotation=270, verticalalignment="top")
new_column_header.append(column_header[i])
pt2acc = primary_pt_models.get_pt2acc()
pt2acc.index = pt2acc.loc[:, "accession"]
#colors
colors = ps.read_csv(color_f, index_col = None, sep = "\t")
if "category" in pt2acc.columns:
#assign categories to colors
import sets
#get unique categories in the order they appear in the pt mapping table
cats = sorted(set(pt2acc.loc[:, "category"].tolist()), key=lambda x: pt2acc.loc[:, "category"].tolist().index(x))
if not colors.shape[0] < len(cats):
# Plot phenotype legend
axpl = fig.add_axes([axpl_x, axpl_y, axpl_w, axpl_h], frame_on=False) # axes for colorbar
#for i in pt2cat2col.index:
# if pt2cat2col.loc[i,"Category"] not in cat2col:
# cat2col[pt2cat2col.loc[i,"Category"]] = pt2cat2col.loc[i, ["r", "g", "b"]]
# col2id[pt2cat2col.loc[i,"Category"]] = j
# j += 1
pt2cat = dict([(pt2acc.loc[i, "accession"], pt2acc.loc[i, "category"]) for i in pt2acc.index])
cat2id = dict([(cats[i - 1], i) for i in range(1, len(cats) + 1)])
cmaplist = ps.DataFrame(colors.iloc[:len(cats),])
cmaplist.index = cats
cmaplist = cmaplist / 256.0
cmap_p = mpl.colors.ListedColormap(cmaplist.values)
bounds = numpy.linspace(0, cmaplist.shape[0], cmaplist.shape[0] + 1)
norm = mpl.colors.BoundaryNorm(bounds, cmaplist.shape[0])
cb = mpl.colorbar.ColorbarBase(axpl, cmap=cmap_p, norm=norm, spacing='proportional', ticks=bounds, boundaries=bounds)
axpl.set_yticklabels([i for i in cats], fontsize = 6)
axpl.yaxis.set_ticks(np.arange(1.0 / len(cats) / 2, 1, 1.0 / len(cats)))
axpl.set_title("Phenotype colorkey", fontsize = 10, loc = "left")
# Plot colside colors
# axc --> axes for column side colorbar
axc = fig.add_axes([axc_x, axc_y, axc_w, axc_h]) # axes for column side colorbar
dc = numpy.array([cat2id[pt2cat[i]] for i in column_header]).T
if x.shape[1] > 1 and column_method is not None:
dc = dc[idx2]
dc.shape = (1, x.shape[1])
im_c = axc.matshow(dc, aspect='auto', origin='lower', cmap=cmap_p)
axc.set_xticks([]) ### Hides ticks
axc.set_yticks([])
# Plot rowside colors
if sample_f is not None and x.shape[0] > 1:
samples = ps.read_csv(sample_f, sep = "\t", index_col = "sample_name")
if "category" in samples.columns:
#get unique sample categories and sort according to the order they appear in the sampling file
sample_cats = sorted(set(samples.loc[:, "category"].tolist()), key = lambda x: samples.loc[:, "category"].tolist().index(x))
cat2col = dict([(sample_cats[i - 1], i) for i in range(1, len(sample_cats) + 1)])
cmaplist = ps.DataFrame(colors.iloc[:len(sample_cats),]) / 256.0
cmap_p = mpl.colors.ListedColormap(cmaplist.values)
axr = fig.add_axes([axr_x, axr_y, axr_w, axr_h]) # axes for row side colorbar
dr = numpy.array([cat2col[samples.loc[i, "category"]] for i in row_header]).T
if row_method is not None:
dr = dr[idx1]
dr.shape = (samples.shape[0], 1)
#cmap_r = mpl.colors.ListedColormap(['r', 'g', 'b', 'y', 'w', 'k', 'm'])
im_r = axr.matshow(dr, aspect='auto', origin='lower', cmap=cmap_p)
axr.set_xticks([]) ### Hides ticks
axr.set_yticks([])
# Plot sample legend
axsl = fig.add_axes([axsl_x, axsl_y, axsl_w, axsl_h], frame_on=False) # axes for colorbar
bounds = numpy.linspace(0, len(sample_cats), len(sample_cats) + 1)
norm = mpl.colors.BoundaryNorm(bounds, len(sample_cats))
cb = mpl.colorbar.ColorbarBase(axsl, cmap=cmap_p, norm=norm, spacing='proportional', ticks=bounds, boundaries=bounds)
axsl.yaxis.set_ticks(np.arange(1.0 / len(sample_cats) / 2, 1, 1.0 / len(sample_cats)))
axsl.set_yticklabels([i for i in sample_cats], fontsize = 6)
axsl.set_title("Sample colorkey", loc = "left", fontsize = 10)
#exportFlatClusterData(filename, new_row_header,new_column_header,xt,ind1,ind2)
### Render the graphic
if len(row_header)>50 or len(column_header)>50:
pylab.rcParams['font.size'] = 6
else:
pylab.rcParams['font.size'] = 8
    pylab.savefig(filename, dpi=300)
#pylab.show()
def getColorRange(x):
""" Determines the range of colors, centered at zero, for normalizing cmap """
vmax=x.max()
vmin=x.min()
if vmax<0 and vmin<0: direction = 'negative'
elif vmax>0 and vmin>0: direction = 'positive'
else: direction = 'both'
if direction == 'both':
vmax = max([vmax,abs(vmin)])
vmin = -1*vmax
return vmax,vmin
else:
return vmax,vmin
################# Export the flat cluster data #################
def exportFlatClusterData(filename, new_row_header,new_column_header,xt,ind1,ind2):
""" Export the clustered results as a text file, only indicating the flat-clusters rather than the tree """
filename = string.replace(filename,'.pdf','.txt')
export_text = open(filename,'w')
column_header = string.join(['UID','row_clusters-flat']+new_column_header,'\t')+'\n' ### format column-names for export
export_text.write(column_header)
column_clusters = string.join(['column_clusters-flat','']+ map(str, ind2),'\t')+'\n' ### format column-flat-clusters for export
export_text.write(column_clusters)
### The clusters, dendrogram and flat clusters are drawn bottom-up, so we need to reverse the order to match
new_row_header = new_row_header[::-1]
xt = xt[::-1]
### Export each row in the clustered data matrix xt
i=0
for row in xt:
export_text.write(string.join([new_row_header[i],str(ind1[i])]+map(str, row),'\t')+'\n')
i+=1
export_text.close()
### Export as CDT file
filename = string.replace(filename,'.txt','.cdt')
export_cdt = open(filename,'w')
column_header = string.join(['UNIQID','NAME','GWEIGHT']+new_column_header,'\t')+'\n' ### format column-names for export
export_cdt.write(column_header)
eweight = string.join(['EWEIGHT','','']+ ['1']*len(new_column_header),'\t')+'\n' ### format column-flat-clusters for export
export_cdt.write(eweight)
### Export each row in the clustered data matrix xt
i=0
for row in xt:
export_cdt.write(string.join([new_row_header[i]]*2+['1']+map(str, row),'\t')+'\n')
i+=1
export_cdt.close()
################# Create Custom Color Gradients #################
#http://matplotlib.sourceforge.net/examples/pylab_examples/custom_cmap.html
def RedBlackSkyBlue():
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.9),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0))
}
my_cmap = mpl.colors.LinearSegmentedColormap('my_colormap',cdict,256)
return my_cmap
def RedBlackBlue():
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0))
}
my_cmap = mpl.colors.LinearSegmentedColormap('my_colormap',cdict,256)
return my_cmap
def RedBlackGreen():
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'green': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0))
}
my_cmap = mpl.colors.LinearSegmentedColormap('my_colormap',cdict,256)
return my_cmap
def YellowBlackBlue():
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.8),
(0.5, 0.1, 0.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0))
}
### yellow is created by adding y = 1 to RedBlackSkyBlue green last tuple
### modulate between blue and cyan using the last y var in the first green tuple
my_cmap = mpl.colors.LinearSegmentedColormap('my_colormap',cdict,256)
return my_cmap
| gpl-3.0 |
balazssimon/ml-playground | udemy/lazyprogrammer/reinforcement-learning-python/comparing_explore_exploit_methods.py | 1 | 2913 | import numpy as np
import matplotlib.pyplot as plt
from comparing_epsilons import Bandit
from optimistic_initial_values import run_experiment as run_experiment_oiv
from ucb1 import run_experiment as run_experiment_ucb
class BayesianBandit:
def __init__(self, true_mean):
self.true_mean = true_mean
# parameters for mu - prior is N(0,1)
self.predicted_mean = 0
self.lambda_ = 1
self.sum_x = 0 # for convenience
self.tau = 1
def pull(self):
return np.random.randn() + self.true_mean
def sample(self):
return np.random.randn() / np.sqrt(self.lambda_) + self.predicted_mean
def update(self, x):
self.lambda_ += self.tau
self.sum_x += x
self.predicted_mean = self.tau*self.sum_x / self.lambda_
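  # Conjugate Gaussian update with known observation precision tau and prior
  # N(0, 1/lambda_0): lambda_n = lambda_0 + n*tau and
  # mu_n = tau*sum(x_1..x_n)/lambda_n, which is what update() accumulates one
  # sample at a time (here lambda_0 = tau = 1).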
def run_experiment_decaying_epsilon(m1, m2, m3, N):
bandits = [Bandit(m1), Bandit(m2), Bandit(m3)]
data = np.empty(N)
for i in range(N):
# epsilon greedy
p = np.random.random()
if p < 1.0/(i+1):
j = np.random.choice(3)
else:
j = np.argmax([b.mean for b in bandits])
x = bandits[j].pull()
bandits[j].update(x)
# for the plot
data[i] = x
cumulative_average = np.cumsum(data) / (np.arange(N) + 1)
# plot moving average ctr
plt.plot(cumulative_average)
plt.plot(np.ones(N)*m1)
plt.plot(np.ones(N)*m2)
plt.plot(np.ones(N)*m3)
plt.xscale('log')
plt.show()
for b in bandits:
print(b.mean)
return cumulative_average
def run_experiment(m1, m2, m3, N):
bandits = [BayesianBandit(m1), BayesianBandit(m2), BayesianBandit(m3)]
data = np.empty(N)
for i in range(N):
    # Thompson sampling: draw one sample from each arm's posterior and play the best draw
j = np.argmax([b.sample() for b in bandits])
x = bandits[j].pull()
bandits[j].update(x)
# for the plot
data[i] = x
cumulative_average = np.cumsum(data) / (np.arange(N) + 1)
# plot moving average ctr
plt.plot(cumulative_average)
plt.plot(np.ones(N)*m1)
plt.plot(np.ones(N)*m2)
plt.plot(np.ones(N)*m3)
plt.xscale('log')
plt.show()
return cumulative_average
if __name__ == '__main__':
m1 = 1.0
m2 = 2.0
m3 = 3.0
eps = run_experiment_decaying_epsilon(m1, m2, m3, 100000)
oiv = run_experiment_oiv(m1, m2, m3, 100000)
ucb = run_experiment_ucb(m1, m2, m3, 100000)
bayes = run_experiment(m1, m2, m3, 100000)
# log scale plot
plt.plot(eps, label='decaying-epsilon-greedy')
plt.plot(oiv, label='optimistic')
plt.plot(ucb, label='ucb1')
plt.plot(bayes, label='bayesian')
plt.legend()
plt.xscale('log')
plt.show()
# linear plot
plt.plot(eps, label='decaying-epsilon-greedy')
plt.plot(oiv, label='optimistic')
plt.plot(ucb, label='ucb1')
plt.plot(bayes, label='bayesian')
plt.legend()
plt.show()
| apache-2.0 |
diego0020/PySurfer | examples/plot_label.py | 4 | 1526 | """
Display ROI Labels
==================
Using PySurfer you can plot Freesurfer cortical labels on the surface
with a large amount of control over the visual representation.
"""
import os
from surfer import Brain
print(__doc__)
subject_id = "fsaverage"
hemi = "lh"
surf = "smoothwm"
brain = Brain(subject_id, hemi, surf)
# If the label lives in the normal place in the subjects directory,
# you can plot it by just using the name
brain.add_label("BA1")
# Some labels have an associated scalar value at each ID in the label.
# For example, they may be probabilistically defined. You can threshold
# what vertices show up in the label using this scalar data
brain.add_label("BA1", color="blue", scalar_thresh=.5)
# Or you can give a path to a label in an arbitrary location
subj_dir = os.environ["SUBJECTS_DIR"]
label_file = os.path.join(subj_dir, subject_id,
"label", "%s.MT.label" % hemi)
brain.add_label(label_file)
# By default the label is 'filled-in', but you can
# plot just the label boundaries
brain.add_label("BA44", borders=True)
# You can also control the opacity of the label color
brain.add_label("BA6", alpha=.7)
# Finally, you can plot the label in any color you want.
brain.show_view(dict(azimuth=-42, elevation=105, distance=225,
focalpoint=[-30, -20, 15]))
# Use any valid matplotlib color.
brain.add_label("V1", color="steelblue", alpha=.6)
brain.add_label("V2", color="#FF6347", alpha=.6)
brain.add_label("entorhinal", color=(.2, 1, .5), alpha=.6)
| bsd-3-clause |
akpetty/ibtopo2016 | calc_multi_atm.py | 1 | 9475 | ##############################################################
# Date: 20/01/16
# Name: calc_multi_atm.py
# Author: Alek Petty
# Description: Main script to calculate sea ice topography from IB ATM data
# Input requirements: ATM data, PosAV data (for geolocation)
# Output: topography datasets
import matplotlib
matplotlib.use("AGG")
import IB_functions as ro
import mpl_toolkits.basemap.pyproj as pyproj
from osgeo import osr, gdal
from pyproj import Proj
from glob import glob
from pylab import *
from scipy import ndimage
from matplotlib import rc
#from scipy.interpolate import griddata
from matplotlib.mlab import griddata
import time
import scipy.interpolate
import h5py
from scipy.spatial import cKDTree as KDTree
import os
def calc_bulk_stats():
ice_area=-999
ridge_area_all=-999
mean_ridge_height_all=-999
mean_ridge_heightL=-999
ridge_areaL=-999
num_ridges_out=-999
levpercent_out=-999
num_pts_section=-999
# IF SECTION GOOD THEN GET ICE SWATH AREA
if (points_good==1):
ice_area = ma.count(elevation2d)*(xy_res**2)
levpercent_out=levpercent
# IF SECTION GOOD AND HAVE SOME RIDGING THEN ASSIGN TOTAL RIDGE AREA AND ELEVATION
if ((points_good==1)&(found_ridges==1)):
ridge_area_all = ma.count(elevation2d_ridge_ma)*(xy_res**2)
mean_ridge_height_all = np.mean(elevation2d_ridge_ma) - level_elev
# IF SECTION GOOD AND WE HAVE NO RIDGING (AREA OF RIDGING = 0) THEN ASSIGN ZERO RIDGE AREA HEIGHT
if ((points_good==1)&(found_ridges==0)):
ridge_area_all = 0.
mean_ridge_height_all = 0.
#IF GOOD SECTION BUT NO BIG RIDGES THEN SET THESE VALUES TO ZERO
if ((points_good==1)&(found_big_ridge==0)):
mean_ridge_heightL=0.
ridge_areaL=0.
num_ridges_out=0
# IF WE FOUND SOME BIG RIDGES THENA SSIGN BIG RIDGE AREA HEIGHT AND NUMBER
if ((points_good==1)&(found_big_ridge==1)):
mean_ridge_heightL = np.mean(ridge_height_mesh)
ridge_areaL = ma.count(ridge_height_mesh)*(xy_res**2)
num_ridges_out = num_ridges
return [mean_x, mean_y, ice_area, num_ridges_out, ridge_area_all, ridge_areaL, mean_ridge_height_all, mean_ridge_heightL, mean_alt, mean_pitch, mean_roll, mean_vel, num_pts_section,levpercent_out, section_num, found_ridges, points_good, plane_good]
#-------------- ATM AND DMS PATHS------------------
datapath='./Data_output/'
rawdatapath = '../../../DATA/ICEBRIDGE/'
ATM_path = rawdatapath+'/ATM/ARCTIC/'
posAV_path =rawdatapath+'/POSAV/SEA_ICE/GR/'
#posAV_path ='/Volumes/TBOLT_HD_PETTY/POSAV/'
m=pyproj.Proj("+init=EPSG:3413")
#FREE PARAMETERS
min_ridge_height = 0.2
along_track_res=1000
pwidth=20
pint=5
xy_res=2
start_year=2009
end_year=2009
min_ridge_size=100
sh=0
if (sh==1):
print 'Ridge threshold:', sys.argv[1]
print 'Along track res:',sys.argv[2]
print 'xy res:',sys.argv[3]
print 'Start year:',sys.argv[4]
print 'End year:',sys.argv[5]
min_ridge_height = float(sys.argv[1])
along_track_res = int(sys.argv[2])
xy_res = int(sys.argv[3])
start_year=int(sys.argv[4])
end_year=int(sys.argv[5])
pts_threshold=15000
num_points_req = min_ridge_size/(xy_res**2)
section_num=0
print 'Num points req', num_points_req
ftype = str(int(along_track_res/1000))+'km_xyres'+str(xy_res)+'m_'+str(int(min_ridge_height*100))+'cm'
outpath = datapath+ftype+'/'
for year in xrange(start_year, end_year+1):
ATM_year = ATM_path+str(year)+'/'
atm_files_year = glob(ATM_year+'/*/')
#for days in xrange():
for days in xrange(size(atm_files_year)):
atm_path_date = atm_files_year[days]
print 'ATM day:', atm_path_date
atm_files_in_day = ro.get_atm_files(atm_path_date, year)
#load POS file
posAV = loadtxt(posAV_path+str(year)+'_GR_NASA/sbet_'+str(atm_path_date[-9:-1])+'.out.txt', skiprows=1)
#GET POSITION OF PLANE AND 1km MARKERS FROM POSAV
xp, yp, dist, km_idxs, km_utc_times = ro.get_pos_sections(posAV, m, along_track_res)
for atm_file in xrange(size(atm_files_in_day)):
atm_statsALL=np.array([]).reshape(0,3)
ridge_statsALL=np.array([]).reshape(0,9)
covarALL=np.array([]).reshape(0,5)
bulk_statsALL=np.array([]).reshape(0,18)
print 'ATM file:', atm_files_in_day[atm_file], str(atm_file)+'/'+str(size(atm_files_in_day))
lonT, latT, elevationT, utc_timeT= ro.get_atmqih5(atm_files_in_day[atm_file], year, 1)
#IF SIZE OF DATA IS LESS THAN SOME THRESHOLD THEN DONT BOTHER ANALYZING
if (size(utc_timeT)<100):
break
xT, yT = m(lonT, latT)
#GET POSAV INDICES COINCIDING WITH START AND END OF ATM FILE. ADD PLUS/MINUS 1 FOR SOME LEEWAY.
start_i = np.abs(km_utc_times - utc_timeT[0]).argmin()
end_i = np.abs(km_utc_times - utc_timeT[-1]).argmin()
print 'START/END:', start_i, end_i
for i in xrange(start_i -1, end_i + 1):
section_num+=1
found_ridges=0
found_big_ridge=0
plane_good=0
points_good=0
ridge_statsT = np.array([]).reshape(0,9)
cov_matrix = np.array([]).reshape(0,5)
#label_numsL=np.array(0)
mean_x, mean_y, mean_alt, mean_pitch, mean_roll, mean_vel = ro.posav_section_info(m, posAV[km_idxs[i]:km_idxs[i+1]] )
print ' '
print str(i)+'/'+str(end_i + 1)
print 'Mean altitude:', mean_alt
print 'Mean pitch:', mean_pitch
print 'Mean roll:', mean_roll
print 'Mean vel:', mean_vel
if (abs(mean_alt-500)<200) & (abs(mean_pitch)<5) & (abs(mean_roll)<5):
plane_good=1
poly_path, vertices, sides = ro.get_pos_poly(xp, yp, km_idxs[i], km_idxs[i+1])
xatm_km, yatm_km, elevation_km = ro.get_atm_poly(xT, yT, elevationT, km_utc_times, utc_timeT, poly_path, i)
num_pts_section = size(xatm_km)
print 'Num pts in section:', size(xatm_km)
#if there are more than 15000 pts in the 1km grid (average of around 20000) then proceed
if (num_pts_section>pts_threshold):
points_good=1
#ro.plot_atm_poly(m, xatm_km, yatm_km, elevation_km, poly_path, i, out_path, year)
#GET ATM GRID
xx2d, yy2d = ro.grid_atm(xatm_km, yatm_km, xy_res)
print 'Grid:', size(xx2d[0]), size(xx2d[1])
# CALCULATE THE LEVEL ICE SURFACE USING THE CUMULATIVE DISTRIBUTION
#THRESH IS THE LEVEL ICE PLUS RIDGED ICE ELEVATION
level_elev, thresh, levpercent = ro.calc_level_ice(elevation_km, pint, pwidth, min_ridge_height)
#level_elev, thresh, levpercent = ro.calc_level_ice(elevation_km, pwidth, min_ridge_height)
elevation2d, elevation2d_ridge_ma, ridge_area = ro.grid_elevation(xatm_km, yatm_km,elevation_km, xx2d, yy2d, thresh, kdtree=1)
elevation2d_ridge_maL =elevation2d_ridge_ma-level_elev
#IF THERE IS EVEN A LITTLE BIT OF RIDGING (might not actually be enough for a big areal ridge from the labelling) then proceed to clean up data.
if (ridge_area>0):
found_ridges=1
#CLEAN UP DATA WITH KDTREE AROUND RIDGE POINTS
#REMOVE FOR PRELIMINARY STUDIES AS COMPUTATIONALLY EXPENSIVE!
#elevation2d_ridge_ma = kdtree_clean()
#GET RIDGE LABELS - MAKE SURE RIDGES ARE ABOVE CERTAIN SIZE, DECIDED BY NUM_PTS_REQ
label_im = ro.get_labels(elevation2d_ridge_maL, xy_res, min_ridge_size, min_ridge_height)
# DECIDE IF WE WANT TO CALCULATE RIDGE ORIENTATION OR NOT.
if (np.amax(label_im)>=1):
found_big_ridge=1
print 'Found Ridge!'
print 'Number of labels:', np.amax(label_im)
num_ridges = np.amax(label_im)
#GET RIDGE STATS IF WE DID FIND A RIDGE
ridge_statsT, ridge_height_mesh, cov_matrix, indexT = ro.calc_ridge_stats(elevation2d_ridge_ma, num_ridges, label_im, xx2d, yy2d, level_elev, section_num, calc_orientation=1)
#CALCULATE BULK STATISTICS AS WE HAVE VALID NUMBER OF POINTS WITHIN THE SECTION
else:
print 'No data - WHY?! --------------'
print 'Num pts in section:', size(xatm_km)
#ASSIGN BULK STATISTICS AS WE HAVE NOT CARRIED OUT RIDGE CALCULATION AS PLANE IS DOING FUNNY THINGS
bulk_statsT = calc_bulk_stats()
ridge_statsALL = vstack([ridge_statsALL, ridge_statsT])
covarALL = vstack([covarALL, cov_matrix])
bulk_statsALL = vstack([bulk_statsALL, bulk_statsT])
if not os.path.exists(outpath+str(year)):
os.makedirs(outpath+str(year))
ridge_statsALL.dump(outpath+str(year)+'/ridge_stats_'+str(int(along_track_res/1000))+'km_xyres'+str(xy_res)+'m_'+str(int(min_ridge_height*100))+'cm_poly'+str(atm_path_date[-9:-1])+'_f'+str(atm_file).zfill(3)+'.txt')
covarALL.dump(outpath+str(year)+'/cov_matrix_'+str(int(along_track_res/1000))+'km_xyres'+str(xy_res)+'m_'+str(int(min_ridge_height*100))+'cm_poly'+str(atm_path_date[-9:-1])+'_f'+str(atm_file).zfill(3)+'.txt')
bulk_statsALL.dump(outpath+str(year)+'/bulk_ridge_stats_'+str(int(along_track_res/1000))+'km_xyres'+str(xy_res)+'m_'+str(int(min_ridge_height*100))+'cm_poly'+str(atm_path_date[-9:-1])+'_f'+str(atm_file).zfill(3)+'.txt')
#CAN OUTPUT AS TEXT FILES INSTEAD - BIGGER BUT CAN OPEN RAW
#savetxt(outpath+str(year)+'/ridge_stats_'+str(int(along_track_res/1000))+'km_xyres'+str(xy_res)+'m_'+str(int(min_ridge_height*100))+'cm_poly'+str(atm_path_date[-9:-1])+'_f'+str(atm_file)+'.txt', ridge_statsALL)
#savetxt(outpath+str(year)+'/cov_matrix_'+str(int(along_track_res/1000))+'km_xyres'+str(xy_res)+'m_'+str(int(min_ridge_height*100))+'cm_poly'+str(atm_path_date[-9:-1])+'_f'+str(atm_file)+'.txt', covarALL)
#savetxt(outpath+str(year)+'/bulk_ridge_stats_'+str(int(along_track_res/1000))+'km_xyres'+str(xy_res)+'m_'+str(int(min_ridge_height*100))+'cm_poly'+str(atm_path_date[-9:-1])+'_f'+str(atm_file)+'.txt', bulk_statsALL)
| gpl-3.0 |
ishank08/scikit-learn | examples/applications/plot_stock_market.py | 76 | 8522 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is a list of connection. For each
symbol, the symbols that it is connected too are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters and can choose the number of
clusters automatically from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve a 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where the nodes
represent the stocks and the edges the partial correlations between them:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plane
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
try:
from matplotlib.finance import quotes_historical_yahoo_ochl
except ImportError:
# quotes_historical_yahoo_ochl was named quotes_historical_yahoo before matplotlib 1.4
from matplotlib.finance import quotes_historical_yahoo as quotes_historical_yahoo_ochl
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
    'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [quotes_historical_yahoo_ochl(symbol, d1, d2, asobject=True)
for symbol in symbols]
open_prices = np.array([q.open for q in quotes]).astype(np.float)
close_prices = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close_prices - open_prices
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
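# After fitting, edge_model.covariance_ holds the estimated covariance and
# edge_model.precision_ the sparse inverse covariance; a non-zero off-diagonal
# entry of the precision matrix indicates conditional dependence between two stocks.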
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
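# labels[i] is the cluster index assigned to stock i; affinity propagation picks
# the number of clusters itself, so n_labels is not fixed in advance.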
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
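# After the transpose, embedding has shape (2, n_stocks): one (x, y) position per stock.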
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
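# Rescaling the precision matrix by 1 / sqrt(diagonal) on both sides gives,
# up to sign, the partial correlations between the stocks.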
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
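# non_zero marks stock pairs whose absolute partial correlation exceeds 0.02;
# triu(..., k=1) keeps only the upper triangle so each edge is counted once.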
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
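# values holds the absolute partial correlation of each retained edge and drives
# both the color and the width of the line segments below.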
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
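    # the self-distances are set to 1 above so that the argmin along each axis
    # does not pick the node itself as its own nearest neighbor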
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
alpenwasser/laborjournal | versuche/skineffect/python/stuetzpunkte_new_lowfreq.py | 1 | 6373 | #!/usr/bin/env python3
from sympy import *
from mpmath import *
from matplotlib.pyplot import *
import numpy as np            # np.* is used below; the star imports above do not guarantee this alias
import matplotlib
import matplotlib.patches     # accessed as matplotlib.patches.Rectangle further down
#init_printing() # make things prettier when we print stuff for debugging.
# ************************************************************************** #
# Magnetic field inside copper coil with hollow copper cylinder #
# ************************************************************************** #
# All values are in standard SI units unless otherwise noted.
# ---------------------------------------------------------#
# Define Variables and Constants #
# ---------------------------------------------------------#
npts = 19 # careful: number of points is npts + 1 (starts at 0)
#fmin = 5e-4
#fmax = 5e-2
fmin = 0.1
fmax = 0.2
highest_frequency = fmin * exp(log(fmax-fmin))
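# Since exp(log(x)) == x, this is just fmin * (fmax - fmin): the frequency of the
# last support point produced by formula A below.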
freq_max_ratio = highest_frequency / fmax
font = {
'family' : 'serif',
'color' : 'black',
'weight' : 'normal',
'size' : 9,
}
titlefont = {
'family' : 'serif',
'color' : 'black',
'weight' : 'normal',
'size' : 10,
}
plot_legend_fontsize = 9
plot_color_old = 'magenta'
plot_color_new = 'blue'
plot_color_common = 'black'
plot_label_points_old = r"St\"utzpunktformel A: $\displaystyle f_{kA} = f_{min} \cdot exp\Biggr(\frac{k}{NPTS} \cdot ln(f_{max}-f_{min})\Biggr)$"
plot_label_points_new = r"St\"utzpunktformel B: $\displaystyle f_{kB} = exp\Biggr((1-\frac{k}{NPTS}) \cdot ln(f_{min})\Biggr) \cdot exp\Biggr(\frac{k}{NPTS} \cdot ln(f_{max})\Biggr)$"
plot_label_vertical_common = r"minimale Frequenz St\"utzpunkt: "
plot_label_vertical_old = r"maximale Frequenz St\"utzpunkt, Methode A: "
plot_label_vertical_new = r"maximale Frequenz St\"utzpunkt, Methode B: "
plot_added_text = r"Verh\"altnis der maximalen Frequenzen Methode A -- Methode B: $\displaystyle \frac{f_{kA}}{f_{kB}}\Bigg|_{k=NPTS} \approx " + str(freq_max_ratio) + "$"
plot_freq_range_label = r"Eingestellter Frequenzbereich: $f_{min} = " + str(fmin) + r"$Hz bis $f_{max} = " + str(fmax) + r"$Hz"
plot_size_measurements = 24
plot_scale_x = 'log'
plot_label_x = r"Frequenz des St\"utzpunkts (Hz)"
plot_1_label_y = 'k (siehe Formel)'
plot_1_title = r"Vergleich St\"utzpunktformeln, effektiv abgedeckter Frequenzbereich: " + str(fmin) + " Hz bis " + str(highest_frequency) + " Hz, " + str(npts+1) + " Punkte"
y_lim_low = -2
y_lim_high = npts + 2
x_lim_low = 0.67 * fmin
x_lim_high = 1.33 * fmin * fmax
# ---------------------------------------------------------#
# Generate points for frequency axis #
# ---------------------------------------------------------#
n = np.linspace(0,npts,npts)
expufunc = np.frompyfunc(exp,1,1)
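# np.frompyfunc vectorizes the scalar exp() pulled in by the star imports so it
# can be applied elementwise to the numpy array n.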
frequency_vector_old = fmin*expufunc(n*log(fmax-fmin)/npts)
frequency_vector_new = expufunc((1-n/npts)*log(fmin)) * expufunc(n*log(fmax)/npts)
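# Formula A: f_k = fmin * exp(k/npts * ln(fmax - fmin)) -- starts at fmin, but its
# upper end scales with (fmax - fmin) rather than fmax.
# Formula B: f_k = fmin^(1 - k/npts) * fmax^(k/npts) -- support points spaced evenly
# on a log scale between fmin and fmax.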
plot_label_vertical_common += str(frequency_vector_old[0]) + " Hz"
plot_label_vertical_old += str(frequency_vector_old[npts-1]) + " Hz"
plot_label_vertical_new += str(frequency_vector_new[npts-1]) + " Hz"
# ---------------------------------------------------------#
# Plot the Things #
# ---------------------------------------------------------#
matplotlib.pyplot.rc('text', usetex=True)
matplotlib.pyplot.rc('font', family='serif')
#fig1 = figure(1)
#fig1 = figure(1,figsize=(9,9))
#fig1 = figure(1,figsize=(8.26,11.7)) # A4 size in inches
fig1 = figure(1,figsize=(8.26,10.0))
axes1 = fig1.add_subplot(111)
axes1.set_position([0.1,0.1,0.5,0.8])
axes1.scatter(frequency_vector_old,
n,
color=plot_color_old,
s=plot_size_measurements,
label=plot_label_points_old
)
axes1.scatter(frequency_vector_new,
n,
color=plot_color_new,
s=plot_size_measurements,
label=plot_label_points_new
)
# Draw the common starting point black and a bit bigger
axes1.scatter(frequency_vector_old[0],
n[0],
color=plot_color_common,
s=plot_size_measurements*1.5,
)
axes1.plot([frequency_vector_old[0],frequency_vector_old[0]],
[y_lim_low,y_lim_high],
color=plot_color_common,
label=plot_label_vertical_common
)
axes1.plot([frequency_vector_old[npts-1],frequency_vector_old[npts-1]],
[y_lim_low,y_lim_high],
color=plot_color_old,
label=plot_label_vertical_old
)
axes1.plot([frequency_vector_new[npts-1],frequency_vector_new[npts-1]],
[y_lim_low,y_lim_high],
color=plot_color_new,
label=plot_label_vertical_new
)
axes1.set_xscale(plot_scale_x)
axes1.set_ylim([y_lim_low,y_lim_high])
#axes1.set_xlim([x_lim_low,x_lim_high])
axes1.set_xlabel(plot_label_x,fontdict=font)
axes1.set_ylabel(plot_1_label_y,fontdict=font)
axes1.set_title(plot_1_title,fontdict=titlefont)
axes1.tick_params(labelsize=9)
# ---------------------------------------------------- #
# Work some magic to append the fraction of the two #
# methods to the legend instead of it being some #
# random piece of text on the plot. #
# ---------------------------------------------------- #
rect = matplotlib.patches.Rectangle([0,0],0,0,color='white',label=plot_added_text)
rect2= matplotlib.patches.Rectangle([0,0],0,0,color='white',label=plot_freq_range_label)
handles,legends = axes1.get_legend_handles_labels()
handles.append(rect)
handles.append(rect2)
axes1.legend(handles=handles,fontsize=plot_legend_fontsize,loc='upper left',bbox_to_anchor=(0.0,-0.075))
# ---------------------------------------------------- #
# This would be necessary if we wanted to actually #
# draw the patch, leaving this here for future #
# reference. #
# ---------------------------------------------------- #
#axes1.add_patch(rect)
fig1.subplots_adjust(bottom=0.29,left=0.05,right=0.99,top=.98,hspace=0.3)
fig1.savefig('plots-pgf/stuetzpunkte-lowfreq.pgf')
fig1.savefig('plots-pdf/stuetzpunkte-lowfreq.pdf')
#show()
| mit |