| repo_name (stringlengths 7-92) | path (stringlengths 5-149) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 911-693k) | license (stringclasses 15 values) |
---|---|---|---|---|---|
srus/django-kickstartvenv | boot/ipython/ipython_config.py | 3 | 19804 | # Configuration file for ipython.
c = get_config()
#------------------------------------------------------------------------------
# InteractiveShellApp configuration
#------------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
# Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.InteractiveShellApp.exec_PYTHONSTARTUP = True
# lines of code to run at IPython startup.
c.InteractiveShellApp.exec_lines = [
'from __future__ import unicode_literals, print_function, absolute_import, division'
]
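# Added, hedged example (not part of the original kickstart config): further
# startup lines one might append here, e.g. enabling IPython's autoreload
# extension. The specific entries below are illustrative assumptions only.
# c.InteractiveShellApp.exec_lines.append('%load_ext autoreload')
# c.InteractiveShellApp.exec_lines.append('%autoreload 2')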
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.InteractiveShellApp.gui = None
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.InteractiveShellApp.pylab = None
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.InteractiveShellApp.matplotlib = None
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.InteractiveShellApp.pylab_import_all = True
# A list of dotted module names of IPython extensions to load.
# c.InteractiveShellApp.extensions = []
# Run the module as a script.
# c.InteractiveShellApp.module_to_run = ''
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.InteractiveShellApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
# List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = []
# A file to be run
# c.InteractiveShellApp.file_to_run = ''
#------------------------------------------------------------------------------
# TerminalIPythonApp configuration
#------------------------------------------------------------------------------
# TerminalIPythonApp will inherit config from: BaseIPythonApplication,
# Application, InteractiveShellApp
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.TerminalIPythonApp.exec_PYTHONSTARTUP = True
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.TerminalIPythonApp.pylab = None
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.TerminalIPythonApp.verbose_crash = False
# Run the module as a script.
# c.TerminalIPythonApp.module_to_run = ''
# The date format used by logging formatters for %(asctime)s
# c.TerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# Whether to overwrite existing config files when copying
# c.TerminalIPythonApp.overwrite = False
# Execute the given command string.
# c.TerminalIPythonApp.code_to_run = ''
# Set the log level by value or name.
# c.TerminalIPythonApp.log_level = 30
# lines of code to run at IPython startup.
# c.TerminalIPythonApp.exec_lines = []
# Suppress warning messages about legacy config files
# c.TerminalIPythonApp.ignore_old_config = False
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.TerminalIPythonApp.extra_config_file = u''
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.TerminalIPythonApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.TerminalIPythonApp.extra_extension = ''
# A file to be run
# c.TerminalIPythonApp.file_to_run = ''
# The IPython profile to use.
# c.TerminalIPythonApp.profile = u'default'
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.TerminalIPythonApp.matplotlib = None
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.TerminalIPythonApp.force_interact = False
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.TerminalIPythonApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.TerminalIPythonApp.ipython_dir = u''
# Whether to display a banner upon starting IPython.
# c.TerminalIPythonApp.display_banner = True
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.TerminalIPythonApp.copy_config_files = False
# List of files to run at IPython startup.
# c.TerminalIPythonApp.exec_files = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.TerminalIPythonApp.gui = None
# A list of dotted module names of IPython extensions to load.
# c.TerminalIPythonApp.extensions = []
# Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
# The Logging format template
# c.TerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
#------------------------------------------------------------------------------
# TerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# TerminalInteractiveShell will inherit config from: InteractiveShell
# auto editing of files with syntax errors.
# c.TerminalInteractiveShell.autoedit_syntax = False
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.TerminalInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.TerminalInteractiveShell.ast_transformers = []
#
# c.TerminalInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.TerminalInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.TerminalInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.TerminalInteractiveShell.colors = 'Linux'
# Autoindent IPython code entered interactively.
# c.TerminalInteractiveShell.autoindent = True
#
# c.TerminalInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.TerminalInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.TerminalInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.TerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.TerminalInteractiveShell.autocall = 0
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.TerminalInteractiveShell.screen_length = 0
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.TerminalInteractiveShell.editor = 'vi'
# Deprecated, use PromptManager.justify
# c.TerminalInteractiveShell.prompts_pad_left = True
# The part of the banner to be printed before the profile
# c.TerminalInteractiveShell.banner1 = 'Python 2.7.3 (default, Feb 27 2014, 19:58:35) \nType "copyright", "credits" or "license" for more information.\n\nIPython 2.2.0 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
#
# c.TerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# The part of the banner to be printed after the profile
# c.TerminalInteractiveShell.banner2 = ''
#
# c.TerminalInteractiveShell.separate_out2 = ''
#
# c.TerminalInteractiveShell.wildcards_case_sensitive = True
#
# c.TerminalInteractiveShell.debug = False
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.TerminalInteractiveShell.confirm_exit = True
#
# c.TerminalInteractiveShell.ipython_dir = ''
#
# c.TerminalInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.TerminalInteractiveShell.logstart = False
# The name of the logfile to use.
# c.TerminalInteractiveShell.logfile = ''
# The shell program to be used for paging.
# c.TerminalInteractiveShell.pager = 'less'
# Enable magic commands to be called without the leading %.
# c.TerminalInteractiveShell.automagic = True
# Save multi-line entries as one entry in readline history
# c.TerminalInteractiveShell.multiline_history = True
#
# c.TerminalInteractiveShell.readline_use = True
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.TerminalInteractiveShell.deep_reload = False
# Start logging to the given file in append mode.
# c.TerminalInteractiveShell.logappend = ''
#
# c.TerminalInteractiveShell.xmode = 'Context'
#
# c.TerminalInteractiveShell.quiet = False
# Enable auto setting the terminal title.
# c.TerminalInteractiveShell.term_title = False
#
# c.TerminalInteractiveShell.object_info_string_level = 0
# Deprecated, use PromptManager.out_template
# c.TerminalInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.TerminalInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.TerminalInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.TerminalInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# PromptManager configuration
#------------------------------------------------------------------------------
# This is the primary interface for producing IPython's prompts.
# Output prompt. '\#' will be transformed to the prompt number
# c.PromptManager.out_template = 'Out[\\#]: '
# Continuation prompt.
# c.PromptManager.in2_template = ' .\\D.: '
# If True (default), each prompt will be right-aligned with the preceding one.
# c.PromptManager.justify = True
# Input prompt. '\#' will be transformed to the prompt number
# c.PromptManager.in_template = 'In [\\#]: '
#
# c.PromptManager.color_scheme = 'Linux'
#------------------------------------------------------------------------------
# HistoryManager configuration
#------------------------------------------------------------------------------
# A class to organize all history-related functionality in one place.
# HistoryManager will inherit config from: HistoryAccessor
# Should the history database include output? (default: no)
# c.HistoryManager.db_log_output = False
# Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
# c.HistoryManager.db_cache_size = 0
# Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
# c.HistoryManager.hist_file = u''
# Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
# c.HistoryManager.connection_options = {}
# enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
# c.HistoryManager.enabled = True
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# PlainTextFormatter configuration
#------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
# PlainTextFormatter will inherit config from: BaseFormatter
#
# c.PlainTextFormatter.type_printers = {}
#
# c.PlainTextFormatter.newline = '\n'
#
# c.PlainTextFormatter.float_precision = ''
#
# c.PlainTextFormatter.verbose = False
#
# c.PlainTextFormatter.deferred_printers = {}
#
# c.PlainTextFormatter.pprint = True
#
# c.PlainTextFormatter.max_width = 79
#
# c.PlainTextFormatter.singleton_printers = {}
#------------------------------------------------------------------------------
# IPCompleter configuration
#------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# IPCompleter will inherit config from: Completer
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# c.IPCompleter.limit_to__all__ = False
# Activate greedy completion
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.IPCompleter.greedy = False
#------------------------------------------------------------------------------
# ScriptMagics configuration
#------------------------------------------------------------------------------
# Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
# Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
# c.ScriptMagics.script_magics = []
# Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# c.ScriptMagics.script_paths = {}
#------------------------------------------------------------------------------
# StoreMagics configuration
#------------------------------------------------------------------------------
# Lightweight persistence for python variables.
#
# Provides the %store magic.
# If True, any %store-d variables will be automatically restored when IPython
# starts.
# c.StoreMagics.autorestore = False
| mit |
eickenberg/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 10 | 8104 | """ Unsupervised evaluation metrics. """
# Authors: Robert Layton <robertlayton@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
Note that the Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette
Coefficient. If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples when
``sample_size is not None``. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
    if not 2 <= n_labels <= n_samples - 1:
        raise ValueError("Number of labels is %d. "
                         "Valid values are 2 to n_samples - 1 "
                         "(inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
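# Hedged usage sketch (added for illustration, not part of the original
# module); the demo data, KMeans parameters and variable names below are
# assumptions:
#
#     from sklearn.cluster import KMeans
#     from sklearn.datasets import make_blobs
#
#     X_demo, _ = make_blobs(n_samples=300, centers=3, random_state=0)
#     demo_labels = KMeans(n_clusters=3, random_state=0).fit_predict(X_demo)
#     silhouette_score(X_demo, demo_labels)  # value in (-1, 1]; higher means
#                                            # tighter, better-separated clusters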
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
Note that the Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
Silhouette Coefficient for each sample.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
# nan values are for clusters of size 1, and should be 0
return np.nan_to_num(sil_samples)
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Array of distances between sample ``i`` and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Array of distances between sample ``i`` and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
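# Added worked example (comments only; the numbers are illustrative
# assumptions): with labels = [0, 0, 1, 1] and i = 0,
#   a = _intra_cluster_distance   -> mean distance from sample 0 to sample 1
#   b = _nearest_cluster_distance -> mean distance from sample 0 to samples 2, 3
# and the per-sample coefficient computed above is (b - a) / np.maximum(a, b).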
| bsd-3-clause |
jason-z-hang/airflow | airflow/contrib/plugins/metastore_browser/main.py | 42 | 5126 | from datetime import datetime
import json
from flask import Blueprint, request
from flask.ext.admin import BaseView, expose
import pandas as pd
from airflow.hooks import HiveMetastoreHook, MySqlHook, PrestoHook, HiveCliHook
from airflow.plugins_manager import AirflowPlugin
from airflow.www import utils as wwwutils
METASTORE_CONN_ID = 'metastore_default'
METASTORE_MYSQL_CONN_ID = 'metastore_mysql'
PRESTO_CONN_ID = 'presto_default'
HIVE_CLI_CONN_ID = 'hive_default'
DEFAULT_DB = 'default'
DB_WHITELIST = None
DB_BLACKLIST = ['tmp']
TABLE_SELECTOR_LIMIT = 2000
# Keeping pandas from truncating long strings
pd.set_option('display.max_colwidth', -1)
# Creating a flask admin BaseView
class MetastoreBrowserView(BaseView, wwwutils.DataProfilingMixin):
@expose('/')
def index(self):
sql = """
SELECT
a.name as db, db_location_uri as location,
count(1) as object_count, a.desc as description
FROM DBS a
JOIN TBLS b ON a.DB_ID = b.DB_ID
GROUP BY a.name, db_location_uri, a.desc
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
df.db = (
'<a href="/admin/metastorebrowserview/db/?db=' +
df.db + '">' + df.db + '</a>')
table = df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
escape=False,
na_rep='',)
return self.render(
"metastore_browser/dbs.html", table=table)
@expose('/table/')
def table(self):
table_name = request.args.get("table")
m = HiveMetastoreHook(METASTORE_CONN_ID)
table = m.get_table(table_name)
return self.render(
"metastore_browser/table.html",
table=table, table_name=table_name, datetime=datetime, int=int)
@expose('/db/')
def db(self):
db = request.args.get("db")
m = HiveMetastoreHook(METASTORE_CONN_ID)
tables = sorted(m.get_tables(db=db), key=lambda x: x.tableName)
return self.render(
"metastore_browser/db.html", tables=tables, db=db)
@wwwutils.gzipped
@expose('/partitions/')
def partitions(self):
schema, table = request.args.get("table").split('.')
sql = """
SELECT
a.PART_NAME,
a.CREATE_TIME,
c.LOCATION,
c.IS_COMPRESSED,
c.INPUT_FORMAT,
c.OUTPUT_FORMAT
FROM PARTITIONS a
JOIN TBLS b ON a.TBL_ID = b.TBL_ID
JOIN DBS d ON b.DB_ID = d.DB_ID
JOIN SDS c ON a.SD_ID = c.SD_ID
WHERE
b.TBL_NAME like '{table}' AND
d.NAME like '{schema}'
ORDER BY PART_NAME DESC
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@wwwutils.gzipped
@expose('/objects/')
def objects(self):
where_clause = ''
if DB_WHITELIST:
dbs = ",".join(["'" + db + "'" for db in DB_WHITELIST])
where_clause = "AND b.name IN ({})".format(dbs)
if DB_BLACKLIST:
dbs = ",".join(["'" + db + "'" for db in DB_BLACKLIST])
where_clause = "AND b.name NOT IN ({})".format(dbs)
sql = """
SELECT CONCAT(b.NAME, '.', a.TBL_NAME), TBL_TYPE
FROM TBLS a
JOIN DBS b ON a.DB_ID = b.DB_ID
WHERE
a.TBL_NAME NOT LIKE '%tmp%' AND
a.TBL_NAME NOT LIKE '%temp%' AND
b.NAME NOT LIKE '%tmp%' AND
b.NAME NOT LIKE '%temp%'
{where_clause}
LIMIT {LIMIT};
""".format(where_clause=where_clause, LIMIT=TABLE_SELECTOR_LIMIT)
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
d = [
{'id': row[0], 'text': row[0]}
for row in h.get_records(sql)]
return json.dumps(d)
@wwwutils.gzipped
@expose('/data/')
def data(self):
table = request.args.get("table")
sql = "SELECT * FROM {table} LIMIT 1000;".format(table=table)
h = PrestoHook(PRESTO_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@expose('/ddl/')
def ddl(self):
table = request.args.get("table")
sql = "SHOW CREATE TABLE {table};".format(table=table)
h = HiveCliHook(HIVE_CLI_CONN_ID)
return h.run_cli(sql)
v = MetastoreBrowserView(category="Plugins", name="Hive Metadata Browser")
# Creating a flask blueprint to integrate the templates and static folder
bp = Blueprint(
"metastore_browser", __name__,
template_folder='templates',
static_folder='static',
static_url_path='/static/metastore_browser')
# Defining the plugin class
class MetastoreBrowserPlugin(AirflowPlugin):
name = "metastore_browser"
flask_blueprints = [bp]
admin_views = [v]
| apache-2.0 |
mjudsp/Tsallis | examples/classification/plot_digits_classification.py | 34 | 2409 | """
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 4 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# matplotlib.pyplot.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| bsd-3-clause |
rexshihaoren/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 249 | 1563 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
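# Added annotation (not in the original script): the curves plotted below are
# the unit balls of the penalties in 2-D weight space (w0, w1):
#   L1:          |w0| + |w1| = 1
#   L2:          w0**2 + w1**2 = 1
#   Elastic Net: a convex combination of the two, weighted by `alpha` below.
# l1(), l2() and el() each return the non-negative w1 on the curve for a given
# w0; the other quadrants are obtained by mirroring in the plotting calls, and
# cross() just draws the coordinate axes.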
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501  # not exactly 0.5, which would cause division by zero in el()
cross(1.2)
plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")
plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")
plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
| bsd-3-clause |
gtrensch/nest-simulator | pynest/examples/clopath_synapse_small_network.py | 8 | 7493 | # -*- coding: utf-8 -*-
#
# clopath_synapse_small_network.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Clopath Rule: Bidirectional connections
---------------------------------------
This script simulates a small network of ten excitatory and three
inhibitory ``aeif_psc_delta_clopath`` neurons. The neurons are randomly connected
and driven by 500 Poisson generators. The synapses from the Poisson generators
to the excitatory population and those among the neurons of the network
are Clopath synapses. The rate of the Poisson generators is modulated with
a Gaussian profile whose center shifts randomly each 100 ms between ten
equally spaced positions.
This setup demonstrates that the Clopath synapse is able to establish
bidirectional connections. The example is adapted from [1]_ (cf. fig. 5).
References
~~~~~~~~~~
.. [1] Clopath C, Büsing L, Vasilaki E, Gerstner W (2010). Connectivity reflects coding:
a model of voltage-based STDP with homeostasis.
Nature Neuroscience 13:3, 344--352
"""
import nest
import numpy as np
import matplotlib.pyplot as plt
import random
##############################################################################
# Set the parameters
simulation_time = 1.0e4
resolution = 0.1
delay = resolution
# Poisson_generator parameters
pg_A = 30. # amplitude of Gaussian
pg_sigma = 10. # std deviation
nest.ResetKernel()
nest.SetKernelStatus({'resolution': resolution})
# Create neurons and devices
nrn_model = 'aeif_psc_delta_clopath'
nrn_params = {'V_m': -30.6,
'g_L': 30.0,
'w': 0.0,
'tau_plus': 7.0,
'tau_minus': 10.0,
'tau_w': 144.0,
'a': 4.0,
'C_m': 281.0,
'Delta_T': 2.0,
'V_peak': 20.0,
't_clamp': 2.0,
'A_LTP': 8.0e-6,
'A_LTD': 14.0e-6,
'A_LTD_const': False,
'b': 0.0805,
'u_ref_squared': 60.0**2}
pop_exc = nest.Create(nrn_model, 10, nrn_params)
pop_inh = nest.Create(nrn_model, 3, nrn_params)
##############################################################################
# We need parrot neurons since Poisson generators can only be connected
# with static connections
pop_input = nest.Create('parrot_neuron', 500) # helper neurons
pg = nest.Create('poisson_generator', 500)
wr = nest.Create('weight_recorder')
##############################################################################
# First connect Poisson generators to helper neurons
nest.Connect(pg, pop_input, 'one_to_one', {'synapse_model': 'static_synapse',
'weight': 1.0, 'delay': delay})
##############################################################################
# Create all the connections
nest.CopyModel('clopath_synapse', 'clopath_input_to_exc',
{'Wmax': 3.0})
conn_dict_input_to_exc = {'rule': 'all_to_all'}
syn_dict_input_to_exc = {'synapse_model': 'clopath_input_to_exc',
'weight': nest.random.uniform(0.5, 2.0),
'delay': delay}
nest.Connect(pop_input, pop_exc, conn_dict_input_to_exc,
syn_dict_input_to_exc)
# Create input->inh connections
conn_dict_input_to_inh = {'rule': 'all_to_all'}
syn_dict_input_to_inh = {'synapse_model': 'static_synapse',
'weight': nest.random.uniform(0.0, 0.5),
'delay': delay}
nest.Connect(pop_input, pop_inh, conn_dict_input_to_inh, syn_dict_input_to_inh)
# Create exc->exc connections
nest.CopyModel('clopath_synapse', 'clopath_exc_to_exc',
{'Wmax': 0.75, 'weight_recorder': wr})
syn_dict_exc_to_exc = {'synapse_model': 'clopath_exc_to_exc', 'weight': 0.25,
'delay': delay}
conn_dict_exc_to_exc = {'rule': 'all_to_all', 'allow_autapses': False}
nest.Connect(pop_exc, pop_exc, conn_dict_exc_to_exc, syn_dict_exc_to_exc)
# Create exc->inh connections
syn_dict_exc_to_inh = {'synapse_model': 'static_synapse',
'weight': 1.0, 'delay': delay}
conn_dict_exc_to_inh = {'rule': 'fixed_indegree', 'indegree': 8}
nest.Connect(pop_exc, pop_inh, conn_dict_exc_to_inh, syn_dict_exc_to_inh)
# Create inh->exc connections
syn_dict_inh_to_exc = {'synapse_model': 'static_synapse',
'weight': 1.0, 'delay': delay}
conn_dict_inh_to_exc = {'rule': 'fixed_outdegree', 'outdegree': 6}
nest.Connect(pop_inh, pop_exc, conn_dict_inh_to_exc, syn_dict_inh_to_exc)
##############################################################################
# Randomize the initial membrane potential
pop_exc.V_m = nest.random.normal(-60., 25.)
pop_inh.V_m = nest.random.normal(-60., 25.)
##############################################################################
# Simulation divided into intervals of 100ms for shifting the Gaussian
sim_interval = 100.
for i in range(int(simulation_time/sim_interval)):
# set rates of poisson generators
rates = np.empty(500)
# pg_mu will be randomly chosen out of 25,75,125,...,425,475
pg_mu = 25 + random.randint(0, 9) * 50
for j in range(500):
rates[j] = pg_A * np.exp((-1 * (j - pg_mu)**2) / (2 * pg_sigma**2))
pg[j].rate = rates[j]*1.75
nest.Simulate(sim_interval)
##############################################################################
# Plot results
fig, ax = plt.subplots(1, sharex=False)
# Plot synapse weights of the synapses within the excitatory population
# Sort weights according to sender and reshape
exc_conns = nest.GetConnections(pop_exc, pop_exc)
exc_conns_senders = np.array(exc_conns.source)
exc_conns_targets = np.array(exc_conns.target)
exc_conns_weights = np.array(exc_conns.weight)
idx_array = np.argsort(exc_conns_senders)
targets = np.reshape(exc_conns_targets[idx_array], (10, 10 - 1))
weights = np.reshape(exc_conns_weights[idx_array], (10, 10 - 1))
# Sort according to target
for i, (trgs, ws) in enumerate(zip(targets, weights)):
idx_array = np.argsort(trgs)
weights[i] = ws[idx_array]
weight_matrix = np.zeros((10, 10))
tu9 = np.triu_indices_from(weights)
tl9 = np.tril_indices_from(weights, -1)
tu10 = np.triu_indices_from(weight_matrix, 1)
tl10 = np.tril_indices_from(weight_matrix, -1)
weight_matrix[tu10[0], tu10[1]] = weights[tu9[0], tu9[1]]
weight_matrix[tl10[0], tl10[1]] = weights[tl9[0], tl9[1]]
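# Added clarifying note: `weights` holds, for each of the 10 senders, its 9
# outgoing weights (autapses are disallowed). The triu/tril index pairs above
# scatter that (10, 9) array into the full (10, 10) weight_matrix while
# leaving the diagonal (self-connections) at zero.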
# Difference between initial and final value
init_w_matrix = np.ones((10, 10))*0.25
init_w_matrix -= np.identity(10)*0.25
cax = ax.imshow(weight_matrix - init_w_matrix)
cbarB = fig.colorbar(cax, ax=ax)
ax.set_xticks([0, 2, 4, 6, 8])
ax.set_xticklabels(['1', '3', '5', '7', '9'])
ax.set_yticks([0, 2, 4, 6, 8])
ax.set_yticklabels(['1', '3', '5', '7', '9'])
ax.set_xlabel("to neuron")
ax.set_ylabel("from neuron")
ax.set_title("Change of syn weights before and after simulation")
plt.show()
| gpl-2.0 |
huzq/scikit-learn | sklearn/tests/test_random_projection.py | 9 | 13850 |
import functools
from typing import List, Any
import numpy as np
import scipy.sparse as sp
import pytest
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import _gaussian_random_matrix
from sklearn.random_projection import _sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils._testing import assert_raises
from sklearn.utils._testing import assert_raise_message
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_warns
from sklearn.exceptions import DataDimensionalityWarning
all_sparse_random_matrix: List[Any] = [_sparse_random_matrix]
all_dense_random_matrix: List[Any] = [_gaussian_random_matrix]
all_random_matrix = all_sparse_random_matrix + all_dense_random_matrix
all_SparseRandomProjection: List[Any] = [SparseRandomProjection]
all_DenseRandomProjection: List[Any] = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, eps=1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, eps=0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, eps=-0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, eps=0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], eps=2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
eps=2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
eps=np.full((10, 10), 0.5))
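# Added reference point: with the tolerance exercised later in this file,
# johnson_lindenstrauss_min_dim(1000, eps=0.1) evaluates to 5920 components
# (cf. the expected error message in
# test_too_many_samples_to_find_a_safe_embedding below).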
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert random_matrix(1, 5).shape == (1, 5)
assert random_matrix(5, 1).shape == (5, 1)
assert random_matrix(5, 5).shape == (5, 5)
assert random_matrix(1, 1).shape == (1, 1)
def check_zero_mean_and_unit_norm(random_matrix):
# All random matrix should produce a transformation matrix
# with zero mean and unit norm for each columns
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
@pytest.mark.parametrize("random_matrix", all_random_matrix)
def test_basic_property_of_random_matrix(random_matrix):
# Check basic properties of random matrix generation
check_input_size_random_matrix(random_matrix)
check_size_generated(random_matrix)
check_zero_mean_and_unit_norm(random_matrix)
@pytest.mark.parametrize("random_matrix", all_sparse_random_matrix)
def test_basic_property_of_sparse_random_matrix(random_matrix):
check_input_with_sparse_random_matrix(random_matrix)
random_matrix_dense = functools.partial(random_matrix, density=1.0)
check_zero_mean_and_unit_norm(random_matrix_dense)
def test_gaussian_random_matrix():
# Check some statistical properties of Gaussian random matrix
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = _gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
# Check some statistical properties of sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = _sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert np.sqrt(s) / np.sqrt(n_components) in values
assert - np.sqrt(s) / np.sqrt(n_components) in values
if density == 1.0:
assert np.size(values) == 2
else:
assert 0. in values
assert np.size(values) == 3
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [[0, 1, 2]])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert distances_ratio.max() < 1 + eps
assert 1 - eps < distances_ratio.min()
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert rp.n_components == 'auto'
assert rp.n_components_ == 110
if RandomProjection in all_SparseRandomProjection:
assert rp.density == 'auto'
assert_almost_equal(rp.density_, 0.03, 2)
assert rp.components_.shape == (110, n_features)
projected_1 = rp.transform(data)
assert projected_1.shape == (n_samples, 110)
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert projected.shape == (n_samples, 100)
assert rp.components_.shape == (100, n_features)
assert rp.components_.nnz < 115 # close to 1% density
assert 85 < rp.components_.nnz # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
| bsd-3-clause |
appapantula/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
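# Added usage note: for the 8x8 digit images used below, nudge_dataset maps an
# (n, 64) X and an (n,) Y to shapes (5 * n, 64) and (5 * n,): the original
# images plus one-pixel shifts to the left, right, down and up.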
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
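# A minimal sketch of how such a grid search could be wired up with the pre-0.18
# sklearn.grid_search API (matching the sklearn.cross_validation import above).
# The parameter ranges are illustrative assumptions, not the search that produced
# the settings used here; the function is defined but never called.
def sketch_hyperparameter_search(X, Y):
    from sklearn.grid_search import GridSearchCV
    pipeline = Pipeline(steps=[('rbm', BernoulliRBM(random_state=0)),
                               ('logistic', linear_model.LogisticRegression())])
    param_grid = {
        'rbm__learning_rate': [0.01, 0.06, 0.1],
        'rbm__n_iter': [10, 20],
        'rbm__n_components': [50, 100, 200],
        'logistic__C': [100.0, 1000.0, 6000.0],
    }
    search = GridSearchCV(pipeline, param_grid)
    search.fit(X, Y)
    return search.best_params_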
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
selective-inference/selective-inference | doc/learning_examples/HIV/stability_CV_6000.py | 3 | 3166 | import functools
import numpy as np
from scipy.stats import norm as ndist
import regreg.api as rr
# load in the X matrix
from selection.tests.instance import HIV_NRTI
X_full = HIV_NRTI(datafile="NRTI_DATA.txt", standardize=False)[0]
from selection.learning.utils import full_model_inference, liu_inference, pivot_plot
from selection.learning.core import split_sampler, keras_fit
from selection.learning.Rutils import lasso_glmnet, cv_glmnet_lam
boot_design = False
def simulate(s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=6000, seed=0):
# description of statistical problem
n, p = X_full.shape
if boot_design:
idx = np.random.choice(np.arange(n), n, replace=True)
X = X_full[idx] # bootstrap X to make it really an IID sample, i.e. don't condition on X throughout
X += 0.1 * np.std(X) * np.random.standard_normal(X.shape) # to make non-degenerate
else:
X = X_full.copy()
X = X - np.mean(X, 0)[None, :]
X = X / np.std(X, 0)[None, :]
n, p = X.shape
truth = np.zeros(p)
truth[:s] = np.linspace(signal[0], signal[1], s)
np.random.shuffle(truth)
truth /= np.sqrt(n)
truth *= sigma
y = X.dot(truth) + sigma * np.random.standard_normal(n)
XTX = X.T.dot(X)
XTXi = np.linalg.inv(XTX)
resid = y - X.dot(XTXi.dot(X.T.dot(y)))
dispersion = np.linalg.norm(resid)**2 / (n-p)
S = X.T.dot(y)
covS = dispersion * X.T.dot(X)
print(dispersion, sigma**2)
splitting_sampler = split_sampler(X * y[:, None], covS)
def meta_algorithm(X, XTXi, resid, sampler):
S = sampler(scale=0.5) # deterministic with scale=0
ynew = X.dot(XTXi).dot(S) + resid # will be ok for n>p and non-degen X
G = lasso_glmnet(X, ynew, *[None]*4)
select = G.select(seed=seed)
return set(list(select[0]))
selection_algorithm = functools.partial(meta_algorithm, X, XTXi, resid)
# run selection algorithm
df = full_model_inference(X,
y,
truth,
selection_algorithm,
splitting_sampler,
success_params=(6, 10),
B=B,
fit_probability=keras_fit,
fit_args={'epochs':10, 'sizes':[100]*5, 'dropout':0., 'activation':'relu'})
return df
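# Self-contained sketch (illustrative only, not part of the original pipeline) of
# the OLS dispersion estimate computed inside simulate(), i.e.
# ||y - X (X'X)^{-1} X'y||^2 / (n - p), on a small synthetic design.
def _sketch_dispersion_estimate(n=50, p=5, sigma=1.5, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.standard_normal((n, p))
    y = X.dot(rng.standard_normal(p)) + sigma * rng.standard_normal(n)
    XTXi = np.linalg.inv(X.T.dot(X))
    resid = y - X.dot(XTXi.dot(X.T.dot(y)))
    return np.linalg.norm(resid)**2 / (n - p)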
if __name__ == "__main__":
import statsmodels.api as sm
import matplotlib.pyplot as plt
import pandas as pd
U = np.linspace(0, 1, 101)
plt.clf()
init_seed = np.fabs(np.random.standard_normal() * 500)
for i in range(500):
df = simulate(seed=init_seed+i)
csvfile = 'HIV_stability_CV_6000.csv'
outbase = csvfile[:-4]
if df is not None or i > 0:
try:
df = pd.concat([df, pd.read_csv(csvfile)])
except FileNotFoundError:
pass
if df is not None:
df.to_csv(csvfile, index=False)
if len(df['pivot']) > 0:
pivot_ax, lengths_ax = pivot_plot(df, outbase)
| bsd-3-clause |
evanthebouncy/nnhmm | radar_lstm/draw.py | 6 | 2538 | import numpy as np
import matplotlib.pylab as plt
import multiprocessing as mp
from matplotlib import figure
# m = [[0.0, 1.47, 2.43, 3.44, 1.08, 2.83, 1.08, 2.13, 2.11, 3.7], [1.47, 0.0, 1.5, 2.39, 2.11, 2.4, 2.11, 1.1, 1.1, 3.21], [2.43, 1.5, 0.0, 1.22, 2.69, 1.33, 3.39, 2.15, 2.12, 1.87], [3.44, 2.39, 1.22, 0.0, 3.45, 2.22, 4.34, 2.54, 3.04, 2.28], [1.08, 2.11, 2.69, 3.45, 0.0, 3.13, 1.76, 2.46, 3.02, 3.85], [2.83, 2.4, 1.33, 2.22, 3.13, 0.0, 3.83, 3.32, 2.73, 0.95], [1.08, 2.11, 3.39, 4.34, 1.76, 3.83, 0.0, 2.47, 2.44, 4.74], [2.13, 1.1, 2.15, 2.54, 2.46, 3.32, 2.47, 0.0, 1.78, 4.01], [2.11, 1.1, 2.12, 3.04, 3.02, 2.73, 2.44, 1.78, 0.0, 3.57], [3.7, 3.21, 1.87, 2.28, 3.85, 0.95, 4.74, 4.01, 3.57, 0.0]]
FIG = plt.figure()
def draw_coord(coord, name, lab=[1.0, 0.0]):
color = 1.0 if lab[0] > lab[1] else -1.0
ret = np.zeros(shape=[20,20,1])
coord_x, coord_y = coord
coord_x_idx = np.argmax(coord_x)
coord_y_idx = np.argmax(coord_y)
ret[coord_x_idx][coord_y_idx][0] = color
draw(ret, name)
def draw(m, name):
FIG.clf()
matrix = m
orig_shape = np.shape(matrix)
  # reshape drops the trailing channel dimension from orig_shape
new_shape = orig_shape[:-1]
matrix = np.reshape(matrix, new_shape)
ax = FIG.add_subplot(1,1,1)
ax.set_aspect('equal')
plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.gray)
# plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.ocean)
plt.colorbar()
plt.savefig(name)
def draw_obs(obs, name):
ret_shape = [20, 20, 1]
ret = np.zeros(shape=ret_shape)
for ii, ob in enumerate(obs):
if ob.max() > 0.0:
idxx = np.unravel_index(ob.argmax(), ob.shape)
if idxx[-1] == 0:
ret[idxx[0]][idxx[1]] = 1.0 * ii
else:
ret[idxx[0]][idxx[1]] = -1.0 * ii
draw(ret, name)
def draw_annotate(x_cords, y_cords, anns, name):
FIG.clf()
y = x_cords
z = y_cords
n = anns
fig = FIG
ax = fig.add_subplot(1,1,1)
ax.set_xlim([0,20])
ax.set_ylim([0,20])
ax.scatter(z, y)
for i, txt in enumerate(n):
ax.annotate(txt, (z[i],y[i]))
fig.savefig(name)
def draw_trace(trace, name):
x_coords = []
y_coords = []
anno = []
for i, stuff in enumerate(trace):
ob, inv = stuff
# x_coords.append(inv[0])
# y_coords.append(inv[1])
# anno.append("X"+str(i))
    if ob is not None:
ob_coord, ob_outcome = ob
x_coords.append(ob_coord[0])
y_coords.append(ob_coord[1])
anno.append("O"+str(i)+str(int(ob_outcome[0])))
draw_annotate(x_coords, y_coords, anno, name)
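# Minimal usage sketch (hypothetical file name and coordinates, not taken from
# the original experiments): render a single one-hot (x, y) coordinate into the
# 20x20 grid image written by draw(). Guarded so importing this module is unaffected.
if __name__ == "__main__":
  example_x = np.zeros(20)
  example_x[3] = 1.0
  example_y = np.zeros(20)
  example_y[7] = 1.0
  draw_coord((example_x, example_y), "example_coord.png", lab=[1.0, 0.0])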
| mit |
zhujianwei31415/dcnnfold | scripts/evaluation/evaluate_spec_sens_three_levels.py | 2 | 1892 | #! /usr/bin/env python
#
# Copyright
# Author: zhujianwei@ict.ac.cn (Jianwei Zhu)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sys
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
# read in score function
def read_score(score_file):
labels, values = [], []
with open(score_file, 'r') as fin:
for line in fin:
cols = line.split()
labels.append(0 if cols[0] == '-1' else 1)
values.append(float(cols[1]))
return np.array(labels), np.array(values)
# calculate curve values (note: precision_recall_curve returns precision and recall, which this script stores and reports as spec/sens)
def cal_spec_sens(y_test, y_score):
spec, sens, _ = precision_recall_curve(y_test, y_score)
return spec, sens
# write specificity and sensitivity
def write_spect_sens(spec, sens, outfile):
with open(outfile, 'w') as fout:
for j, _ in enumerate(spec):
print('%10.8f %10.8f' % (spec[j], sens[j]), file=fout)
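# Input format assumed by read_score above: one "<label> <score>" pair per line,
# with label "-1" marking a negative example. The helper below is a small
# self-contained sketch (toy values, not real predictions) of the curve
# computation used for all three levels; it is defined but not called.
def _toy_curve_example():
    toy_labels = np.array([1, 1, 0, 1, 0])
    toy_scores = np.array([0.9, 0.8, 0.7, 0.4, 0.2])
    # returns the precision/recall arrays that this script stores as spec/sens
    return cal_spec_sens(toy_labels, toy_scores)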
def main(family_score, superfamily_score, fold_score):
# calculate family level
y_test, y_score = read_score(family_score)
spec, sens = cal_spec_sens(y_test, y_score)
write_spect_sens(spec, sens, 'spec-sens-family')
# calculate superfamily level
y_test, y_score = read_score(superfamily_score)
spec, sens = cal_spec_sens(y_test, y_score)
write_spect_sens(spec, sens, 'spec-sens-superfamily')
# calculate fold level
y_test, y_score = read_score(fold_score)
spec, sens = cal_spec_sens(y_test, y_score)
write_spect_sens(spec, sens, 'spec-sens-fold')
if __name__ == '__main__':
if len(sys.argv) != 4:
sys.exit('Usage: %s <value-family> <value-superfamily> <value-fold>' % sys.argv[0])
family_score, superfamily_score, fold_score = sys.argv[1:]
main(family_score, superfamily_score, fold_score)
| gpl-3.0 |
jcrist/blaze | blaze/compute/tests/test_bcolz_compute.py | 9 | 5874 | from __future__ import absolute_import, division, print_function
import pytest
bcolz = pytest.importorskip('bcolz')
from datashape import discover, dshape
import numpy as np
import pandas.util.testing as tm
from odo import into
from blaze import by
from blaze.expr import symbol
from blaze.compute.core import compute, pre_compute
from blaze.compute.bcolz import get_chunksize
b = bcolz.ctable(np.array([(1, 1., np.datetime64('2010-01-01')),
(2, 2., np.datetime64('NaT')),
(3, 3., np.datetime64('2010-01-03'))],
dtype=[('a', 'i8'),
('b', 'f8'),
('date', 'datetime64[D]')]))
t = symbol('t', 'var * {a: int64, b: float64, date: ?date}')
to = symbol('to', 'var * {a: int64, b: float64}')
bo = bcolz.ctable(np.array([(1, 1.), (2, 2.), (3, np.nan)],
dtype=[('a', 'i8'), ('b', 'f8')]))
def test_discover():
assert discover(b) == dshape('3 * {a: int64, b: float64, date: date}')
assert discover(b['a']) == dshape('3 * int64')
def test_reductions():
assert compute(t.a.sum(), b) == 6
assert compute(t.a.min(), b) == 1
assert compute(t.a.max(), b) == 3
assert compute(t.a.mean(), b) == 2.0
assert abs(compute(t.a.std(), b) - np.std([1, 2, 3])) < 1e-5
assert abs(compute(t.a.var(), b) - np.var([1, 2, 3])) < 1e-5
assert abs(compute(t.a.std(unbiased=True), b) - np.std([1, 2, 3],
ddof=1)) < 1e-5
assert abs(compute(t.a.var(unbiased=True), b) - np.var([1, 2, 3],
ddof=1)) < 1e-5
assert len(list(compute(t.distinct(), b))) == 3
assert len(list(compute(t.a.distinct(), b))) == 3
assert compute(t.a.nunique(), b) == 3
assert isinstance(compute(t.a.nunique(), b), np.integer)
assert compute(t.a.count(), b) == 3
assert isinstance(compute(t.date.count(), b), np.integer)
assert compute(t.date.nunique(), b) == 2
assert isinstance(compute(t.date.nunique(), b), np.integer)
assert compute(t.date.count(), b) == 2
assert isinstance(compute(t.a.count(), b), np.integer)
assert compute(t.a[0], b) == 1
assert compute(t.a[-1], b) == 3
assert compute(t[0], b) == compute(t[0], b)
assert compute(t[-1], b) == compute(t[-1], b)
def test_nunique():
assert compute(t.a.nunique(), b) == 3
assert compute(t.nunique(), b) == 3
def test_selection_head():
ds = dshape('var * {a: int32, b: int32, c: float64}')
b = into(bcolz.ctable,
[(i, i + 1, float(i) ** 2) for i in range(10)],
dshape=ds)
t = symbol('t', ds)
# numpy reductions return numpy scalars
assert compute((t.a < t.b).all(), b).item() is True
assert list(compute(t[t.a < t.b].a.head(10), b)) == list(range(10))
assert list(compute(t[t.a > t.b].a.head(10), b)) == []
assert into([], compute(t[t.a + t.b > t.c], b)) == [(0, 1, 0),
(1, 2, 1),
(2, 3, 4)]
assert len(compute(t[t.a + t.b > t.c].head(10), b)) # non-empty
assert len(compute(t[t.a + t.b < t.c].head(10), b)) # non-empty
def test_selection_isnan():
b = bcolz.ctable([[1, np.nan, 3], [1., 2., np.nan]], names=['a', 'b'])
t = symbol('t', discover(b))
lhs = compute(t[t.a.isnan()], b)
rhs = np.array([(np.nan, 2.0)], dtype=b.dtype)
for n in b.dtype.names:
assert np.isclose(lhs[n], rhs[n], equal_nan=True).all()
assert np.isclose(compute(t[~t.b.isnan()], b)[n],
np.array(
[(1, 1.0), (np.nan, 2.0)], dtype=b.dtype)[n],
equal_nan=True).all()
def test_count_isnan():
assert compute(to.a[~to.b.isnan()].count(), bo) == 2
def test_count_isnan_object():
assert compute(to.a[~to.b.isnan()].count(), bo) == 2
def test_count_isnan_struct():
assert compute(t[~t.b.isnan()].count(), b) == 3
def test_nrows():
assert compute(t.nrows, b) == len(b)
def test_nelements():
assert compute(t.nelements(axis=0), b) == len(b)
assert compute(t.nelements(), b) == len(b)
# This is no longer desired. Handled by compute_up
def dont_test_pre_compute():
b = bcolz.ctable(np.array([(1, 1., 10.), (2, 2., 20.), (3, 3., 30.)],
dtype=[('a', 'i8'), ('b', 'f8'), ('c', 'f8')]))
s = symbol('s', discover(b))
result = pre_compute(s[['a', 'b']], b)
assert result.names == ['a', 'b']
def eq(a, b):
return np.array_equal(a, b)
def test_unicode_field_names():
b = bcolz.ctable(np.array([(1, 1., 10.), (2, 2., 20.), (3, 3., 30.)],
dtype=[('a', 'i8'), ('b', 'f8'), ('c', 'f8')]))
s = symbol('s', discover(b))
assert eq(compute(s[u'a'], b)[:], compute(s['a'], b)[:])
assert eq(compute(s[[u'a', u'c']], b)[:], compute(s[['a', 'c']], b)[:])
assert eq(compute(s[u'a'], b)[:],
compute(s['a'], b)[:])
assert eq(compute(s[[u'a', u'c']], b)[:],
compute(s[['a', 'c']], b)[:])
def test_chunksize_inference():
b = bcolz.ctable(np.array([(1, 1., 10.), (2, 2., 20.), (3, 3., 30.)],
dtype=[('a', 'i8'), ('b', 'f8'), ('c', 'f8')]),
chunklen=2)
assert get_chunksize(b) == 2
def test_notnull():
with pytest.raises(AttributeError):
t.b.notnull
def test_by_with_single_row():
ct = bcolz.ctable([[1, 1, 3, 3], [1, 2, 3, 4]], names=list('ab'))
t = symbol('t', discover(ct))
subset = t[t.a == 3]
expr = by(subset.a, b_sum=subset.b.sum())
result = compute(expr, ct)
expected = compute(expr, ct, optimize=False)
tm.assert_frame_equal(result, expected)
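# Extra illustrative sketch (not part of the original suite): the same lazy
# symbol -> compute flow exercised above, applied to the module-level ctable b.
# The name lacks the test_ prefix, so test runners will not collect it.
def sketch_reduction_usage():
    # mirrors the reductions in test_reductions, purely for illustration
    return compute(t.b.sum(), b), compute(t.a.max(), b)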
| bsd-3-clause |
kmike/scikit-learn | examples/plot_lda_qda.py | 12 | 4758 | """
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import pylab as pl
import matplotlib as mpl
from matplotlib import colors
from sklearn.lda import LDA
from sklearn.qda import QDA
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
pl.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
    '''Generate 2 Gaussian samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
    '''Generate 2 Gaussian samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = pl.subplot(2, 2, fig_index)
if fig_index == 1:
pl.title('Linear Discriminant Analysis')
pl.ylabel('Data with fixed covariance')
elif fig_index == 2:
pl.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
pl.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
xmin, xmax = X[:, 0].min(), X[:, 0].max()
ymin, ymax = X[:, 1].min(), X[:, 1].max()
# class 0: dots
pl.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
pl.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
pl.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
pl.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = pl.xlim()
y_min, y_max = pl.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
pl.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
pl.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
pl.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
pl.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
    # filled Gaussian at 2 standard deviations
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
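# Quick illustrative check (not part of the original example): for a diagonal
# covariance, the ellipse axes computed in plot_ellipse reduce to two standard
# deviations along each coordinate; eigh returns eigenvalues in ascending order.
def _sketch_ellipse_axes(var_x=4.0, var_y=1.0):
    v, w = linalg.eigh(np.diag([var_x, var_y]))
    return 2 * v ** 0.5  # e.g. [2.0, 4.0] for var_x=4, var_y=1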
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# LDA
lda = LDA()
y_pred = lda.fit(X, y, store_covariance=True).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
pl.axis('tight')
# QDA
qda = QDA()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
pl.axis('tight')
pl.suptitle('LDA vs QDA')
pl.show()
| bsd-3-clause |
BoldingBruggeman/gotm | gui.py/xmlplot/data/gotmtext.py | 1 | 35659 | import os, StringIO
import numpy
import xmlstore.xmlstore
import xmlplot.common
class LinkedFileVariableStore(xmlplot.common.VariableStore,xmlstore.datatypes.DataFileEx):
# XML store-derived class for storing (cached) metadata of a data file,
# such as coordinate ranges.
# This is implemented as XML store (rather than Python object) because it
# needs to be saved in a descriptive form along with the data files themselves.
class DataFileCache(xmlstore.xmlstore.TypedStore):
@classmethod
def getSchemaInfo(cls):
return xmlstore.xmlstore.schemainfocache[os.path.join(xmlplot.common.getDataRoot(),'schemas/datafilecache')]
def __init__(self,valueroot=None,adddefault = True,schema=None):
if schema is None: schema = os.path.join(xmlplot.common.getDataRoot(),'schemas/datafilecache/0001.schema')
xmlstore.xmlstore.TypedStore.__init__(self,schema,valueroot,adddefault=adddefault)
class LinkedFileVariable(xmlplot.common.Variable):
def __init__(self,store,data,index):
xmlplot.common.Variable.__init__(self,store)
self.store = store
self.data = data
self.index = index
def getName_raw(self):
return self.data[0]
def getLongName(self):
return self.data[1]
def getUnit(self):
return self.data[2]
def getDimensions_raw(self):
return self.store.dimensionorder[:]
def getSlice(self,bounds):
assert False, 'This function must be implemented by inheriting class.'
@classmethod
def createTypedStore(ownclass):
return LinkedFileVariableStore.DataFileCache()
linkedfilename = 'linkedfile_metadata.xml'
rootnodename = 'DataFile'
@classmethod
def createObject(ownclass,datafile,context,infonode,nodename):
finfo = xmlstore.util.findDescendantNode(infonode,['fileinfo'])
        assert finfo is not None, 'Node "%s" lacks "fileinfo" attribute.' % nodename
store = None
type = finfo.getAttribute('type')
if type=='pointsintime':
store = LinkedMatrix(datafile,context,infonode,nodename,type=0,dimensions={'time':{'label':'time','datatype':'datetime','preferredaxis':'x'}},dimensionorder=('time',))
elif type=='profilesintime':
store = LinkedProfilesInTime(datafile,context,infonode,nodename,dimensions={'time':{'label':'time','datatype':'datetime','preferredaxis':'x'},'z':{'label':'depth','unit':'m','preferredaxis':'y'}},dimensionorder=('time','z'))
elif type=='singleprofile':
store = LinkedMatrix(datafile,context,infonode,nodename,type=1)
else:
            assert False, 'Linked file has unknown type "%s".' % type
return store
# Dictionary linking our data type names to MatPlotLib data types.
# Note that times are stored as numeric values (via matplotlib.dates.date2num)
mpldatatypes = {'datetime':numpy.float64,
'float': numpy.float32,
'float32': numpy.float32,
'float64': numpy.float64}
def __init__(self,datafile,context,infonode,nodename,dimensions={},dimensionorder=(),variables=[],datatype='float',defaultfilename='data'):
xmlplot.common.VariableStore.__init__(self)
xmlstore.datatypes.DataFileEx.__init__(self,datafile,context,infonode,nodename)
# Copy data from supplied dimensions and variables
self.dimensions = {}
for dimname,dimdata in dimensions.iteritems():
self.dimensions[dimname] = xmlplot.common.VariableStore.getDimensionInfo_raw(self,None)
self.dimensions[dimname].update(dimdata)
self.vardata = list(variables)
self.dimensionorder = list(dimensionorder)
# Supplement dimensions and variables with information in
# supplied XML node (if any)
self.filename = defaultfilename
if infonode is not None:
finfo = xmlstore.util.findDescendantNode(infonode,['fileinfo'])
self.filename = infonode.getAttribute('name')
if finfo.hasAttribute('datatype'): datatype = finfo.getAttribute('datatype')
# Get variables
fvars = xmlstore.util.findDescendantNode(finfo,['filevariables'])
if fvars is not None:
for ch in fvars.childNodes:
if ch.nodeType==ch.ELEMENT_NODE and ch.localName=='filevariable':
                        assert ch.hasAttribute('name'), '"name" attribute of filevariable is missing, label = "%s".' % ch.getAttribute('label')
name = ch.getAttribute('name')
unit = ch.getAttribute('unit')
if ch.hasAttribute('label'):
longname = ch.getAttribute('label')
else:
longname = name
self.vardata.append((name,longname,unit))
# Get dimensions
fdims = xmlstore.util.findDescendantNode(finfo,['filedimensions'])
if fdims is not None:
for ch in fdims.childNodes:
if ch.nodeType==ch.ELEMENT_NODE and ch.localName=='filedimension':
dimdata = xmlplot.common.VariableStore.getDimensionInfo_raw(self,None)
assert ch.hasAttribute('name'), '"name" attribute of filedimension is missing, label = "%s".' % ch.getAttribute('label')
id = ch.getAttribute('name')
if ch.hasAttribute('label'):
dimdata['label'] = ch.getAttribute('label')
else:
dimdata['label'] = id
if ch.hasAttribute('unit'): dimdata['unit'] = ch.getAttribute('unit')
if ch.hasAttribute('datatype'): dimdata['datatype'] = ch.getAttribute('datatype')
if ch.hasAttribute('preferredaxis'): dimdata['preferredaxis'] = ch.getAttribute('preferredaxis')
self.dimensions[id] = dimdata
self.dimensionorder.append(id)
self.data = None
self.datatype = datatype
def copy(self):
"""Returns a copy of the LinkedFileVariableStore object.
Currently this copies descriptive metadata, but no actual values.
"""
return LinkedFileVariableStore(None,None,None,None,self.dimensions,self.dimensionorder,self.vardata,self.datatype,defaultfilename=self.filename)
def clear(self,clearfile=True):
"""Clears all data, and by default also clears the original datafile
(if any). The metadata set on the object will be updated accordingly.
"""
self.dataChanged(clearfile=clearfile)
def setDataFile(self,datafile=None,cleardata=True):
"""Attaches a new data file as source of data. This will clear all
metadata set on the object, and by default it will also clear any
parsed data.
"""
xmlstore.datatypes.DataFileEx.setDataFile(self,datafile)
if cleardata: self.data = None
def setData(self,data,clearfile=True):
"""Sets a new data block, automatically updating the metadata set on
the object. By default it will clear the original datafile (if any).
"""
self.data = data
self.dataChanged(clearfile=clearfile)
def dataChanged(self,clearfile=True):
"""Event handler, to be called just after the data has changed.
"""
if clearfile: self.setDataFile(None,cleardata=False)
if self.data is None: return
#print '%s - caching validation result and dimension boundaries.' % self.filename
metadata = self.getMetaData()
for dimname in self.getDimensionNames():
dimnode = metadata['Dimensions'].getChildById('Dimension',id=dimname,create=True)
assert dimnode is not None, 'Failed to create Dimension node for %s.' % dimname
dimrange = self.calculateDimensionRange(dimname)
if dimrange is None: continue
minval,maxval = dimrange
if self.getDimensionInfo_raw(dimname)['datatype']=='datetime':
dimnode['IsTimeDimension'].setValue(True)
dimnode['MinimumTime'].setValue(xmlplot.common.num2date(minval))
dimnode['MaximumTime'].setValue(xmlplot.common.num2date(maxval))
else:
dimnode['IsTimeDimension'].setValue(False)
dimnode['Minimum'].setValue(minval)
dimnode['Maximum'].setValue(maxval)
metadata['Valid'].setValue(True)
def getDimensionNames(self):
"""Returns the names of data dimensions.
"""
return self.dimensionorder[:]
def getDimensionInfo_raw(self,dimname):
"""Returns information on the specified data dimension.
see VariableStore.getDimensionInfo for the type of
information returned.
"""
return self.dimensions[dimname]
def getDimensionRange(self,dimname):
"""Returns the range, i.e., the tuple (minimum, maximum) of the
specified dimension.
"""
if self.data is None and (self.datafile is None or not self.datafile.isValid()): return None
metadata = self.getMetaData()
dimnode = metadata['Dimensions'].getChildById('Dimension',dimname)
if dimnode is None:
try:
self.getData()
except Exception,e:
pass
dimnode = metadata['Dimensions'].getChildById('Dimension',dimname)
assert dimnode is not None, 'Cannot locate node for dimension %s in data file cache.' % dimname
if metadata['Valid'].getValue()==False: return None
#print '%s - using cached bounds for %s.' % (self.filename,dimname)
if dimnode['IsTimeDimension'].getValue():
minval = dimnode['MinimumTime'].getValue()
maxval = dimnode['MaximumTime'].getValue()
else:
minval = dimnode['Minimum'].getValue()
maxval = dimnode['Maximum'].getValue()
if minval is None and maxval is None: return None
return (minval,maxval)
def hasExpensiveValidate(self):
return True
def validate(self,templatenode,callback=None):
if self.data is None and (self.datafile is None or not self.datafile.isValid()): return False
metadata = self.getMetaData()
valid = metadata['Valid'].getValue()
if valid is None:
try:
self.getData(callback=callback)
except Exception,e:
pass
valid = metadata['Valid'].getValue()
assert valid is not None, 'Information on validity of data file %s not in data file cache.' % self.filename
#print '%s - using cached validation result.' % self.filename
return valid
def getVariableNames_raw(self):
"""Returns the names of all variables in the store.
"""
return [data[0] for data in self.vardata]
def getVariableLongNames_raw(self):
"""Returns the long name of the specified variable.
"""
return dict([(data[0],data[1]) for data in self.vardata])
def getVariable_raw(self,varname):
"""Returns the specified variable as LinkedFileVariable object.
"""
for (index,data) in enumerate(self.vardata):
if data[0]==varname:
return self.variableclass(self,data,index)
return None
def loadFromFile(self,path):
datafile = xmlstore.datatypes.DataContainerDirectory.DataFileFile(path)
self.setDataFile(datafile)
datafile.release()
def saveToFile(self,path,callback=None):
"""Saves the current data to file."""
if self.datafile is not None:
self.datafile.saveToFile(path)
else:
f = open(path,'w')
self.writeData(f,callback=callback)
f.close()
def getDataFile(self,callback=None):
if self.datafile is None:
            assert self.data is not None, 'getDataFile called while neither the data file nor the in-memory data is set.'
# Data not present as data file object. Create one in memory on the spot.
target = StringIO.StringIO()
self.writeData(target,callback=callback)
self.datafile = xmlstore.datatypes.DataFileMemory(target.getvalue(),self.filename+'.dat')
target.close()
return self.datafile.addref()
def writeData(self,target,callback=None):
"""Writes the current data to a file-like object."""
assert False, 'writeData must be implemented by derived class.'
def getData(self,callback=None):
if self.data is None and self.datafile is not None:
try:
data = self.parseDataFile(callback)
except Exception,e:
self.getMetaData()['Valid'].setValue(False)
raise
self.setData(data,clearfile=False)
return self.data
def parseDataFile(self,callback=None):
assert False, 'parseDataFile must be implemented by derived class.'
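# Note on the on-disk layout parsed by LinkedMatrix below (inferred from its
# loadDataFile_* methods; the sample values are purely illustrative):
# - type 1 ("singleprofile"): the first line holds the number of observations,
#   followed by that many rows of whitespace-separated values.
# - type 0 ("pointsintime"): one observation per line, starting with a
#   "yyyy-mm-dd hh:mm:ss" timestamp followed by the variable values, e.g.
#       1998-01-01 00:00:00    8.5    35.1
#       1998-01-02 00:00:00    8.4    35.2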
class LinkedMatrix(LinkedFileVariableStore):
class LinkedMatrixVariable(LinkedFileVariableStore.LinkedFileVariable):
def getSlice(self,bounds):
slice = self.Slice(self.getDimensions())
# Get a reference to all data, and stop if the coordinate dimension is empty.
data = self.store.getData()
if data[0].shape[0]==0: return slice
if slice.ndim==1:
slice.coords[0] = data[0][:]
slice.data = data[-1][:,self.index]
slice.generateStaggered()
return slice
def getShape(self):
data = self.store.getData()
if data[0].shape[0]==0: return tuple()
return data[-1][:,self.index].shape
def __init__(self,datafile=None,context=None,infonode=None,nodename=None,type=0,dimensions={},dimensionorder=(),variables=[],defaultfilename='data'):
LinkedFileVariableStore.__init__(self,datafile,context,infonode,nodename,dimensions,dimensionorder,variables,defaultfilename=defaultfilename)
self.variableclass = self.LinkedMatrixVariable
assert len(self.dimensions)<=1, 'Linkedmatrix objects can only be used with 0 or 1 coordinate dimensions, but %i are present.' % len(self.dimensions)
self.type = type
def copy(self):
"""Returns a copy of the LinkedMatrix object.
Currently this copies descriptive metadata, but no actual values.
"""
return LinkedMatrix(dimensions=self.dimensions,dimensionorder=self.dimensionorder,variables=self.vardata,type=self.type,defaultfilename=self.filename)
def clear(self,clearfile=True):
"""Clears all contained data."""
self.data = []
if len(self.dimensions)==1:
dimdatatype = self.dimensions[self.dimensionorder[0]]['datatype']
self.data.append(numpy.empty((0,),self.mpldatatypes[dimdatatype]))
self.data.append(numpy.empty((0,len(self.vardata)),self.mpldatatypes[self.datatype]))
LinkedFileVariableStore.clear(self,clearfile=clearfile)
def calculateDimensionRange(self,dimname):
ind = self.dimensionorder.index(dimname)
dimdata = self.getData()[ind]
if 0 in dimdata.shape: return None
return (dimdata.min(),dimdata.max())
def parseDataFile(self,callback=None):
if self.datafile is None or not self.datafile.isValid(): return None
if self.type==0:
# Unknown number of rows
res = self.loadDataFile_UnknownCount(callback)
elif self.type==1:
# Known number of rows
res = self.loadDataFile_KnownCount(callback)
else:
assert False, 'unknown LinkedMatrix type %i.' % self.type
return res
def loadDataFile_KnownCount(self,callback):
"""Load a data from a DataFile object with the number of lines listed on the first line.
"""
# Get number of dimensions and variables.
dimcount = len(self.dimensions)
varcount = len(self.vardata)
# Get the size of the file (in bytes, may be None if the size is not known)
# This will be used in combination with the position of the file pointer to report progress.
filesize = float(self.datafile.getSize())
# Access the data through some read-only file-like object.
f = self.datafile.getAsReadOnlyFile()
# First line contains number of observations to follow.
line = f.readline()
if line=='':
raise Exception('File is empty. Expected number of observations on first line.')
obscount = int(line)
# Allocate arrays for storage of coordinates and variable values
values = numpy.empty((obscount,varcount),self.mpldatatypes[self.datatype])
if dimcount==1:
# One coordinate dimension present; allocate an array for its values.
dimtype = self.dimensions[self.dimensionorder[0]]['datatype']
dimisdate = (dimtype=='datetime')
if dimisdate:
prevdate = None
dimvalues = numpy.empty((obscount,),self.mpldatatypes[dimtype])
for irow in range(values.shape[0]):
# Read a line (stop if end-of-file was reached)
line = f.readline()
if line=='':
raise Exception('End-of-file reached after line %i, but expecting still %i more rows of observations.' % (irow+1,values.shape[0]-irow))
iline = irow+2 # One-based line index
if dimcount==1:
if dimisdate:
# Read the date + time
try:
refvals = map(int,(line[:4],line[5:7],line[8:10],line[11:13],line[14:16],line[17:19]))
except ValueError:
raise Exception('Line %i does not start with date and time (yyyy-mm-dd hh:mm:ss). Line contents: %s' % (iline,line))
dimvalue = xmlstore.util.dateTimeFromTuple(refvals)
if prevdate is not None and dimvalue<prevdate:
raise Exception('Line %i: observation time %s lies before previous observation time %s. Times should be increasing.' % (iline,xmlstore.util.formatDateTime(dimvalue),xmlstore.util.formatDateTime(prevdate)))
prevdate = dimvalue
dimvalue = xmlplot.common.date2num(dimvalue)
# Read variable values.
data = line[19:].split()
else:
# Split line, convert values to floats and store first as coordinate.
data = map(float,line.split())
dimvalue = data.pop(0)
else:
data = map(float,line.split())
if len(data)<varcount:
raise Exception('Line %i contains only %i observations, where %i are expected (%s).' % (iline,len(data),varcount,', '.join([d[1] for d in self.vardata])))
# Store time and values.
if dimcount==1: dimvalues[irow] = dimvalue
values[irow,:] = data[:varcount]
# Inform caller about progress
if callback is not None and iline%1000==0:
progress = None
if filesize is not None:
try:
progress = float(f.tell())/filesize
except AttributeError:
progress = None
callback(progress,'read %i lines.' % iline)
# Close data file
f.close()
# Succeeded in reading the data: store them internally.
if dimcount==1:
return [dimvalues,values]
else:
return [values]
def loadDataFile_UnknownCount(self,callback):
"""Load a data file with the number of lines not known in advance.
"""
varcount = len(self.vardata)
# Get the size of the file (in bytes, may be None if the size is not known)
# This will be used in combination with the position of the file pointer to report progress.
filesize = float(self.datafile.getSize())
# Access the data through some read-only file-like object.
f = self.datafile.getAsReadOnlyFile()
# Get the data type to use for the dimension
dimdatatype = self.dimensions[self.dimensionorder[0]]['datatype']
# Size of one memory slab (roughly equivalent to 1 MB in memory)
buffersize = 125000/(varcount+1)
times = []
values = []
iline = 0
while True:
# Read a line (stop if end-of-file was reached)
line = f.readline()
if line=='': break
# Calculate position in current memory slab, create new slab if needed.
ipos = iline%buffersize
if ipos==0:
times.append(numpy.empty((buffersize,), self.mpldatatypes[dimdatatype]))
values.append(numpy.empty((buffersize,varcount),self.mpldatatypes[self.datatype]))
# Increment current line number
iline += 1
# Read the date + time
try:
refvals = map(int,(line[:4],line[5:7],line[8:10],line[11:13],line[14:16],line[17:19]))
except ValueError:
raise Exception('Line %i does not start with date and time (yyyy-mm-dd hh:mm:ss). Line contents: %s' % (iline,line))
curdate = xmlstore.util.dateTimeFromTuple(refvals)
times[-1][ipos] = xmlplot.common.date2num(curdate)
# Read values.
data = line[19:].split()
if len(data)<varcount:
raise Exception('Line %i contains only %i observations, where %i are expected (%s).' % (iline,len(data),varcount,', '.join([d[1] for d in self.vardata])))
values[-1][ipos,:] = map(float,data[:varcount])
# Inform caller about progress
if callback is not None and iline%1000==0:
progress = None
if filesize is not None:
try:
progress = float(f.tell())/filesize
except AttributeError:
progress = None
callback(progress,'read %i lines.' % iline)
if len(times)>0:
# Delete unused rows in last memory slab.
times [-1] = times [-1][0:iline%buffersize]
values[-1] = values[-1][0:iline%buffersize,:]
# Concatenate memory slab.
times = numpy.concatenate(times,axis=0)
values = numpy.concatenate(values,axis=0)
else:
# No data read: create empty time and value arrays
times = numpy.zeros((0,),self.mpldatatypes[dimdatatype])
values = numpy.zeros((0,varcount),self.mpldatatypes[self.datatype])
# Close data file
f.close()
# Succeeded in reading the data: store them internally.
return [times,values]
def writeData(self,target,callback=None,missing=''):
"""Writes the current data to a file-like object."""
# Get number of dimensions and variables, and get shortcuts to the data.
dimcount = len(self.dimensions)
data = self.getData()
if dimcount==1:
# One coordinate dimension present; get the data type of that dimension.
dimdata = data[0]
dimtype = self.dimensions.values()[0]['datatype']
dimisdate = (dimtype=='datetime')
if dimisdate: dimdata = xmlplot.common.num2date(dimdata)
varcount = len(self.vardata)
vardata = data[-1]
# Get the mask of the data (numpy.ma.nomask if not set)
mask = numpy.ma.getmask(vardata)
if self.type==1:
# Write first line with number of observations.
target.write('%i\n' % vardata.shape[0])
# Write lines with observations.
for iline in range(vardata.shape[0]):
if dimcount==1:
if dimisdate:
target.write(xmlstore.util.formatDateTime(dimdata[iline],iso=True))
else:
target.write('%.12g' % dimdata[iline])
for ivar in range(varcount):
if mask is not numpy.ma.nomask and mask[iline,ivar]:
target.write('\t%s' % missing)
else:
target.write('\t%.12g' % vardata[iline,ivar])
target.write('\n')
if callback is not None and iline%1000==0:
callback(float(iline)/vardata.shape[0],'wrote %i lines.' % iline)
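# Note on the profile layout parsed by LinkedProfilesInTime below (inferred from
# its parseDataFile method; the sample values are purely illustrative): each block
# starts with "yyyy-mm-dd hh:mm:ss <depthcount> <updown>" and is followed by
# <depthcount> lines holding a depth plus one value per variable, e.g.
#       1998-01-01 00:00:00 3 1
#       -30.0   8.1
#       -15.0   8.4
#       -5.0    8.6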
class LinkedProfilesInTime(LinkedFileVariableStore):
class LinkedProfilesInTimeVariable(LinkedFileVariableStore.LinkedFileVariable):
def getSlice(self,bounds):
varslice = self.Slice(self.getDimensions())
data = self.store.getGriddedData()
if data[0].shape[0]==0: return varslice
timebounds = xmlplot.common.findIndices((bounds[0].start,bounds[0].stop),data[0])
varslice.coords[0] = data[0][timebounds[0]:timebounds[1]+1]
varslice.coords[1] = data[1]
varslice.data = data[2][timebounds[0]:timebounds[1]+1,:,self.index]
varslice.generateStaggered()
return varslice
def getShape(self):
data = self.store.getGriddedData()
if data[0].shape[0]==0: return tuple()
return data[-1][:,:,self.index].shape
def __init__(self,datafile,context,infonode,nodename,dimensions=[],dimensionorder=(),variables=[],defaultfilename='data'):
LinkedFileVariableStore.__init__(self,datafile,context,infonode,nodename,dimensions,dimensionorder,variables,defaultfilename=defaultfilename)
self.variableclass = self.LinkedProfilesInTimeVariable
def copy(self):
"""Returns a copy of the LinkedProfilesInTime object.
Currently this copies descriptive metadata, but no actual values.
"""
return LinkedProfilesInTime(None,None,None,None,dimensions=self.dimensions,dimensionorder=self.dimensionorder,variables=self.vardata,defaultfilename=self.filename)
def setDataFile(self,datafile=None,cleardata=True):
LinkedFileVariableStore.setDataFile(self,datafile,cleardata=cleardata)
if cleardata: self.griddeddata = None
def clear(self,clearfile=True):
self.data = [numpy.empty((0,)),[],[]]
LinkedFileVariableStore.clear(self,clearfile=clearfile)
def dataChanged(self,clearfile=True):
"""Event handler, must be called by external actors when they change the data."""
self.griddeddata = None
LinkedFileVariableStore.dataChanged(self,clearfile=clearfile)
def calculateDimensionRange(self,dimname):
ind = self.dimensionorder.index(dimname)
dimdata = self.getData()[ind]
if len(dimdata)==0: return None
if ind==0:
return (dimdata.min(),dimdata.max())
else:
dimmin,dimmax = None,None
for curdata in dimdata:
if 0 in curdata.shape: continue
curmin,curmax = curdata.min(),curdata.max()
if dimmin is None or curmin<dimmin: dimmin = curmin
if dimmax is None or curmax>dimmax: dimmax = curmax
return (dimmin,dimmax)
def writeData(self,target,callback=None):
"""Writes the current data to a file-like object."""
varcount = len(self.vardata)
data = self.getData()
assert data is not None, 'Cannot write data to file, because data is set to None.'
times,depths,values = data
for itime in range(times.shape[0]):
target.write(xmlstore.util.formatDateTime(xmlplot.common.num2date(times[itime]),iso=True))
curdepths = depths[itime]
curdata = values[itime]
depthcount = len(curdepths)
target.write('\t%i\t1\n' % depthcount)
for idepth in range(depthcount):
target.write('%.9g' % curdepths[idepth])
for ivar in range(varcount):
target.write('\t%.9g' % curdata[idepth,ivar])
target.write('\n')
def getGriddedData(self,callback=None):
data = self.getData()
if self.griddeddata is None:
# Select only non-empty profiles
times,depths,values = [],[],[]
for t,d,v in zip(*data):
if 0 not in d.shape:
times.append(t)
depths.append(d)
values.append(v)
times = numpy.array(times,dtype=data[0].dtype)
varcount = len(self.vardata)
# Find unique depth levels.
uniquedepths = set()
for ds in depths:
for d in ds: uniquedepths.add(d)
# Create depth grid to interpolate on to. Use the observation depths if less than 200,
            # otherwise create an equidistant 200-point grid between the minimum and maximum depth.
uniquedepths = list(uniquedepths)
uniquedepths.sort()
if len(uniquedepths)<200:
depthdatatype = self.dimensions[self.dimensionorder[1]]['datatype']
depthgrid = numpy.array(uniquedepths,self.mpldatatypes[depthdatatype])
else:
depthgrid = numpy.linspace(uniquedepths[0],uniquedepths[-1],200)
# Grid observed profiles to depth grid.
griddedvalues = numpy.empty((times.shape[0],depthgrid.shape[0],varcount),self.mpldatatypes[self.datatype])
for it in range(len(times)):
griddedvalues[it,:,:] = xmlplot.common.interp1(depths[it],values[it],depthgrid)
if callback is not None and (it+1)%20==0:
callback(float(it+1)/len(times),'gridded %i profiles.' % (it+1))
# Store time grid, depth grid and observations.
self.griddeddata = (times,depthgrid,griddedvalues)
return self.griddeddata
def parseDataFile(self,callback=None):
if self.datafile is None or not self.datafile.isValid(): return None
varcount = len(self.vardata)
# Get the size of the file (in bytes, may be None if the size is not known)
# This will be used in combination with the position of the file pointer to report progress.
filesize = float(self.datafile.getSize())
# Access the data through some read-only file-like object.
f = self.datafile.getAsReadOnlyFile()
times = []
depths = []
values = []
iline = 0
while True:
# Read a line (stop if end-of-file was reached)
line = f.readline()
if line=='': break
iline += 1
# Read date & time
try:
refvals = map(int,(line[:4],line[5:7],line[8:10],line[11:13],line[14:16],line[17:19]))
except ValueError:
raise Exception('Line %i does not start with date and time (yyyy-mm-dd hh:mm:ss). Line contents: %s' % (iline,line))
curdate = xmlstore.util.dateTimeFromTuple(refvals)
curdate = xmlplot.common.date2num(curdate)
# Get the number of observations and the depth direction.
(depthcount,updown) = map(int, line[19:].split())
            # Create arrays that will contain depths and observed values.
depthdatatype = self.dimensions[self.dimensionorder[1]]['datatype']
curdepths = numpy.empty((depthcount,),self.mpldatatypes[depthdatatype])
curvalues = numpy.empty((depthcount,varcount),self.mpldatatypes[self.datatype])
# Depths can be increasing (updown==1) or decreasing (updown!=1)
if updown==1:
depthindices = range(0,depthcount,1)
else:
depthindices = range(depthcount-1,-1,-1)
# Now parse the specified number of observations to create the profiles.
prevdepth = None
for idepthline in depthindices:
if callback is not None and iline%1000==0:
pos = f.tell()
callback(pos/filesize,'processed %i lines.' % iline)
# Read line
line = f.readline()
if line=='':
raise Exception('Premature end-of-file after line %i; expected %i more observations.' % (iline,depthcount-depthindices.index(idepthline)))
iline += 1
# Read values (depth followed by data) and check.
try:
linedata = map(float,line.split())
except ValueError,e:
raise Exception('Line %i: %s' % (iline,e))
if len(linedata)<varcount+1:
raise Exception('Line %i contains only %i value(s), where %i (1 depth and %i observations) are expected.' % (iline,len(linedata),varcount+1,varcount))
if prevdepth is not None:
if linedata[0]==prevdepth:
raise Exception('Found duplicate observation for depth %.4f at line %i.' % (linedata[0],iline))
if updown==1:
if linedata[0]<prevdepth:
raise Exception('Observation depth decreases from %.4f to %.4f at line %i, but the profile depth was set to increase from first to last observation.' % (prevdepth,linedata[0],iline))
elif linedata[0]>prevdepth:
raise Exception('Observation depth increases from %.4f to %.4f at line %i, but the profile depth was set to decrease from first to last observation.' % (prevdepth,linedata[0],iline))
prevdepth = linedata[0]
# Store current observation
curdepths[idepthline] = linedata[0]
curvalues[idepthline,:] = linedata[1:varcount+1]
# Append the profiles for the current time to the list.
times.append(curdate)
depths.append(curdepths)
values.append(curvalues)
# Inform caller about progress.
if callback is not None and iline%1000==0:
pos = f.tell()
callback(pos/filesize,'processed %i lines.' % iline)
# Convert sequence with times to numpy array.
timedatatype = self.dimensions[self.dimensionorder[0]]['datatype']
times = numpy.array(times,self.mpldatatypes[timedatatype])
# Close data file
f.close()
# Succeeded in reading the data: store them internally.
return [times,depths,values]
| gpl-2.0 |
arjoly/scikit-learn | sklearn/covariance/tests/test_covariance.py | 34 | 11120 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
assert_greater(np.amin(mahal_dist), 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# Create X with 1 sample and 5 features
X_1sample = np.arange(5).reshape(1, 5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
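# Illustrative check (not in the original suite) of the convex-combination form
# behind shrunk_covariance: shrunk = (1 - a) * S + a * (trace(S) / p) * I.
# The name lacks the test_ prefix, so test runners will not collect it.
def sketch_shrinkage_formula(shrinkage=0.3):
    emp_cov = empirical_covariance(X)
    p = emp_cov.shape[0]
    manual = ((1. - shrinkage) * emp_cov
              + shrinkage * np.trace(emp_cov) / p * np.eye(p))
    assert_array_almost_equal(manual,
                              shrunk_covariance(emp_cov, shrinkage=shrinkage))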
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0:1]
oa = OAS(assume_centered=True)
oa.fit(X_1d)
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
    # Same tests without assuming centered data
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause |
RayMick/scikit-learn | examples/neighbors/plot_species_kde.py | 282 | 4059 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.decode('ascii').startswith('micro')
for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = -9999 + np.zeros(land_mask.shape[0])
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
| bsd-3-clause |
Jay-Jay-D/LeanSTP | Algorithm.Framework/Portfolio/MinimumVariancePortfolioOptimizer.py | 3 | 4622 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from scipy.optimize import minimize
### <summary>
### Provides an implementation of a portfolio optimizer that calculates the optimal weights
### with the weights constrained to the range -1 to 1, minimizing the portfolio variance subject to a target return of 2%
### </summary>
class MinimumVariancePortfolioOptimizer:
    '''Provides an implementation of a portfolio optimizer that calculates the optimal weights
    with the weights constrained to the range -1 to 1, minimizing the portfolio variance subject to a target return of 2%'''
def __init__(self,
minimum_weight = -1,
maximum_weight = 1,
target_return = 0.02):
'''Initialize the MinimumVariancePortfolioOptimizer
Args:
minimum_weight(float): The lower bounds on portfolio weights
maximum_weight(float): The upper bounds on portfolio weights
target_return(float): The target portfolio return'''
self.minimum_weight = minimum_weight
self.maximum_weight = maximum_weight
self.target_return = target_return
def Optimize(self, historicalReturns, expectedReturns = None, covariance = None):
'''
Perform portfolio optimization for a provided matrix of historical returns and an array of expected returns
args:
            historicalReturns: Matrix of annualized historical returns where each column represents a security and each row the returns for a given date/time (size: K x N).
expectedReturns: Array of double with the portfolio annualized expected returns (size: K x 1).
covariance: Multi-dimensional array of double with the portfolio covariance of annualized returns (size: K x K).
Returns:
Array of double with the portfolio weights (size: K x 1)
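        Example:
            A minimal sketch with made-up return data (the numbers are random and
            purely illustrative; relies on the numpy/pandas imports at the top of
            this file):
                returns = pd.DataFrame(np.random.randn(252, 3) * 0.01,
                                       columns=['A', 'B', 'C'])
                optimizer = MinimumVariancePortfolioOptimizer()
                weights = optimizer.Optimize(returns)  # budget constraint makes the weights sum to 1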
'''
if covariance is None:
covariance = historicalReturns.cov()
if expectedReturns is None:
expectedReturns = historicalReturns.mean()
size = historicalReturns.columns.size # K x 1
x0 = np.array(size * [1. / size])
constraints = [
{'type': 'eq', 'fun': lambda weights: self.get_budget_constraint(weights)},
{'type': 'eq', 'fun': lambda weights: self.get_target_constraint(weights, expectedReturns)}]
opt = minimize(lambda weights: self.portfolio_variance(weights, covariance), # Objective function
x0, # Initial guess
bounds = self.get_boundary_conditions(size), # Bounds for variables
constraints = constraints, # Constraints definition
method='SLSQP') # Optimization method: Sequential Least SQuares Programming
return opt['x']
def portfolio_variance(self, weights, covariance):
'''Computes the portfolio variance
Args:
            weights: Portfolio weights
covariance: Covariance matrix of historical returns'''
variance = np.dot(weights.T, np.dot(covariance, weights))
if variance == 0:
raise ValueError(f'MinimumVariancePortfolioOptimizer.portfolio_variance: Volatility cannot be zero. Weights: {weights}')
return variance
def get_boundary_conditions(self, size):
'''Creates the boundary condition for the portfolio weights'''
return tuple((self.minimum_weight, self.maximum_weight) for x in range(size))
def get_budget_constraint(self, weights):
'''Defines a budget constraint: the sum of the weights equals unity'''
return np.sum(weights) - 1
def get_target_constraint(self, weights, expectedReturns):
        '''Ensures that the portfolio expected return equals the target return'''
return np.dot(np.matrix(expectedReturns), np.matrix(weights).T).item() - self.target_return | apache-2.0 |
mayblue9/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 41 | 35602 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.random import choice
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
    a = '\u0625' # alef with a hamza below
    expected = '\u0627' # simple alef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
    a = '\u0625' # alef with a hamza below
    expected = '' # alef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indices():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # the lack of smoothing makes IDF fragile in the presence of features with
    # only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
    v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # check that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
        # words that are highly frequent across the complete corpus are likely
        # to be uninformative (either real stop words or extraction
        # artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
v3.set_params = '_invalid_analyzer_type_'
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset every parameter combination converges to a 100%
    # accuracy model, so the tie is broken in favor of the first candidate,
    # which uses the unigram representation
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset every parameter combination converges to a 100%
    # accuracy model, so the tie is broken in favor of the first candidate,
    # which uses the unigram representation
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_countvectorizer_vocab_sets_when_pickling():
# ensure that vocabulary of type set is coerced to a list to
# preserve iteration ordering after deserialization
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_set = set(choice(vocab_words, size=5, replace=False,
random_state=rng))
cv = CountVectorizer(vocabulary=vocab_set)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_countvectorizer_vocab_dicts_when_pickling():
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_dict = dict()
words = choice(vocab_words, size=5, replace=False, random_state=rng)
for y in range(0, 5):
vocab_dict[words[y]] = y
cv = CountVectorizer(vocabulary=vocab_dict)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pandas/io/sql.py | 7 | 58343 | # -*- coding: utf-8 -*-
"""
Collection of query wrappers / abstractions to both facilitate data
retrieval and to reduce dependency on DB-specific API.
"""
from __future__ import print_function, division
from datetime import datetime, date, time
import warnings
import re
import numpy as np
import pandas._libs.lib as lib
from pandas.core.dtypes.missing import isnull
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.common import (
is_list_like, is_dict_like,
is_datetime64tz_dtype)
from pandas.compat import (map, zip, raise_with_traceback,
string_types, text_type)
from pandas.core.api import DataFrame, Series
from pandas.core.base import PandasObject
from pandas.core.tools.datetimes import to_datetime
from contextlib import contextmanager
class SQLAlchemyRequired(ImportError):
pass
class DatabaseError(IOError):
pass
# -----------------------------------------------------------------------------
# -- Helper functions
_SQLALCHEMY_INSTALLED = None
def _validate_flavor_parameter(flavor):
"""
Checks whether a database 'flavor' was specified.
    If not None, produces a FutureWarning if it is 'sqlite' and
    raises a ValueError for anything else.
"""
if flavor is not None:
if flavor == 'sqlite':
warnings.warn("the 'flavor' parameter is deprecated "
"and will be removed in a future version, "
"as 'sqlite' is the only supported option "
"when SQLAlchemy is not installed.",
FutureWarning, stacklevel=2)
else:
raise ValueError("database flavor {flavor} is not "
"supported".format(flavor=flavor))
def _is_sqlalchemy_connectable(con):
global _SQLALCHEMY_INSTALLED
if _SQLALCHEMY_INSTALLED is None:
try:
import sqlalchemy
_SQLALCHEMY_INSTALLED = True
from distutils.version import LooseVersion
ver = LooseVersion(sqlalchemy.__version__)
# For sqlalchemy versions < 0.8.2, the BIGINT type is recognized
# for a sqlite engine, which results in a warning when trying to
# read/write a DataFrame with int64 values. (GH7433)
if ver < '0.8.2':
from sqlalchemy import BigInteger
from sqlalchemy.ext.compiler import compiles
@compiles(BigInteger, 'sqlite')
def compile_big_int_sqlite(type_, compiler, **kw):
return 'INTEGER'
except ImportError:
_SQLALCHEMY_INSTALLED = False
if _SQLALCHEMY_INSTALLED:
import sqlalchemy
return isinstance(con, sqlalchemy.engine.Connectable)
else:
return False
def _convert_params(sql, params):
"""convert sql and params args to DBAPI2.0 compliant format"""
args = [sql]
if params is not None:
if hasattr(params, 'keys'): # test if params is a mapping
args += [params]
else:
args += [list(params)]
return args
def _handle_date_column(col, format=None):
if isinstance(format, dict):
return to_datetime(col, errors='ignore', **format)
else:
if format in ['D', 's', 'ms', 'us', 'ns']:
return to_datetime(col, errors='coerce', unit=format, utc=True)
elif (issubclass(col.dtype.type, np.floating) or
issubclass(col.dtype.type, np.integer)):
# parse dates as timestamp
format = 's' if format is None else format
return to_datetime(col, errors='coerce', unit=format, utc=True)
elif is_datetime64tz_dtype(col):
# coerce to UTC timezone
# GH11216
return (to_datetime(col, errors='coerce')
.astype('datetime64[ns, UTC]'))
else:
return to_datetime(col, errors='coerce', format=format, utc=True)
def _parse_date_columns(data_frame, parse_dates):
"""
    Force the columns listed in `parse_dates` to be parsed as datetimes.
    Supports both string-formatted and integer timestamp columns.
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for col_name in parse_dates:
df_col = data_frame[col_name]
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
data_frame[col_name] = _handle_date_column(df_col, format=fmt)
# we want to coerce datetime64_tz dtypes for now
# we could in theory do a 'nice' conversion from a FixedOffset tz
# GH11216
for col_name, df_col in data_frame.iteritems():
if is_datetime64tz_dtype(df_col):
data_frame[col_name] = _handle_date_column(df_col)
return data_frame
def _wrap_result(data, columns, index_col=None, coerce_float=True,
parse_dates=None):
"""Wrap result set of query in a DataFrame """
frame = DataFrame.from_records(data, columns=columns,
coerce_float=coerce_float)
_parse_date_columns(frame, parse_dates)
if index_col is not None:
frame.set_index(index_col, inplace=True)
return frame
def execute(sql, con, cur=None, params=None):
"""
Execute the given SQL query using the provided connection object.
Parameters
----------
sql : string
Query to be executed
con : SQLAlchemy connectable(engine/connection) or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
cur : deprecated, cursor is obtained from connection, default: None
params : list or tuple, optional, default: None
List of parameters to pass to execute method.
Returns
-------
Results Iterable
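    Examples
    --------
    A minimal sketch, assuming a sqlite3 connection to a database that contains
    a hypothetical table named ``my_table``::

        import sqlite3
        conn = sqlite3.connect('example.db')
        result = execute("SELECT * FROM my_table WHERE id = ?", conn,
                         params=(1,))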
"""
if cur is None:
pandas_sql = pandasSQL_builder(con)
else:
pandas_sql = pandasSQL_builder(cur, is_cursor=True)
args = _convert_params(sql, params)
return pandas_sql.execute(*args)
# -----------------------------------------------------------------------------
# -- Read and write to DataFrames
def read_sql_table(table_name, con, schema=None, index_col=None,
coerce_float=True, parse_dates=None, columns=None,
chunksize=None):
"""Read SQL database table into a DataFrame.
Given a table name and an SQLAlchemy connectable, returns a DataFrame.
This function does not support DBAPI connections.
Parameters
----------
table_name : string
Name of SQL table in database
con : SQLAlchemy connectable (or database string URI)
Sqlite DBAPI connection mode not supported
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If None, use default schema (default).
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point. Can result in loss of Precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list, default: None
List of column names to select from sql table
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame
Notes
-----
Any datetime values with time zone information will be converted to UTC
See also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql
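    Examples
    --------
    A minimal sketch, assuming SQLAlchemy is installed and that ``my_table``
    is a hypothetical table (with an ``id`` column) in a local SQLite file::

        from sqlalchemy import create_engine
        engine = create_engine('sqlite:///example.db')
        df = read_sql_table('my_table', engine, index_col='id')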
"""
con = _engine_builder(con)
if not _is_sqlalchemy_connectable(con):
raise NotImplementedError("read_sql_table only supported for "
"SQLAlchemy connectable.")
import sqlalchemy
from sqlalchemy.schema import MetaData
meta = MetaData(con, schema=schema)
try:
meta.reflect(only=[table_name], views=True)
except sqlalchemy.exc.InvalidRequestError:
raise ValueError("Table %s not found" % table_name)
pandas_sql = SQLDatabase(con, meta=meta)
table = pandas_sql.read_table(
table_name, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns, chunksize=chunksize)
if table is not None:
return table
else:
raise ValueError("Table %s not found" % table_name, con)
def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None, chunksize=None):
"""Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default integer index will be used.
Parameters
----------
sql : string SQL query or SQLAlchemy Selectable (select or text object)
to be executed.
con : SQLAlchemy connectable(engine/connection) or database string URI
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame
Notes
-----
Any datetime values with time zone information parsed via the `parse_dates`
parameter will be converted to UTC
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql
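    Examples
    --------
    A minimal sketch using a plain sqlite3 DBAPI connection; ``my_table`` and
    its ``created_at`` column are hypothetical::

        import sqlite3
        conn = sqlite3.connect('example.db')
        df = read_sql_query("SELECT * FROM my_table WHERE id > ?", conn,
                            params=(10,), parse_dates=['created_at'])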
"""
pandas_sql = pandasSQL_builder(con)
return pandas_sql.read_query(
sql, index_col=index_col, params=params, coerce_float=coerce_float,
parse_dates=parse_dates, chunksize=chunksize)
def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None, columns=None, chunksize=None):
"""
Read SQL query or database table into a DataFrame.
Parameters
----------
sql : string SQL query or SQLAlchemy Selectable (select or text object)
to be executed, or database table name.
con : SQLAlchemy connectable(engine/connection) or database string URI
or DBAPI2 connection (fallback mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list, default: None
List of column names to select from sql table (only used when reading
a table).
chunksize : int, default None
If specified, return an iterator where `chunksize` is the
number of rows to include in each chunk.
Returns
-------
DataFrame
Notes
-----
This function is a convenience wrapper around ``read_sql_table`` and
``read_sql_query`` (and for backward compatibility) and will delegate
to the specific function depending on the provided input (database
table name or sql query). The delegated function might have more specific
notes about their functionality not listed here.
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql_query : Read SQL query into a DataFrame
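    Examples
    --------
    A minimal sketch; with a SQLAlchemy connectable the same call accepts either
    a table name or a query (``my_table`` is hypothetical)::

        from sqlalchemy import create_engine
        engine = create_engine('sqlite:///example.db')
        df1 = read_sql('my_table', engine)                # delegates to read_sql_table
        df2 = read_sql('SELECT * FROM my_table', engine)  # delegates to read_sql_query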
"""
pandas_sql = pandasSQL_builder(con)
if isinstance(pandas_sql, SQLiteDatabase):
return pandas_sql.read_query(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates,
chunksize=chunksize)
try:
_is_table_name = pandas_sql.has_table(sql)
    except Exception:
_is_table_name = False
if _is_table_name:
pandas_sql.meta.reflect(only=[sql])
return pandas_sql.read_table(
sql, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns, chunksize=chunksize)
else:
return pandas_sql.read_query(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates,
chunksize=chunksize)
def to_sql(frame, name, con, flavor=None, schema=None, if_exists='fail',
index=True, index_label=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table
con : SQLAlchemy connectable(engine/connection) or database string URI
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
flavor : 'sqlite', default None
DEPRECATED: this parameter will be removed in a future version
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : single SQLtype or dict of column name to SQL type, default None
        Optionally specify the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
If all columns are of the same type, one single value can be used.
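    Examples
    --------
    A minimal sketch writing a small DataFrame to a local SQLite database
    (``example.db`` and ``my_table`` are hypothetical)::

        import sqlite3
        from pandas import DataFrame
        conn = sqlite3.connect('example.db')
        df = DataFrame({'a': [1, 2, 3]})
        to_sql(df, 'my_table', conn, if_exists='replace', index=False)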
"""
if if_exists not in ('fail', 'replace', 'append'):
raise ValueError("'{0}' is not valid for if_exists".format(if_exists))
pandas_sql = pandasSQL_builder(con, schema=schema, flavor=flavor)
if isinstance(frame, Series):
frame = frame.to_frame()
elif not isinstance(frame, DataFrame):
raise NotImplementedError("'frame' argument should be either a "
"Series or a DataFrame")
pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index,
index_label=index_label, schema=schema,
chunksize=chunksize, dtype=dtype)
def has_table(table_name, con, flavor=None, schema=None):
"""
Check if DataBase has named table.
Parameters
----------
table_name: string
Name of SQL table
con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
flavor : 'sqlite', default None
DEPRECATED: this parameter will be removed in a future version
schema : string, default None
Name of SQL schema in database to write to (if database flavor supports
this). If None, use default schema (default).
Returns
-------
boolean
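    Examples
    --------
    A minimal sketch (``my_table`` is hypothetical)::

        import sqlite3
        conn = sqlite3.connect('example.db')
        has_table('my_table', conn)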
"""
pandas_sql = pandasSQL_builder(con, flavor=flavor, schema=schema)
return pandas_sql.has_table(table_name)
table_exists = has_table
def _engine_builder(con):
"""
Returns a SQLAlchemy engine from a URI (if con is a string)
    otherwise just returns con without modifying it
"""
global _SQLALCHEMY_INSTALLED
if isinstance(con, string_types):
try:
import sqlalchemy
except ImportError:
_SQLALCHEMY_INSTALLED = False
else:
con = sqlalchemy.create_engine(con)
return con
return con
def pandasSQL_builder(con, flavor=None, schema=None, meta=None,
is_cursor=False):
"""
Convenience function to return the correct PandasSQL subclass based on the
provided parameters
"""
_validate_flavor_parameter(flavor)
# When support for DBAPI connections is removed,
# is_cursor should not be necessary.
con = _engine_builder(con)
if _is_sqlalchemy_connectable(con):
return SQLDatabase(con, schema=schema, meta=meta)
elif isinstance(con, string_types):
raise ImportError("Using URI string without sqlalchemy installed.")
else:
return SQLiteDatabase(con, is_cursor=is_cursor)
class SQLTable(PandasObject):
"""
For mapping Pandas tables to SQL tables.
    Uses the fact that the table is reflected by SQLAlchemy to
    do better type conversions.
Also holds various flags needed to avoid having to
pass them between functions all the time.
"""
# TODO: support for multiIndex
def __init__(self, name, pandas_sql_engine, frame=None, index=True,
if_exists='fail', prefix='pandas', index_label=None,
schema=None, keys=None, dtype=None):
self.name = name
self.pd_sql = pandas_sql_engine
self.prefix = prefix
self.frame = frame
self.index = self._index_name(index, index_label)
self.schema = schema
self.if_exists = if_exists
self.keys = keys
self.dtype = dtype
if frame is not None:
# We want to initialize based on a dataframe
self.table = self._create_table_setup()
else:
# no data provided, read-only mode
self.table = self.pd_sql.get_table(self.name, self.schema)
if self.table is None:
raise ValueError("Could not init table '%s'" % name)
def exists(self):
return self.pd_sql.has_table(self.name, self.schema)
def sql_schema(self):
from sqlalchemy.schema import CreateTable
return str(CreateTable(self.table).compile(self.pd_sql.connectable))
def _execute_create(self):
# Inserting table into database, add to MetaData object
self.table = self.table.tometadata(self.pd_sql.meta)
self.table.create()
def create(self):
if self.exists():
if self.if_exists == 'fail':
raise ValueError("Table '%s' already exists." % self.name)
elif self.if_exists == 'replace':
self.pd_sql.drop_table(self.name, self.schema)
self._execute_create()
elif self.if_exists == 'append':
pass
else:
raise ValueError(
"'{0}' is not valid for if_exists".format(self.if_exists))
else:
self._execute_create()
def insert_statement(self):
return self.table.insert()
def insert_data(self):
if self.index is not None:
temp = self.frame.copy()
temp.index.names = self.index
try:
temp.reset_index(inplace=True)
except ValueError as err:
raise ValueError(
"duplicate name in index/columns: {0}".format(err))
else:
temp = self.frame
column_names = list(map(text_type, temp.columns))
ncols = len(column_names)
data_list = [None] * ncols
blocks = temp._data.blocks
for i in range(len(blocks)):
b = blocks[i]
if b.is_datetime:
# convert to microsecond resolution so this yields
# datetime.datetime
d = b.values.astype('M8[us]').astype(object)
else:
d = np.array(b.get_values(), dtype=object)
# replace NaN with None
if b._can_hold_na:
mask = isnull(d)
d[mask] = None
for col_loc, col in zip(b.mgr_locs, d):
data_list[col_loc] = col
return column_names, data_list
def _execute_insert(self, conn, keys, data_iter):
data = [dict((k, v) for k, v in zip(keys, row)) for row in data_iter]
conn.execute(self.insert_statement(), data)
def insert(self, chunksize=None):
keys, data_list = self.insert_data()
nrows = len(self.frame)
if nrows == 0:
return
if chunksize is None:
chunksize = nrows
elif chunksize == 0:
raise ValueError('chunksize argument should be non-zero')
chunks = int(nrows / chunksize) + 1
with self.pd_sql.run_transaction() as conn:
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list])
self._execute_insert(conn, keys, chunk_iter)
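    # Illustration of the chunking above (added for clarity): with
    # nrows == 20 and chunksize == 10, chunks == int(20 / 10) + 1 == 3;
    # iterations i = 0 and i = 1 write rows [0:10] and [10:20], while the
    # third iteration hits the ``start_i >= end_i`` guard and breaks.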
def _query_iterator(self, result, chunksize, columns, coerce_float=True,
parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
self.frame = DataFrame.from_records(
data, columns=columns, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
yield self.frame
def read(self, coerce_float=True, parse_dates=None, columns=None,
chunksize=None):
if columns is not None and len(columns) > 0:
from sqlalchemy import select
cols = [self.table.c[n] for n in columns]
if self.index is not None:
[cols.insert(0, self.table.c[idx]) for idx in self.index[::-1]]
sql_select = select(cols)
else:
sql_select = self.table.select()
result = self.pd_sql.execute(sql_select)
column_names = result.keys()
if chunksize is not None:
return self._query_iterator(result, chunksize, column_names,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = result.fetchall()
self.frame = DataFrame.from_records(
data, columns=column_names, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
return self.frame
def _index_name(self, index, index_label):
# for writing: index=True to include index in sql table
if index is True:
nlevels = self.frame.index.nlevels
# if index_label is specified, set this as index name(s)
if index_label is not None:
if not isinstance(index_label, list):
index_label = [index_label]
if len(index_label) != nlevels:
raise ValueError(
"Length of 'index_label' should match number of "
"levels, which is {0}".format(nlevels))
else:
return index_label
# return the used column labels for the index columns
if (nlevels == 1 and 'index' not in self.frame.columns and
self.frame.index.name is None):
return ['index']
else:
return [l if l is not None else "level_{0}".format(i)
for i, l in enumerate(self.frame.index.names)]
# for reading: index=(list of) string to specify column to set as index
elif isinstance(index, string_types):
return [index]
elif isinstance(index, list):
return index
else:
return None
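    # Examples of the resolution above (added for clarity): when writing with
    # index=True, an unnamed single-level index becomes ['index'], and a
    # two-level MultiIndex with names ('a', None) becomes ['a', 'level_1'];
    # when reading, index='col' simply becomes ['col'].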
def _get_column_names_and_types(self, dtype_mapper):
column_names_and_types = []
if self.index is not None:
for i, idx_label in enumerate(self.index):
idx_type = dtype_mapper(
self.frame.index._get_level_values(i))
column_names_and_types.append((text_type(idx_label),
idx_type, True))
column_names_and_types += [
(text_type(self.frame.columns[i]),
dtype_mapper(self.frame.iloc[:, i]),
False)
for i in range(len(self.frame.columns))
]
return column_names_and_types
def _create_table_setup(self):
from sqlalchemy import Table, Column, PrimaryKeyConstraint
column_names_and_types = \
self._get_column_names_and_types(self._sqlalchemy_type)
columns = [Column(name, typ, index=is_index)
for name, typ, is_index in column_names_and_types]
if self.keys is not None:
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
pkc = PrimaryKeyConstraint(*keys, name=self.name + '_pk')
columns.append(pkc)
schema = self.schema or self.pd_sql.meta.schema
        # At this point, attach to a new MetaData object; only attach to
        # self.meta once the table is created.
from sqlalchemy.schema import MetaData
meta = MetaData(self.pd_sql, schema=schema)
return Table(self.name, meta, *columns, schema=schema)
def _harmonize_columns(self, parse_dates=None):
"""
Make the DataFrame's column types align with the SQL table
column types.
Need to work around limited NA value support. Floats are always
fine, ints must always be floats if there are Null values.
        Booleans are hard because converting a bool column with None replaces
        all Nones with False. Therefore only convert bools if there are no
        NA values.
Datetimes should already be converted to np.datetime64 if supported,
but here we also force conversion if required
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for sql_col in self.table.columns:
col_name = sql_col.name
try:
df_col = self.frame[col_name]
# the type the dataframe column should have
col_type = self._get_dtype(sql_col.type)
if (col_type is datetime or col_type is date or
col_type is DatetimeTZDtype):
self.frame[col_name] = _handle_date_column(df_col)
elif col_type is float:
# floats support NA, can always convert!
self.frame[col_name] = df_col.astype(col_type, copy=False)
elif len(df_col) == df_col.count():
# No NA values, can convert ints and bools
if col_type is np.dtype('int64') or col_type is bool:
self.frame[col_name] = df_col.astype(
col_type, copy=False)
# Handle date parsing
if col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
self.frame[col_name] = _handle_date_column(
df_col, format=fmt)
except KeyError:
pass # this column not in results
def _get_notnull_col_dtype(self, col):
"""
Infer datatype of the Series col. In case the dtype of col is 'object'
and it contains NA values, this infers the datatype of the not-NA
values. Needed for inserting typed data containing NULLs, GH8778.
"""
col_for_inference = col
if col.dtype == 'object':
notnulldata = col[~isnull(col)]
if len(notnulldata):
col_for_inference = notnulldata
return lib.infer_dtype(col_for_inference)
def _sqlalchemy_type(self, col):
dtype = self.dtype or {}
if col.name in dtype:
return self.dtype[col.name]
col_type = self._get_notnull_col_dtype(col)
from sqlalchemy.types import (BigInteger, Integer, Float,
Text, Boolean,
DateTime, Date, Time)
if col_type == 'datetime64' or col_type == 'datetime':
try:
tz = col.tzinfo # noqa
return DateTime(timezone=True)
except:
return DateTime
if col_type == 'timedelta64':
warnings.warn("the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the "
"database.", UserWarning, stacklevel=8)
return BigInteger
elif col_type == 'floating':
if col.dtype == 'float32':
return Float(precision=23)
else:
return Float(precision=53)
elif col_type == 'integer':
if col.dtype == 'int32':
return Integer
else:
return BigInteger
elif col_type == 'boolean':
return Boolean
elif col_type == 'date':
return Date
elif col_type == 'time':
return Time
elif col_type == 'complex':
raise ValueError('Complex datatypes not supported')
return Text
def _get_dtype(self, sqltype):
from sqlalchemy.types import (Integer, Float, Boolean, DateTime,
Date, TIMESTAMP)
if isinstance(sqltype, Float):
return float
elif isinstance(sqltype, Integer):
# TODO: Refine integer size.
return np.dtype('int64')
elif isinstance(sqltype, TIMESTAMP):
# we have a timezone capable type
if not sqltype.timezone:
return datetime
return DatetimeTZDtype
elif isinstance(sqltype, DateTime):
# Caution: np.datetime64 is also a subclass of np.number.
return datetime
elif isinstance(sqltype, Date):
return date
elif isinstance(sqltype, Boolean):
return bool
return object
class PandasSQL(PandasObject):
"""
    Subclasses should define read_sql and to_sql.
"""
def read_sql(self, *args, **kwargs):
raise ValueError("PandasSQL must be created with an SQLAlchemy "
"connectable or sqlite connection")
def to_sql(self, *args, **kwargs):
raise ValueError("PandasSQL must be created with an SQLAlchemy "
"connectable or sqlite connection")
class SQLDatabase(PandasSQL):
"""
    This class enables conversion between DataFrame and SQL databases
    using SQLAlchemy to handle database abstraction.
Parameters
----------
engine : SQLAlchemy connectable
Connectable to connect with the database. Using SQLAlchemy makes it
possible to use any DB supported by that library.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
meta : SQLAlchemy MetaData object, default None
        If provided, this MetaData object is used instead of a newly
        created one. This allows specifying database-flavor-specific
        arguments in the MetaData object.
"""
def __init__(self, engine, schema=None, meta=None):
self.connectable = engine
if not meta:
from sqlalchemy.schema import MetaData
meta = MetaData(self.connectable, schema=schema)
self.meta = meta
@contextmanager
def run_transaction(self):
with self.connectable.begin() as tx:
if hasattr(tx, 'execute'):
yield tx
else:
yield self.connectable
def execute(self, *args, **kwargs):
"""Simple passthrough to SQLAlchemy connectable"""
return self.connectable.execute(*args, **kwargs)
def read_table(self, table_name, index_col=None, coerce_float=True,
parse_dates=None, columns=None, schema=None,
chunksize=None):
"""Read SQL database table into a DataFrame.
Parameters
----------
table_name : string
Name of SQL table in database
index_col : string, optional, default: None
Column to set as index
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point. This can result in
loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg}``, where the arg corresponds
to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite
columns : list, default: None
List of column names to select from sql table
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If specified, this overwrites the default
schema of the SQLDatabase object.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See also
--------
pandas.read_sql_table
SQLDatabase.read_query
"""
table = SQLTable(table_name, self, index=index_col, schema=schema)
return table.read(coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns,
chunksize=chunksize)
@staticmethod
def _query_iterator(result, chunksize, columns, index_col=None,
coerce_float=True, parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
yield _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
def read_query(self, sql, index_col=None, coerce_float=True,
parse_dates=None, params=None, chunksize=None):
"""Read SQL query into a DataFrame.
Parameters
----------
sql : string
SQL query to be executed
index_col : string, optional, default: None
Column name to use as index for the returned DataFrame object.
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
            E.g. for psycopg2, the syntax is %(name)s, so use
            params={'name': 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict
corresponds to the keyword arguments of
:func:`pandas.to_datetime` Especially useful with databases
without native Datetime support, such as SQLite
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql
"""
args = _convert_params(sql, params)
result = self.execute(*args)
columns = result.keys()
if chunksize is not None:
return self._query_iterator(result, chunksize, columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = result.fetchall()
frame = _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
return frame
read_sql = read_query
def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=None, schema=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table
if_exists : {'fail', 'replace', 'append'}, default 'fail'
            - fail: If table exists, raise a ValueError.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If specified, this overwrites the default
schema of the SQLDatabase object.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
            Optionally specify the datatype for columns. The SQL type should
be a SQLAlchemy type. If all columns are of the same type, one
single value can be used.
"""
if dtype and not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
if dtype is not None:
from sqlalchemy.types import to_instance, TypeEngine
for col, my_type in dtype.items():
if not isinstance(to_instance(my_type), TypeEngine):
raise ValueError('The type of %s is not a SQLAlchemy '
'type ' % col)
table = SQLTable(name, self, frame=frame, index=index,
if_exists=if_exists, index_label=index_label,
schema=schema, dtype=dtype)
table.create()
table.insert(chunksize)
if (not name.isdigit() and not name.islower()):
# check for potentially case sensitivity issues (GH7815)
# Only check when name is not a number and name is not lower case
engine = self.connectable.engine
with self.connectable.connect() as conn:
table_names = engine.table_names(
schema=schema or self.meta.schema,
connection=conn,
)
if name not in table_names:
msg = (
"The provided table name '{0}' is not found exactly as "
"such in the database after writing the table, possibly "
"due to case sensitivity issues. Consider using lower "
"case table names."
).format(name)
warnings.warn(msg, UserWarning)
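    # Usage sketch (added for clarity; the table name is hypothetical): with
    # ``db`` an SQLDatabase wrapping an engine, a round trip looks like
    #   db.to_sql(frame, 'demo', if_exists='replace')
    #   frame_back = db.read_table('demo')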
@property
def tables(self):
return self.meta.tables
def has_table(self, name, schema=None):
return self.connectable.run_callable(
self.connectable.dialect.has_table,
name,
schema or self.meta.schema,
)
def get_table(self, table_name, schema=None):
schema = schema or self.meta.schema
if schema:
tbl = self.meta.tables.get('.'.join([schema, table_name]))
else:
tbl = self.meta.tables.get(table_name)
# Avoid casting double-precision floats into decimals
from sqlalchemy import Numeric
for column in tbl.columns:
if isinstance(column.type, Numeric):
column.type.asdecimal = False
return tbl
def drop_table(self, table_name, schema=None):
schema = schema or self.meta.schema
if self.has_table(table_name, schema):
self.meta.reflect(only=[table_name], schema=schema)
self.get_table(table_name, schema).drop()
self.meta.clear()
def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
table = SQLTable(table_name, self, frame=frame, index=False, keys=keys,
dtype=dtype)
return str(table.sql_schema())
# ---- SQL without SQLAlchemy ---
# sqlite-specific sql strings and handler class
# dictionary used for readability purposes
_SQL_TYPES = {
'string': 'TEXT',
'floating': 'REAL',
'integer': 'INTEGER',
'datetime': 'TIMESTAMP',
'date': 'DATE',
'time': 'TIME',
'boolean': 'INTEGER',
}
def _get_unicode_name(name):
try:
uname = text_type(name).encode("utf-8", "strict").decode("utf-8")
except UnicodeError:
raise ValueError("Cannot convert identifier to UTF-8: '%s'" % name)
return uname
def _get_valid_sqlite_name(name):
# See http://stackoverflow.com/questions/6514274/how-do-you-escape-strings\
# -for-sqlite-table-column-names-in-python
# Ensure the string can be encoded as UTF-8.
# Ensure the string does not include any NUL characters.
# Replace all " with "".
# Wrap the entire thing in double quotes.
uname = _get_unicode_name(name)
if not len(uname):
raise ValueError("Empty table or column name specified")
nul_index = uname.find("\x00")
if nul_index >= 0:
raise ValueError('SQLite identifier cannot contain NULs')
return '"' + uname.replace('"', '""') + '"'
_SAFE_NAMES_WARNING = ("The spaces in these column names will not be changed. "
"In pandas versions < 0.14, spaces were converted to "
"underscores.")
class SQLiteTable(SQLTable):
"""
Patch the SQLTable for fallback support.
    Instead of a table variable, just use the CREATE TABLE statement.
"""
def __init__(self, *args, **kwargs):
# GH 8341
# register an adapter callable for datetime.time object
import sqlite3
# this will transform time(12,34,56,789) into '12:34:56.000789'
# (this is what sqlalchemy does)
sqlite3.register_adapter(time, lambda _: _.strftime("%H:%M:%S.%f"))
super(SQLiteTable, self).__init__(*args, **kwargs)
def sql_schema(self):
return str(";\n".join(self.table))
def _execute_create(self):
with self.pd_sql.run_transaction() as conn:
for stmt in self.table:
conn.execute(stmt)
def insert_statement(self):
names = list(map(text_type, self.frame.columns))
wld = '?' # wildcard char
escape = _get_valid_sqlite_name
if self.index is not None:
[names.insert(0, idx) for idx in self.index[::-1]]
bracketed_names = [escape(column) for column in names]
col_names = ','.join(bracketed_names)
wildcards = ','.join([wld] * len(names))
insert_statement = 'INSERT INTO %s (%s) VALUES (%s)' % (
escape(self.name), col_names, wildcards)
return insert_statement
def _execute_insert(self, conn, keys, data_iter):
data_list = list(data_iter)
conn.executemany(self.insert_statement(), data_list)
def _create_table_setup(self):
"""
        Return a list of SQL statements that create a table reflecting the
structure of a DataFrame. The first entry will be a CREATE TABLE
statement while the rest will be CREATE INDEX statements
"""
column_names_and_types = \
self._get_column_names_and_types(self._sql_type_name)
        pat = re.compile(r'\s+')
column_names = [col_name for col_name, _, _ in column_names_and_types]
if any(map(pat.search, column_names)):
warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)
escape = _get_valid_sqlite_name
create_tbl_stmts = [escape(cname) + ' ' + ctype
for cname, ctype, _ in column_names_and_types]
if self.keys is not None and len(self.keys):
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
cnames_br = ", ".join([escape(c) for c in keys])
create_tbl_stmts.append(
"CONSTRAINT {tbl}_pk PRIMARY KEY ({cnames_br})".format(
tbl=self.name, cnames_br=cnames_br))
create_stmts = ["CREATE TABLE " + escape(self.name) + " (\n" +
',\n '.join(create_tbl_stmts) + "\n)"]
ix_cols = [cname for cname, _, is_index in column_names_and_types
if is_index]
if len(ix_cols):
cnames = "_".join(ix_cols)
cnames_br = ",".join([escape(c) for c in ix_cols])
create_stmts.append(
"CREATE INDEX " + escape("ix_" + self.name + "_" + cnames) +
"ON " + escape(self.name) + " (" + cnames_br + ")")
return create_stmts
def _sql_type_name(self, col):
dtype = self.dtype or {}
if col.name in dtype:
return dtype[col.name]
col_type = self._get_notnull_col_dtype(col)
if col_type == 'timedelta64':
warnings.warn("the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the "
"database.", UserWarning, stacklevel=8)
col_type = "integer"
elif col_type == "datetime64":
col_type = "datetime"
elif col_type == "empty":
col_type = "string"
elif col_type == "complex":
raise ValueError('Complex datatypes not supported')
if col_type not in _SQL_TYPES:
col_type = "string"
return _SQL_TYPES[col_type]
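    # Example of the mapping above (added for clarity): a datetime64 column
    # maps to 'datetime' and is stored as TIMESTAMP, a bool column maps to
    # 'boolean' and is stored as INTEGER, and any unrecognised dtype falls
    # back to TEXT via the 'string' entry of _SQL_TYPES.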
class SQLiteDatabase(PandasSQL):
"""
Version of SQLDatabase to support sqlite connections (fallback without
sqlalchemy). This should only be used internally.
Parameters
----------
con : sqlite connection object
"""
def __init__(self, con, flavor=None, is_cursor=False):
_validate_flavor_parameter(flavor)
self.is_cursor = is_cursor
self.con = con
@contextmanager
def run_transaction(self):
cur = self.con.cursor()
try:
yield cur
self.con.commit()
except:
self.con.rollback()
raise
finally:
cur.close()
def execute(self, *args, **kwargs):
if self.is_cursor:
cur = self.con
else:
cur = self.con.cursor()
try:
if kwargs:
cur.execute(*args, **kwargs)
else:
cur.execute(*args)
return cur
except Exception as exc:
try:
self.con.rollback()
except Exception: # pragma: no cover
ex = DatabaseError("Execution failed on sql: %s\n%s\nunable"
" to rollback" % (args[0], exc))
raise_with_traceback(ex)
ex = DatabaseError(
"Execution failed on sql '%s': %s" % (args[0], exc))
raise_with_traceback(ex)
@staticmethod
def _query_iterator(cursor, chunksize, columns, index_col=None,
coerce_float=True, parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = cursor.fetchmany(chunksize)
if type(data) == tuple:
data = list(data)
if not data:
cursor.close()
break
else:
yield _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
def read_query(self, sql, index_col=None, coerce_float=True, params=None,
parse_dates=None, chunksize=None):
args = _convert_params(sql, params)
cursor = self.execute(*args)
columns = [col_desc[0] for col_desc in cursor.description]
if chunksize is not None:
return self._query_iterator(cursor, chunksize, columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = self._fetchall_as_list(cursor)
cursor.close()
frame = _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
return frame
def _fetchall_as_list(self, cur):
result = cur.fetchall()
if not isinstance(result, list):
result = list(result)
return result
def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=None, schema=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
        frame : DataFrame
        name : string
            Name of SQL table
        if_exists : {'fail', 'replace', 'append'}, default 'fail'
            fail: If table exists, raise a ValueError.
            replace: If table exists, drop it, recreate it, and insert data.
            append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
            Ignored parameter included for compatibility with SQLAlchemy
version of ``to_sql``.
chunksize : int, default None
If not None, then rows will be written in batches of this
size at a time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
            Optionally specify the datatype for columns. The SQL type should
be a string. If all columns are of the same type, one single value
can be used.
"""
if dtype and not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
if dtype is not None:
for col, my_type in dtype.items():
if not isinstance(my_type, str):
raise ValueError('%s (%s) not a string' % (
col, str(my_type)))
table = SQLiteTable(name, self, frame=frame, index=index,
if_exists=if_exists, index_label=index_label,
dtype=dtype)
table.create()
table.insert(chunksize)
def has_table(self, name, schema=None):
# TODO(wesm): unused?
# escape = _get_valid_sqlite_name
# esc_name = escape(name)
wld = '?'
query = ("SELECT name FROM sqlite_master "
"WHERE type='table' AND name=%s;") % wld
return len(self.execute(query, [name, ]).fetchall()) > 0
def get_table(self, table_name, schema=None):
return None # not supported in fallback mode
def drop_table(self, name, schema=None):
drop_sql = "DROP TABLE %s" % _get_valid_sqlite_name(name)
self.execute(drop_sql)
def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
table = SQLiteTable(table_name, self, frame=frame, index=False,
keys=keys, dtype=dtype)
return str(table.sql_schema())
def get_schema(frame, name, flavor=None, keys=None, con=None, dtype=None):
"""
Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : string
name of SQL table
    keys : string or sequence, default: None
        Columns to use as primary key
    con : an open SQL database connection object or a SQLAlchemy connectable,
        default: None
        Using SQLAlchemy makes it possible to use any DB supported by that
        library. If a DBAPI2 object, only sqlite3 is supported.
flavor : 'sqlite', default None
DEPRECATED: this parameter will be removed in a future version
dtype : dict of column name to SQL type, default None
        Optionally specify the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
"""
pandas_sql = pandasSQL_builder(con=con, flavor=flavor)
return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype)
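# Minimal usage sketch (added for clarity, not part of pandas itself).  With
# ``con=None`` the sqlite fallback is used, so the generated statement draws
# its column types from the _SQL_TYPES mapping above.
def _example_get_schema():  # pragma: no cover
    frame = DataFrame({'a': [1, 2], 'b': [1.5, 2.5]})
    # Returns roughly: CREATE TABLE "demo" ("a" INTEGER, "b" REAL)
    return get_schema(frame, 'demo')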
| agpl-3.0 |
vshtanko/scikit-learn | sklearn/cluster/tests/test_birch.py | 342 | 5603 | """
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
# n_samples * n_samples_per_cluster
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
# Test that n_clusters = Agglomerative Clustering gives
# the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
    # Test that the leaf subclusters have a radius no greater than the threshold
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
| bsd-3-clause |
liuwenf/moose | modules/porous_flow/doc/tests/dispersion.py | 14 | 1881 | #!/usr/bin/env python
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import erfc
import pandas as pd
#
# Diffusion-only test
#
# Read MOOSE simulation data
data = pd.read_csv("../../tests/dispersion/diff01_out_xmass_0021.csv")
# The analytical solution is erfc(u) where u is a similarity variable
x = np.linspace(0,10,100)
t = 20
d = 1
tau = 0.1
D = d*tau
u = x/(2*np.sqrt(D*t))
plt.figure(1)
plt.plot(x, erfc(u), label = 'Analytical')
plt.plot(data.x, data.massfrac0, 'o', label = 'MOOSE')
plt.xlabel('x (m)')
plt.ylabel('Mass fraction (-)')
plt.legend()
plt.title('Mass fraction (t = 50 s)')
plt.ylim([-0.05,1])
plt.savefig("diffusion_fig.pdf")
#
# Dispersion tests
#
def expected(x,t):
porosity = 0.3
alphal = 0.2
v = 1.05e-3 / porosity
D = alphal * v
return 0.5 * erfc((x - v * t)/(2 *np.sqrt(D * t))) + np.sqrt(v * v * t/(np.pi * D)) * \
np.exp(- (x - v * t)**2/(4 * D * t)) - 0.5 * (1 + v * x / D + v * v * t / D) * np.exp(v * x / D) *\
erfc((x+v*t)/(2*np.sqrt(D*t)))
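# Note (added for clarity): in ``expected`` above, the pore velocity v is the
# Darcy flux divided by the porosity (1.05e-3 / 0.3) and the longitudinal
# dispersion coefficient is D = alphal * v, where alphal is the longitudinal
# dispersivity; these values are presumably chosen to match the MOOSE input.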
# Read MOOSE simulation data
data = pd.read_csv("../../tests/dispersion/disp01_out_xmass_0029.csv")
plt.figure(2)
plt.plot(x, expected(x, 1e3), label = 'Analytical')
plt.plot(data.x, data.massfrac0, 'o', label = 'MOOSE')
plt.xlabel('x (m)')
plt.ylabel('Mass fraction (-)')
plt.legend()
plt.title('Mass fraction (t = 1000 s)')
plt.ylim([-0.05,1])
plt.savefig("dispersion_fig.pdf")
#
# Heavy dispersion test
#
# Read MOOSE simulation data
data = pd.read_csv("../../tests/dispersion/disp01_heavy_out_xmass_0105.csv")
plt.figure(3)
plt.plot(x, expected(x, 1e3), label = 'Analytical')
plt.plot(data.x, data.massfrac0, 'o', label = 'MOOSE', markevery=4)
plt.xlabel('x (m)')
plt.ylabel('Mass fraction (-)')
plt.legend()
plt.title('Mass fraction (t = 1000 s)')
plt.ylim([-0.05,1])
plt.savefig("dispersion_heavy_fig.pdf")
sys.exit(0)
| lgpl-2.1 |
zifeo/nest-simulator | topology/pynest/tests/test_plotting.py | 13 | 4111 | # -*- coding: utf-8 -*-
#
# test_plotting.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for basic topology hl_api functions.
NOTE: These tests only check whether the code runs; they do not check
whether the results produced are correct.
"""
import unittest
import nest
import nest.topology as topo
try:
import matplotlib.pyplot as plt
plt.figure() # make sure we can open a window; on Jenkins, DISPLAY is not set
PLOTTING_POSSIBLE = True
except:
PLOTTING_POSSIBLE = False
@unittest.skipIf(not PLOTTING_POSSIBLE, 'Plotting is impossible because matplotlib or display is missing')
class PlottingTestCase(unittest.TestCase):
def test_PlotLayer(self):
"""Test plotting layer."""
ldict = {'elements': 'iaf_neuron', 'rows': 3, 'columns':3,
'extent': [2., 2.], 'edge_wrap': True}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
topo.PlotLayer(l)
self.assertTrue(True)
def test_PlotTargets(self):
"""Test plotting targets."""
ldict = {'elements': ['iaf_neuron', 'iaf_psc_alpha'], 'rows': 3, 'columns':3,
'extent': [2., 2.], 'edge_wrap': True}
cdict = {'connection_type': 'divergent',
'mask': {'grid': {'rows':2, 'columns':2}}}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
ian = [gid for gid in nest.GetLeaves(l)[0]
if nest.GetStatus([gid], 'model')[0] == 'iaf_neuron']
ipa = [gid for gid in nest.GetLeaves(l)[0]
if nest.GetStatus([gid], 'model')[0] == 'iaf_psc_alpha']
# connect ian -> all using static_synapse
cdict.update({'sources': {'model': 'iaf_neuron'},
'synapse_model': 'static_synapse'})
topo.ConnectLayers(l, l, cdict)
for k in ['sources', 'synapse_model']: cdict.pop(k)
# connect ipa -> ipa using stdp_synapse
cdict.update({'sources': {'model': 'iaf_psc_alpha'},
'targets': {'model': 'iaf_psc_alpha'},
'synapse_model': 'stdp_synapse'})
topo.ConnectLayers(l, l, cdict)
for k in ['sources', 'targets', 'synapse_model']: cdict.pop(k)
ctr = topo.FindCenterElement(l)
fig = topo.PlotTargets(ctr, l)
fig.gca().set_title('Plain call')
self.assertTrue(True)
def test_PlotKernel(self):
"""Test plotting kernels."""
ldict = {'elements': 'iaf_neuron', 'rows': 3, 'columns':3,
'extent': [2., 2.], 'edge_wrap': True}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
f = plt.figure()
a1 = f.add_subplot(221)
ctr = topo.FindCenterElement(l)
topo.PlotKernel(a1, ctr, {'circular': {'radius': 1.}}, {'gaussian': {'sigma':0.2}})
a2 = f.add_subplot(222)
topo.PlotKernel(a2, ctr, {'doughnut': {'inner_radius': 0.5, 'outer_radius':0.75}})
a3 = f.add_subplot(223)
topo.PlotKernel(a3, ctr, {'rectangular': {'lower_left': [-.5,-.5],
'upper_right':[0.5,0.5]}})
self.assertTrue(True)
def suite():
suite = unittest.makeSuite(PlottingTestCase,'test')
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
import matplotlib.pyplot as plt
plt.show()
| gpl-2.0 |
kcavagnolo/astroML | book_figures/chapter6/fig_great_wall_MST.py | 3 | 5216 | """
Euclidean Minimum Spanning Tree
-------------------------------
Figure 6.15
An approximate Euclidean minimum spanning tree over the two-dimensional
projection of the SDSS Great Wall. The upper panel shows the input points, and
the middle panel shows the dendrogram connecting them. The lower panel shows
clustering based on this dendrogram, created by removing the largest 10% of the
graph edges, and keeping the remaining connected clusters with 30 or more
members.
Additional information
~~~~~~~~~~~~~~~~~~~~~~
This figure is based on the data presented in Figure 1 of Cowan & Ivezic
(2008). A similar figure appears in the book
"Statistics, Data Mining, and Machine Learning in Astronomy", by
Ivezic, Connolly, Vanderplas, and Gray (2013).
The three panels of this figure show a hierarchical clustering of a subset
of galaxies from the Sloan Digital Sky Survey (SDSS). This region is known
as the "SDSS Great Wall", and contains an extended cluster of several thousand
galaxies approximately 300Mpc (about 1 billion light years) from earth. The
top panel shows the positions of over 8,000 galaxies projected to a 2D plane
with Earth at the point (0, 0). The middle panel shows a dendrogram
representation of a Euclidean Minimum Spanning Tree (MST) over the galaxy
locations. By eliminating edges of a MST which are greater than a given
length, we can measure the amount of clustering at that scale: this is one
version of a class of models known as Hierarchical Clustering. The bottom
panel shows the results of this clustering approach for an edge cutoff of
3.5Mpc, along with a Gaussian Mixture Model fit to the distribution within
each cluster.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from __future__ import print_function, division
import numpy as np
from matplotlib import pyplot as plt
from scipy import sparse
from sklearn.mixture import GMM
from astroML.clustering import HierarchicalClustering, get_graph_segments
from astroML.datasets import fetch_great_wall
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# get data
X = fetch_great_wall()
xmin, xmax = (-375, -175)
ymin, ymax = (-300, 200)
#------------------------------------------------------------
# Compute the MST clustering model
n_neighbors = 10
edge_cutoff = 0.9
cluster_cutoff = 10
model = HierarchicalClustering(n_neighbors=10,
edge_cutoff=edge_cutoff,
min_cluster_size=cluster_cutoff)
model.fit(X)
print(" scale: %2g Mpc" % np.percentile(model.full_tree_.data,
100 * edge_cutoff))
n_components = model.n_components_
labels = model.labels_
#------------------------------------------------------------
# Get the x, y coordinates of the beginning and end of each line segment
T_x, T_y = get_graph_segments(model.X_train_,
model.full_tree_)
T_trunc_x, T_trunc_y = get_graph_segments(model.X_train_,
model.cluster_graph_)
#------------------------------------------------------------
# Fit a GMM to each individual cluster
Nx = 100
Ny = 250
Xgrid = np.vstack(map(np.ravel, np.meshgrid(np.linspace(xmin, xmax, Nx),
np.linspace(ymin, ymax, Ny)))).T
density = np.zeros(Xgrid.shape[0])
for i in range(n_components):
ind = (labels == i)
Npts = ind.sum()
Nclusters = min(12, Npts // 5)
gmm = GMM(Nclusters, random_state=0).fit(X[ind])
dens = np.exp(gmm.score(Xgrid))
density += dens / dens.max()
density = density.reshape((Ny, Nx))
#----------------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 6))
fig.subplots_adjust(hspace=0, left=0.1, right=0.95, bottom=0.1, top=0.9)
ax = fig.add_subplot(311, aspect='equal')
ax.scatter(X[:, 1], X[:, 0], s=1, lw=0, c='k')
ax.set_xlim(ymin, ymax)
ax.set_ylim(xmin, xmax)
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.set_ylabel('(Mpc)')
ax = fig.add_subplot(312, aspect='equal')
ax.plot(T_y, T_x, c='k', lw=0.5)
ax.set_xlim(ymin, ymax)
ax.set_ylim(xmin, xmax)
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.set_ylabel('(Mpc)')
ax = fig.add_subplot(313, aspect='equal')
ax.plot(T_trunc_y, T_trunc_x, c='k', lw=0.5)
ax.imshow(density.T, origin='lower', cmap=plt.cm.hot_r,
extent=[ymin, ymax, xmin, xmax])
ax.set_xlim(ymin, ymax)
ax.set_ylim(xmin, xmax)
ax.set_xlabel('(Mpc)')
ax.set_ylabel('(Mpc)')
plt.show()
| bsd-2-clause |
aflaxman/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 66 | 8261 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr, decimal=5)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=2)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features + 1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42, algorithm=algo)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
def test_singular_values():
# Check that the TruncatedSVD output has the correct singular values
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
apca = TruncatedSVD(n_components=2, algorithm='arpack',
random_state=rng).fit(X)
    rpca = TruncatedSVD(n_components=2, algorithm='randomized',
                        random_state=rng).fit(X)
assert_array_almost_equal(apca.singular_values_, rpca.singular_values_, 12)
# Compare to the Frobenius norm
X_apca = apca.transform(X)
X_rpca = rpca.transform(X)
assert_array_almost_equal(np.sum(apca.singular_values_**2.0),
np.linalg.norm(X_apca, "fro")**2.0, 12)
assert_array_almost_equal(np.sum(rpca.singular_values_**2.0),
np.linalg.norm(X_rpca, "fro")**2.0, 12)
# Compare to the 2-norms of the score vectors
assert_array_almost_equal(apca.singular_values_,
np.sqrt(np.sum(X_apca**2.0, axis=0)), 12)
assert_array_almost_equal(rpca.singular_values_,
np.sqrt(np.sum(X_rpca**2.0, axis=0)), 12)
# Set the singular values and see what we get back
rng = np.random.RandomState(0)
n_samples = 100
n_features = 110
X = rng.randn(n_samples, n_features)
apca = TruncatedSVD(n_components=3, algorithm='arpack',
random_state=rng)
rpca = TruncatedSVD(n_components=3, algorithm='randomized',
random_state=rng)
X_apca = apca.fit_transform(X)
X_rpca = rpca.fit_transform(X)
X_apca /= np.sqrt(np.sum(X_apca**2.0, axis=0))
X_rpca /= np.sqrt(np.sum(X_rpca**2.0, axis=0))
X_apca[:, 0] *= 3.142
X_apca[:, 1] *= 2.718
X_rpca[:, 0] *= 3.142
X_rpca[:, 1] *= 2.718
X_hat_apca = np.dot(X_apca, apca.components_)
X_hat_rpca = np.dot(X_rpca, rpca.components_)
apca.fit(X_hat_apca)
rpca.fit(X_hat_rpca)
assert_array_almost_equal(apca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(rpca.singular_values_, [3.142, 2.718, 1.0], 14)
| bsd-3-clause |
Petr-Kovalev/nupic-win32 | external/linux32/lib/python2.6/site-packages/matplotlib/_mathtext_data.py | 69 | 57988 | """
font data tables for truetype and afm computer modern fonts
"""
# this dict maps symbol names to fontnames, glyphindex. To get the
# glyph index from the character code, you have to use get_charmap
"""
from matplotlib.ft2font import FT2Font
font = FT2Font('/usr/local/share/matplotlib/cmr10.ttf')
items = font.get_charmap().items()
items.sort()
for charcode, glyphind in items:
print charcode, glyphind
"""
latex_to_bakoma = {
r'\oint' : ('cmex10', 45),
r'\bigodot' : ('cmex10', 50),
r'\bigoplus' : ('cmex10', 55),
r'\bigotimes' : ('cmex10', 59),
r'\sum' : ('cmex10', 51),
r'\prod' : ('cmex10', 24),
r'\int' : ('cmex10', 56),
r'\bigcup' : ('cmex10', 28),
r'\bigcap' : ('cmex10', 60),
r'\biguplus' : ('cmex10', 32),
r'\bigwedge' : ('cmex10', 4),
r'\bigvee' : ('cmex10', 37),
r'\coprod' : ('cmex10', 42),
r'\__sqrt__' : ('cmex10', 48),
r'\leftbrace' : ('cmex10', 92),
r'{' : ('cmex10', 92),
r'\{' : ('cmex10', 92),
r'\rightbrace' : ('cmex10', 130),
r'}' : ('cmex10', 130),
r'\}' : ('cmex10', 130),
r'\leftangle' : ('cmex10', 97),
r'\rightangle' : ('cmex10', 64),
r'\langle' : ('cmex10', 97),
r'\rangle' : ('cmex10', 64),
r'\widehat' : ('cmex10', 15),
r'\widetilde' : ('cmex10', 52),
r'\omega' : ('cmmi10', 29),
r'\varepsilon' : ('cmmi10', 20),
r'\vartheta' : ('cmmi10', 22),
r'\varrho' : ('cmmi10', 61),
r'\varsigma' : ('cmmi10', 41),
r'\varphi' : ('cmmi10', 6),
r'\leftharpoonup' : ('cmmi10', 108),
r'\leftharpoondown' : ('cmmi10', 68),
r'\rightharpoonup' : ('cmmi10', 117),
r'\rightharpoondown' : ('cmmi10', 77),
r'\triangleright' : ('cmmi10', 130),
r'\triangleleft' : ('cmmi10', 89),
r'.' : ('cmmi10', 51),
r',' : ('cmmi10', 44),
r'<' : ('cmmi10', 99),
r'/' : ('cmmi10', 98),
r'>' : ('cmmi10', 107),
r'\flat' : ('cmmi10', 131),
r'\natural' : ('cmmi10', 90),
r'\sharp' : ('cmmi10', 50),
r'\smile' : ('cmmi10', 97),
r'\frown' : ('cmmi10', 58),
r'\ell' : ('cmmi10', 102),
r'\imath' : ('cmmi10', 8),
r'\jmath' : ('cmmi10', 65),
r'\wp' : ('cmmi10', 14),
r'\alpha' : ('cmmi10', 13),
r'\beta' : ('cmmi10', 35),
r'\gamma' : ('cmmi10', 24),
r'\delta' : ('cmmi10', 38),
r'\epsilon' : ('cmmi10', 54),
r'\zeta' : ('cmmi10', 10),
r'\eta' : ('cmmi10', 5),
r'\theta' : ('cmmi10', 18),
r'\iota' : ('cmmi10', 28),
r'\lambda' : ('cmmi10', 9),
r'\mu' : ('cmmi10', 32),
r'\nu' : ('cmmi10', 34),
r'\xi' : ('cmmi10', 7),
r'\pi' : ('cmmi10', 36),
r'\kappa' : ('cmmi10', 30),
r'\rho' : ('cmmi10', 39),
r'\sigma' : ('cmmi10', 21),
r'\tau' : ('cmmi10', 43),
r'\upsilon' : ('cmmi10', 25),
r'\phi' : ('cmmi10', 42),
r'\chi' : ('cmmi10', 17),
r'\psi' : ('cmmi10', 31),
r'|' : ('cmsy10', 47),
r'\|' : ('cmsy10', 47),
r'(' : ('cmr10', 119),
r'\leftparen' : ('cmr10', 119),
r'\rightparen' : ('cmr10', 68),
r')' : ('cmr10', 68),
r'+' : ('cmr10', 76),
r'0' : ('cmr10', 40),
r'1' : ('cmr10', 100),
r'2' : ('cmr10', 49),
r'3' : ('cmr10', 110),
r'4' : ('cmr10', 59),
r'5' : ('cmr10', 120),
r'6' : ('cmr10', 69),
r'7' : ('cmr10', 127),
r'8' : ('cmr10', 77),
r'9' : ('cmr10', 22),
r':' : ('cmr10', 85),
r';' : ('cmr10', 31),
r'=' : ('cmr10', 41),
r'\leftbracket' : ('cmr10', 62),
r'[' : ('cmr10', 62),
r'\rightbracket' : ('cmr10', 72),
r']' : ('cmr10', 72),
r'\%' : ('cmr10', 48),
r'%' : ('cmr10', 48),
r'\$' : ('cmr10', 99),
r'@' : ('cmr10', 111),
r'\_' : ('cmtt10', 79),
r'\Gamma' : ('cmr10', 19),
r'\Delta' : ('cmr10', 6),
r'\Theta' : ('cmr10', 7),
r'\Lambda' : ('cmr10', 14),
r'\Xi' : ('cmr10', 3),
r'\Pi' : ('cmr10', 17),
r'\Sigma' : ('cmr10', 10),
r'\Upsilon' : ('cmr10', 11),
r'\Phi' : ('cmr10', 9),
r'\Psi' : ('cmr10', 15),
r'\Omega' : ('cmr10', 12),
# these are mathml names, I think. I'm just using them for the
# tex methods noted
r'\circumflexaccent' : ('cmr10', 124), # for \hat
r'\combiningbreve' : ('cmr10', 81), # for \breve
r'\combiningoverline' : ('cmr10', 131), # for \bar
r'\combininggraveaccent' : ('cmr10', 114), # for \grave
r'\combiningacuteaccent' : ('cmr10', 63), # for \accute
r'\combiningdiaeresis' : ('cmr10', 91), # for \ddot
r'\combiningtilde' : ('cmr10', 75), # for \tilde
r'\combiningrightarrowabove' : ('cmmi10', 110), # for \vec
r'\combiningdotabove' : ('cmr10', 26), # for \dot
r'\leftarrow' : ('cmsy10', 10),
r'\uparrow' : ('cmsy10', 25),
r'\downarrow' : ('cmsy10', 28),
r'\leftrightarrow' : ('cmsy10', 24),
r'\nearrow' : ('cmsy10', 99),
r'\searrow' : ('cmsy10', 57),
r'\simeq' : ('cmsy10', 108),
r'\Leftarrow' : ('cmsy10', 104),
r'\Rightarrow' : ('cmsy10', 112),
r'\Uparrow' : ('cmsy10', 60),
r'\Downarrow' : ('cmsy10', 68),
r'\Leftrightarrow' : ('cmsy10', 51),
r'\nwarrow' : ('cmsy10', 65),
r'\swarrow' : ('cmsy10', 116),
r'\propto' : ('cmsy10', 15),
r'\prime' : ('cmsy10', 73),
r"'" : ('cmsy10', 73),
r'\infty' : ('cmsy10', 32),
r'\in' : ('cmsy10', 59),
r'\ni' : ('cmsy10', 122),
r'\bigtriangleup' : ('cmsy10', 80),
r'\bigtriangledown' : ('cmsy10', 132),
r'\slash' : ('cmsy10', 87),
r'\forall' : ('cmsy10', 21),
r'\exists' : ('cmsy10', 5),
r'\neg' : ('cmsy10', 20),
r'\emptyset' : ('cmsy10', 33),
r'\Re' : ('cmsy10', 95),
r'\Im' : ('cmsy10', 52),
r'\top' : ('cmsy10', 100),
r'\bot' : ('cmsy10', 11),
r'\aleph' : ('cmsy10', 26),
r'\cup' : ('cmsy10', 6),
r'\cap' : ('cmsy10', 19),
r'\uplus' : ('cmsy10', 58),
r'\wedge' : ('cmsy10', 43),
r'\vee' : ('cmsy10', 96),
r'\vdash' : ('cmsy10', 109),
r'\dashv' : ('cmsy10', 66),
r'\lfloor' : ('cmsy10', 117),
r'\rfloor' : ('cmsy10', 74),
r'\lceil' : ('cmsy10', 123),
r'\rceil' : ('cmsy10', 81),
r'\lbrace' : ('cmsy10', 92),
r'\rbrace' : ('cmsy10', 105),
r'\mid' : ('cmsy10', 47),
r'\vert' : ('cmsy10', 47),
r'\Vert' : ('cmsy10', 44),
r'\updownarrow' : ('cmsy10', 94),
r'\Updownarrow' : ('cmsy10', 53),
r'\backslash' : ('cmsy10', 126),
r'\wr' : ('cmsy10', 101),
r'\nabla' : ('cmsy10', 110),
r'\sqcup' : ('cmsy10', 67),
r'\sqcap' : ('cmsy10', 118),
r'\sqsubseteq' : ('cmsy10', 75),
r'\sqsupseteq' : ('cmsy10', 124),
r'\S' : ('cmsy10', 129),
r'\dag' : ('cmsy10', 71),
r'\ddag' : ('cmsy10', 127),
r'\P' : ('cmsy10', 130),
r'\clubsuit' : ('cmsy10', 18),
r'\diamondsuit' : ('cmsy10', 34),
r'\heartsuit' : ('cmsy10', 22),
r'-' : ('cmsy10', 17),
r'\cdot' : ('cmsy10', 78),
r'\times' : ('cmsy10', 13),
r'*' : ('cmsy10', 9),
r'\ast' : ('cmsy10', 9),
r'\div' : ('cmsy10', 31),
r'\diamond' : ('cmsy10', 48),
r'\pm' : ('cmsy10', 8),
r'\mp' : ('cmsy10', 98),
r'\oplus' : ('cmsy10', 16),
r'\ominus' : ('cmsy10', 56),
r'\otimes' : ('cmsy10', 30),
r'\oslash' : ('cmsy10', 107),
r'\odot' : ('cmsy10', 64),
r'\bigcirc' : ('cmsy10', 115),
r'\circ' : ('cmsy10', 72),
r'\bullet' : ('cmsy10', 84),
r'\asymp' : ('cmsy10', 121),
r'\equiv' : ('cmsy10', 35),
r'\subseteq' : ('cmsy10', 103),
r'\supseteq' : ('cmsy10', 42),
r'\leq' : ('cmsy10', 14),
r'\geq' : ('cmsy10', 29),
r'\preceq' : ('cmsy10', 79),
r'\succeq' : ('cmsy10', 131),
r'\sim' : ('cmsy10', 27),
r'\approx' : ('cmsy10', 23),
r'\subset' : ('cmsy10', 50),
r'\supset' : ('cmsy10', 86),
r'\ll' : ('cmsy10', 85),
r'\gg' : ('cmsy10', 40),
r'\prec' : ('cmsy10', 93),
r'\succ' : ('cmsy10', 49),
r'\rightarrow' : ('cmsy10', 12),
r'\to' : ('cmsy10', 12),
r'\spadesuit' : ('cmsy10', 7),
}
latex_to_cmex = {
r'\__sqrt__' : 112,
r'\bigcap' : 92,
r'\bigcup' : 91,
r'\bigodot' : 75,
r'\bigoplus' : 77,
r'\bigotimes' : 79,
r'\biguplus' : 93,
r'\bigvee' : 95,
r'\bigwedge' : 94,
r'\coprod' : 97,
r'\int' : 90,
r'\leftangle' : 173,
r'\leftbrace' : 169,
r'\oint' : 73,
r'\prod' : 89,
r'\rightangle' : 174,
r'\rightbrace' : 170,
r'\sum' : 88,
r'\widehat' : 98,
r'\widetilde' : 101,
}
latex_to_standard = {
r'\cong' : ('psyr', 64),
r'\Delta' : ('psyr', 68),
r'\Phi' : ('psyr', 70),
r'\Gamma' : ('psyr', 89),
r'\alpha' : ('psyr', 97),
r'\beta' : ('psyr', 98),
r'\chi' : ('psyr', 99),
r'\delta' : ('psyr', 100),
r'\varepsilon' : ('psyr', 101),
r'\phi' : ('psyr', 102),
r'\gamma' : ('psyr', 103),
r'\eta' : ('psyr', 104),
r'\iota' : ('psyr', 105),
r'\varpsi' : ('psyr', 106),
r'\kappa' : ('psyr', 108),
r'\nu' : ('psyr', 110),
r'\pi' : ('psyr', 112),
r'\theta' : ('psyr', 113),
r'\rho' : ('psyr', 114),
r'\sigma' : ('psyr', 115),
r'\tau' : ('psyr', 116),
r'\upsilon' : ('psyr', 117),
r'\varpi' : ('psyr', 118),
r'\omega' : ('psyr', 119),
r'\xi' : ('psyr', 120),
r'\psi' : ('psyr', 121),
r'\zeta' : ('psyr', 122),
r'\sim' : ('psyr', 126),
r'\leq' : ('psyr', 163),
r'\infty' : ('psyr', 165),
r'\clubsuit' : ('psyr', 167),
r'\diamondsuit' : ('psyr', 168),
r'\heartsuit' : ('psyr', 169),
r'\spadesuit' : ('psyr', 170),
r'\leftrightarrow' : ('psyr', 171),
r'\leftarrow' : ('psyr', 172),
r'\uparrow' : ('psyr', 173),
r'\rightarrow' : ('psyr', 174),
r'\downarrow' : ('psyr', 175),
r'\pm' : ('psyr', 176),
r'\geq' : ('psyr', 179),
r'\times' : ('psyr', 180),
r'\propto' : ('psyr', 181),
r'\partial' : ('psyr', 182),
r'\bullet' : ('psyr', 183),
r'\div' : ('psyr', 184),
r'\neq' : ('psyr', 185),
r'\equiv' : ('psyr', 186),
r'\approx' : ('psyr', 187),
r'\ldots' : ('psyr', 188),
r'\aleph' : ('psyr', 192),
r'\Im' : ('psyr', 193),
r'\Re' : ('psyr', 194),
r'\wp' : ('psyr', 195),
r'\otimes' : ('psyr', 196),
r'\oplus' : ('psyr', 197),
r'\oslash' : ('psyr', 198),
r'\cap' : ('psyr', 199),
r'\cup' : ('psyr', 200),
r'\supset' : ('psyr', 201),
r'\supseteq' : ('psyr', 202),
r'\subset' : ('psyr', 204),
r'\subseteq' : ('psyr', 205),
r'\in' : ('psyr', 206),
r'\notin' : ('psyr', 207),
r'\angle' : ('psyr', 208),
r'\nabla' : ('psyr', 209),
r'\textregistered' : ('psyr', 210),
r'\copyright' : ('psyr', 211),
r'\texttrademark' : ('psyr', 212),
r'\Pi' : ('psyr', 213),
r'\prod' : ('psyr', 213),
r'\surd' : ('psyr', 214),
r'\__sqrt__' : ('psyr', 214),
r'\cdot' : ('psyr', 215),
r'\urcorner' : ('psyr', 216),
r'\vee' : ('psyr', 217),
r'\wedge' : ('psyr', 218),
r'\Leftrightarrow' : ('psyr', 219),
r'\Leftarrow' : ('psyr', 220),
r'\Uparrow' : ('psyr', 221),
r'\Rightarrow' : ('psyr', 222),
r'\Downarrow' : ('psyr', 223),
r'\Diamond' : ('psyr', 224),
r'\langle' : ('psyr', 225),
r'\Sigma' : ('psyr', 229),
r'\sum' : ('psyr', 229),
r'\forall' : ('psyr', 34),
r'\exists' : ('psyr', 36),
r'\lceil' : ('psyr', 233),
r'\lbrace' : ('psyr', 123),
r'\Psi' : ('psyr', 89),
r'\bot' : ('psyr', 0136),
r'\Omega' : ('psyr', 0127),
r'\leftbracket' : ('psyr', 0133),
r'\rightbracket' : ('psyr', 0135),
r'\leftbrace' : ('psyr', 123),
r'\leftparen' : ('psyr', 050),
r'\prime' : ('psyr', 0242),
r'\sharp' : ('psyr', 043),
r'\slash' : ('psyr', 057),
r'\Lambda' : ('psyr', 0114),
r'\neg' : ('psyr', 0330),
r'\Upsilon' : ('psyr', 0241),
r'\rightbrace' : ('psyr', 0175),
r'\rfloor' : ('psyr', 0373),
r'\lambda' : ('psyr', 0154),
r'\to' : ('psyr', 0256),
r'\Xi' : ('psyr', 0130),
r'\emptyset' : ('psyr', 0306),
r'\lfloor' : ('psyr', 0353),
r'\rightparen' : ('psyr', 051),
r'\rceil' : ('psyr', 0371),
r'\ni' : ('psyr', 047),
r'\epsilon' : ('psyr', 0145),
r'\Theta' : ('psyr', 0121),
r'\langle' : ('psyr', 0341),
r'\leftangle' : ('psyr', 0341),
r'\rangle' : ('psyr', 0361),
r'\rightangle' : ('psyr', 0361),
r'\rbrace' : ('psyr', 0175),
r'\circ' : ('psyr', 0260),
r'\diamond' : ('psyr', 0340),
r'\mu' : ('psyr', 0155),
r'\mid' : ('psyr', 0352),
r'\imath' : ('pncri8a', 105),
r'\%' : ('pncr8a', 37),
r'\$' : ('pncr8a', 36),
r'\{' : ('pncr8a', 123),
r'\}' : ('pncr8a', 125),
r'\backslash' : ('pncr8a', 92),
r'\ast' : ('pncr8a', 42),
r'\circumflexaccent' : ('pncri8a', 124), # for \hat
r'\combiningbreve' : ('pncri8a', 81), # for \breve
r'\combininggraveaccent' : ('pncri8a', 114), # for \grave
r'\combiningacuteaccent' : ('pncri8a', 63), # for \acute
r'\combiningdiaeresis' : ('pncri8a', 91), # for \ddot
r'\combiningtilde' : ('pncri8a', 75), # for \tilde
r'\combiningrightarrowabove' : ('pncri8a', 110), # for \vec
r'\combiningdotabove' : ('pncri8a', 26), # for \dot
}
# Automatically generated.
type12uni = {'uni24C8': 9416,
'aring': 229,
'uni22A0': 8864,
'uni2292': 8850,
'quotedblright': 8221,
'uni03D2': 978,
'uni2215': 8725,
'uni03D0': 976,
'V': 86,
'dollar': 36,
'uni301E': 12318,
'uni03D5': 981,
'four': 52,
'uni25A0': 9632,
'uni013C': 316,
'uni013B': 315,
'uni013E': 318,
'Yacute': 221,
'uni25DE': 9694,
'uni013F': 319,
'uni255A': 9562,
'uni2606': 9734,
'uni0180': 384,
'uni22B7': 8887,
'uni044F': 1103,
'uni22B5': 8885,
'uni22B4': 8884,
'uni22AE': 8878,
'uni22B2': 8882,
'uni22B1': 8881,
'uni22B0': 8880,
'uni25CD': 9677,
'uni03CE': 974,
'uni03CD': 973,
'uni03CC': 972,
'uni03CB': 971,
'uni03CA': 970,
'uni22B8': 8888,
'uni22C9': 8905,
'uni0449': 1097,
'uni20DD': 8413,
'uni20DC': 8412,
'uni20DB': 8411,
'uni2231': 8753,
'uni25CF': 9679,
'uni306E': 12398,
'uni03D1': 977,
'uni01A1': 417,
'uni20D7': 8407,
'uni03D6': 982,
'uni2233': 8755,
'uni20D2': 8402,
'uni20D1': 8401,
'uni20D0': 8400,
'P': 80,
'uni22BE': 8894,
'uni22BD': 8893,
'uni22BC': 8892,
'uni22BB': 8891,
'underscore': 95,
'uni03C8': 968,
'uni03C7': 967,
'uni0328': 808,
'uni03C5': 965,
'uni03C4': 964,
'uni03C3': 963,
'uni03C2': 962,
'uni03C1': 961,
'uni03C0': 960,
'uni2010': 8208,
'uni0130': 304,
'uni0133': 307,
'uni0132': 306,
'uni0135': 309,
'uni0134': 308,
'uni0137': 311,
'uni0136': 310,
'uni0139': 313,
'uni0138': 312,
'uni2244': 8772,
'uni229A': 8858,
'uni2571': 9585,
'uni0278': 632,
'uni2239': 8761,
'p': 112,
'uni3019': 12313,
'uni25CB': 9675,
'uni03DB': 987,
'uni03DC': 988,
'uni03DA': 986,
'uni03DF': 991,
'uni03DD': 989,
'uni013D': 317,
'uni220A': 8714,
'uni220C': 8716,
'uni220B': 8715,
'uni220E': 8718,
'uni220D': 8717,
'uni220F': 8719,
'uni22CC': 8908,
'Otilde': 213,
'uni25E5': 9701,
'uni2736': 10038,
'perthousand': 8240,
'zero': 48,
'uni279B': 10139,
'dotlessi': 305,
'uni2279': 8825,
'Scaron': 352,
'zcaron': 382,
'uni21D8': 8664,
'egrave': 232,
'uni0271': 625,
'uni01AA': 426,
'uni2332': 9010,
'section': 167,
'uni25E4': 9700,
'Icircumflex': 206,
'ntilde': 241,
'uni041E': 1054,
'ampersand': 38,
'uni041C': 1052,
'uni041A': 1050,
'uni22AB': 8875,
'uni21DB': 8667,
'dotaccent': 729,
'uni0416': 1046,
'uni0417': 1047,
'uni0414': 1044,
'uni0415': 1045,
'uni0412': 1042,
'uni0413': 1043,
'degree': 176,
'uni0411': 1041,
'K': 75,
'uni25EB': 9707,
'uni25EF': 9711,
'uni0418': 1048,
'uni0419': 1049,
'uni2263': 8803,
'uni226E': 8814,
'uni2251': 8785,
'uni02C8': 712,
'uni2262': 8802,
'acircumflex': 226,
'uni22B3': 8883,
'uni2261': 8801,
'uni2394': 9108,
'Aring': 197,
'uni2260': 8800,
'uni2254': 8788,
'uni0436': 1078,
'uni2267': 8807,
'k': 107,
'uni22C8': 8904,
'uni226A': 8810,
'uni231F': 8991,
'smalltilde': 732,
'uni2201': 8705,
'uni2200': 8704,
'uni2203': 8707,
'uni02BD': 701,
'uni2205': 8709,
'uni2204': 8708,
'Agrave': 192,
'uni2206': 8710,
'uni2209': 8713,
'uni2208': 8712,
'uni226D': 8813,
'uni2264': 8804,
'uni263D': 9789,
'uni2258': 8792,
'uni02D3': 723,
'uni02D2': 722,
'uni02D1': 721,
'uni02D0': 720,
'uni25E1': 9697,
'divide': 247,
'uni02D5': 725,
'uni02D4': 724,
'ocircumflex': 244,
'uni2524': 9508,
'uni043A': 1082,
'uni24CC': 9420,
'asciitilde': 126,
'uni22B9': 8889,
'uni24D2': 9426,
'uni211E': 8478,
'uni211D': 8477,
'uni24DD': 9437,
'uni211A': 8474,
'uni211C': 8476,
'uni211B': 8475,
'uni25C6': 9670,
'uni017F': 383,
'uni017A': 378,
'uni017C': 380,
'uni017B': 379,
'uni0346': 838,
'uni22F1': 8945,
'uni22F0': 8944,
'two': 50,
'uni2298': 8856,
'uni24D1': 9425,
'E': 69,
'uni025D': 605,
'scaron': 353,
'uni2322': 8994,
'uni25E3': 9699,
'uni22BF': 8895,
'F': 70,
'uni0440': 1088,
'uni255E': 9566,
'uni22BA': 8890,
'uni0175': 373,
'uni0174': 372,
'uni0177': 375,
'uni0176': 374,
'bracketleft': 91,
'uni0170': 368,
'uni0173': 371,
'uni0172': 370,
'asciicircum': 94,
'uni0179': 377,
'uni2590': 9616,
'uni25E2': 9698,
'uni2119': 8473,
'uni2118': 8472,
'uni25CC': 9676,
'f': 102,
'ordmasculine': 186,
'uni229B': 8859,
'uni22A1': 8865,
'uni2111': 8465,
'uni2110': 8464,
'uni2113': 8467,
'uni2112': 8466,
'mu': 181,
'uni2281': 8833,
'paragraph': 182,
'nine': 57,
'uni25EC': 9708,
'v': 118,
'uni040C': 1036,
'uni0113': 275,
'uni22D0': 8912,
'uni21CC': 8652,
'uni21CB': 8651,
'uni21CA': 8650,
'uni22A5': 8869,
'uni21CF': 8655,
'uni21CE': 8654,
'uni21CD': 8653,
'guilsinglleft': 8249,
'backslash': 92,
'uni2284': 8836,
'uni224E': 8782,
'uni224D': 8781,
'uni224F': 8783,
'uni224A': 8778,
'uni2287': 8839,
'uni224C': 8780,
'uni224B': 8779,
'uni21BD': 8637,
'uni2286': 8838,
'uni030F': 783,
'uni030D': 781,
'uni030E': 782,
'uni030B': 779,
'uni030C': 780,
'uni030A': 778,
'uni026E': 622,
'uni026D': 621,
'six': 54,
'uni026A': 618,
'uni026C': 620,
'uni25C1': 9665,
'uni20D6': 8406,
'uni045B': 1115,
'uni045C': 1116,
'uni256B': 9579,
'uni045A': 1114,
'uni045F': 1119,
'uni045E': 1118,
'A': 65,
'uni2569': 9577,
'uni0458': 1112,
'uni0459': 1113,
'uni0452': 1106,
'uni0453': 1107,
'uni2562': 9570,
'uni0451': 1105,
'uni0456': 1110,
'uni0457': 1111,
'uni0454': 1108,
'uni0455': 1109,
'icircumflex': 238,
'uni0307': 775,
'uni0304': 772,
'uni0305': 773,
'uni0269': 617,
'uni0268': 616,
'uni0300': 768,
'uni0301': 769,
'uni0265': 613,
'uni0264': 612,
'uni0267': 615,
'uni0266': 614,
'uni0261': 609,
'uni0260': 608,
'uni0263': 611,
'uni0262': 610,
'a': 97,
'uni2207': 8711,
'uni2247': 8775,
'uni2246': 8774,
'uni2241': 8769,
'uni2240': 8768,
'uni2243': 8771,
'uni2242': 8770,
'uni2312': 8978,
'ogonek': 731,
'uni2249': 8777,
'uni2248': 8776,
'uni3030': 12336,
'q': 113,
'uni21C2': 8642,
'uni21C1': 8641,
'uni21C0': 8640,
'uni21C7': 8647,
'uni21C6': 8646,
'uni21C5': 8645,
'uni21C4': 8644,
'uni225F': 8799,
'uni212C': 8492,
'uni21C8': 8648,
'uni2467': 9319,
'oacute': 243,
'uni028F': 655,
'uni028E': 654,
'uni026F': 623,
'uni028C': 652,
'uni028B': 651,
'uni028A': 650,
'uni2510': 9488,
'ograve': 242,
'edieresis': 235,
'uni22CE': 8910,
'uni22CF': 8911,
'uni219F': 8607,
'comma': 44,
'uni22CA': 8906,
'uni0429': 1065,
'uni03C6': 966,
'uni0427': 1063,
'uni0426': 1062,
'uni0425': 1061,
'uni0424': 1060,
'uni0423': 1059,
'uni0422': 1058,
'uni0421': 1057,
'uni0420': 1056,
'uni2465': 9317,
'uni24D0': 9424,
'uni2464': 9316,
'uni0430': 1072,
'otilde': 245,
'uni2661': 9825,
'uni24D6': 9430,
'uni2466': 9318,
'uni24D5': 9429,
'uni219A': 8602,
'uni2518': 9496,
'uni22B6': 8886,
'uni2461': 9313,
'uni24D4': 9428,
'uni2460': 9312,
'uni24EA': 9450,
'guillemotright': 187,
'ecircumflex': 234,
'greater': 62,
'uni2011': 8209,
'uacute': 250,
'uni2462': 9314,
'L': 76,
'bullet': 8226,
'uni02A4': 676,
'uni02A7': 679,
'cedilla': 184,
'uni02A2': 674,
'uni2015': 8213,
'uni22C4': 8900,
'uni22C5': 8901,
'uni22AD': 8877,
'uni22C7': 8903,
'uni22C0': 8896,
'uni2016': 8214,
'uni22C2': 8898,
'uni22C3': 8899,
'uni24CF': 9423,
'uni042F': 1071,
'uni042E': 1070,
'uni042D': 1069,
'ydieresis': 255,
'l': 108,
'logicalnot': 172,
'uni24CA': 9418,
'uni0287': 647,
'uni0286': 646,
'uni0285': 645,
'uni0284': 644,
'uni0283': 643,
'uni0282': 642,
'uni0281': 641,
'uni027C': 636,
'uni2664': 9828,
'exclamdown': 161,
'uni25C4': 9668,
'uni0289': 649,
'uni0288': 648,
'uni039A': 922,
'endash': 8211,
'uni2640': 9792,
'uni20E4': 8420,
'uni0473': 1139,
'uni20E1': 8417,
'uni2642': 9794,
'uni03B8': 952,
'uni03B9': 953,
'agrave': 224,
'uni03B4': 948,
'uni03B5': 949,
'uni03B6': 950,
'uni03B7': 951,
'uni03B0': 944,
'uni03B1': 945,
'uni03B2': 946,
'uni03B3': 947,
'uni2555': 9557,
'Adieresis': 196,
'germandbls': 223,
'Odieresis': 214,
'space': 32,
'uni0126': 294,
'uni0127': 295,
'uni0124': 292,
'uni0125': 293,
'uni0122': 290,
'uni0123': 291,
'uni0120': 288,
'uni0121': 289,
'quoteright': 8217,
'uni2560': 9568,
'uni2556': 9558,
'ucircumflex': 251,
'uni2561': 9569,
'uni2551': 9553,
'uni25B2': 9650,
'uni2550': 9552,
'uni2563': 9571,
'uni2553': 9555,
'G': 71,
'uni2564': 9572,
'uni2552': 9554,
'quoteleft': 8216,
'uni2565': 9573,
'uni2572': 9586,
'uni2568': 9576,
'uni2566': 9574,
'W': 87,
'uni214A': 8522,
'uni012F': 303,
'uni012D': 301,
'uni012E': 302,
'uni012B': 299,
'uni012C': 300,
'uni255C': 9564,
'uni012A': 298,
'uni2289': 8841,
'Q': 81,
'uni2320': 8992,
'uni2321': 8993,
'g': 103,
'uni03BD': 957,
'uni03BE': 958,
'uni03BF': 959,
'uni2282': 8834,
'uni2285': 8837,
'uni03BA': 954,
'uni03BB': 955,
'uni03BC': 956,
'uni2128': 8488,
'uni25B7': 9655,
'w': 119,
'uni0302': 770,
'uni03DE': 990,
'uni25DA': 9690,
'uni0303': 771,
'uni0463': 1123,
'uni0462': 1122,
'uni3018': 12312,
'uni2514': 9492,
'question': 63,
'uni25B3': 9651,
'uni24E1': 9441,
'one': 49,
'uni200A': 8202,
'uni2278': 8824,
'ring': 730,
'uni0195': 405,
'figuredash': 8210,
'uni22EC': 8940,
'uni0339': 825,
'uni0338': 824,
'uni0337': 823,
'uni0336': 822,
'uni0335': 821,
'uni0333': 819,
'uni0332': 818,
'uni0331': 817,
'uni0330': 816,
'uni01C1': 449,
'uni01C0': 448,
'uni01C3': 451,
'uni01C2': 450,
'uni2353': 9043,
'uni0308': 776,
'uni2218': 8728,
'uni2219': 8729,
'uni2216': 8726,
'uni2217': 8727,
'uni2214': 8724,
'uni0309': 777,
'uni2609': 9737,
'uni2213': 8723,
'uni2210': 8720,
'uni2211': 8721,
'uni2245': 8773,
'B': 66,
'uni25D6': 9686,
'iacute': 237,
'uni02E6': 742,
'uni02E7': 743,
'uni02E8': 744,
'uni02E9': 745,
'uni221D': 8733,
'uni221E': 8734,
'Ydieresis': 376,
'uni221C': 8732,
'uni22D7': 8919,
'uni221A': 8730,
'R': 82,
'uni24DC': 9436,
'uni033F': 831,
'uni033E': 830,
'uni033C': 828,
'uni033B': 827,
'uni033A': 826,
'b': 98,
'uni228A': 8842,
'uni22DB': 8923,
'uni2554': 9556,
'uni046B': 1131,
'uni046A': 1130,
'r': 114,
'uni24DB': 9435,
'Ccedilla': 199,
'minus': 8722,
'uni24DA': 9434,
'uni03F0': 1008,
'uni03F1': 1009,
'uni20AC': 8364,
'uni2276': 8822,
'uni24C0': 9408,
'uni0162': 354,
'uni0163': 355,
'uni011E': 286,
'uni011D': 285,
'uni011C': 284,
'uni011B': 283,
'uni0164': 356,
'uni0165': 357,
'Lslash': 321,
'uni0168': 360,
'uni0169': 361,
'uni25C9': 9673,
'uni02E5': 741,
'uni21C3': 8643,
'uni24C4': 9412,
'uni24E2': 9442,
'uni2277': 8823,
'uni013A': 314,
'uni2102': 8450,
'Uacute': 218,
'uni2317': 8983,
'uni2107': 8455,
'uni221F': 8735,
'yacute': 253,
'uni3012': 12306,
'Ucircumflex': 219,
'uni015D': 349,
'quotedbl': 34,
'uni25D9': 9689,
'uni2280': 8832,
'uni22AF': 8879,
'onehalf': 189,
'uni221B': 8731,
'Thorn': 222,
'uni2226': 8742,
'M': 77,
'uni25BA': 9658,
'uni2463': 9315,
'uni2336': 9014,
'eight': 56,
'uni2236': 8758,
'multiply': 215,
'uni210C': 8460,
'uni210A': 8458,
'uni21C9': 8649,
'grave': 96,
'uni210E': 8462,
'uni0117': 279,
'uni016C': 364,
'uni0115': 277,
'uni016A': 362,
'uni016F': 367,
'uni0112': 274,
'uni016D': 365,
'uni016E': 366,
'Ocircumflex': 212,
'uni2305': 8965,
'm': 109,
'uni24DF': 9439,
'uni0119': 281,
'uni0118': 280,
'uni20A3': 8355,
'uni20A4': 8356,
'uni20A7': 8359,
'uni2288': 8840,
'uni24C3': 9411,
'uni251C': 9500,
'uni228D': 8845,
'uni222F': 8751,
'uni222E': 8750,
'uni222D': 8749,
'uni222C': 8748,
'uni222B': 8747,
'uni222A': 8746,
'uni255B': 9563,
'Ugrave': 217,
'uni24DE': 9438,
'guilsinglright': 8250,
'uni250A': 9482,
'Ntilde': 209,
'uni0279': 633,
'questiondown': 191,
'uni256C': 9580,
'Atilde': 195,
'uni0272': 626,
'uni0273': 627,
'uni0270': 624,
'ccedilla': 231,
'uni0276': 630,
'uni0277': 631,
'uni0274': 628,
'uni0275': 629,
'uni2252': 8786,
'uni041F': 1055,
'uni2250': 8784,
'Z': 90,
'uni2256': 8790,
'uni2257': 8791,
'copyright': 169,
'uni2255': 8789,
'uni043D': 1085,
'uni043E': 1086,
'uni043F': 1087,
'yen': 165,
'uni041D': 1053,
'uni043B': 1083,
'uni043C': 1084,
'uni21B0': 8624,
'uni21B1': 8625,
'uni21B2': 8626,
'uni21B3': 8627,
'uni21B4': 8628,
'uni21B5': 8629,
'uni21B6': 8630,
'uni21B7': 8631,
'uni21B8': 8632,
'Eacute': 201,
'uni2311': 8977,
'uni2310': 8976,
'uni228F': 8847,
'uni25DB': 9691,
'uni21BA': 8634,
'uni21BB': 8635,
'uni21BC': 8636,
'uni2017': 8215,
'uni21BE': 8638,
'uni21BF': 8639,
'uni231C': 8988,
'H': 72,
'uni0293': 659,
'uni2202': 8706,
'uni22A4': 8868,
'uni231E': 8990,
'uni2232': 8754,
'uni225B': 8795,
'uni225C': 8796,
'uni24D9': 9433,
'uni225A': 8794,
'uni0438': 1080,
'uni0439': 1081,
'uni225D': 8797,
'uni225E': 8798,
'uni0434': 1076,
'X': 88,
'uni007F': 127,
'uni0437': 1079,
'Idieresis': 207,
'uni0431': 1073,
'uni0432': 1074,
'uni0433': 1075,
'uni22AC': 8876,
'uni22CD': 8909,
'uni25A3': 9635,
'bar': 124,
'uni24BB': 9403,
'uni037E': 894,
'uni027B': 635,
'h': 104,
'uni027A': 634,
'uni027F': 639,
'uni027D': 637,
'uni027E': 638,
'uni2227': 8743,
'uni2004': 8196,
'uni2225': 8741,
'uni2224': 8740,
'uni2223': 8739,
'uni2222': 8738,
'uni2221': 8737,
'uni2220': 8736,
'x': 120,
'uni2323': 8995,
'uni2559': 9561,
'uni2558': 9560,
'uni2229': 8745,
'uni2228': 8744,
'udieresis': 252,
'uni029D': 669,
'ordfeminine': 170,
'uni22CB': 8907,
'uni233D': 9021,
'uni0428': 1064,
'uni24C6': 9414,
'uni22DD': 8925,
'uni24C7': 9415,
'uni015C': 348,
'uni015B': 347,
'uni015A': 346,
'uni22AA': 8874,
'uni015F': 351,
'uni015E': 350,
'braceleft': 123,
'uni24C5': 9413,
'uni0410': 1040,
'uni03AA': 938,
'uni24C2': 9410,
'uni03AC': 940,
'uni03AB': 939,
'macron': 175,
'uni03AD': 941,
'uni03AF': 943,
'uni0294': 660,
'uni0295': 661,
'uni0296': 662,
'uni0297': 663,
'uni0290': 656,
'uni0291': 657,
'uni0292': 658,
'atilde': 227,
'Acircumflex': 194,
'uni2370': 9072,
'uni24C1': 9409,
'uni0298': 664,
'uni0299': 665,
'Oslash': 216,
'uni029E': 670,
'C': 67,
'quotedblleft': 8220,
'uni029B': 667,
'uni029C': 668,
'uni03A9': 937,
'uni03A8': 936,
'S': 83,
'uni24C9': 9417,
'uni03A1': 929,
'uni03A0': 928,
'exclam': 33,
'uni03A5': 933,
'uni03A4': 932,
'uni03A7': 935,
'Zcaron': 381,
'uni2133': 8499,
'uni2132': 8498,
'uni0159': 345,
'uni0158': 344,
'uni2137': 8503,
'uni2005': 8197,
'uni2135': 8501,
'uni2134': 8500,
'uni02BA': 698,
'uni2033': 8243,
'uni0151': 337,
'uni0150': 336,
'uni0157': 343,
'equal': 61,
'uni0155': 341,
'uni0154': 340,
's': 115,
'uni233F': 9023,
'eth': 240,
'uni24BE': 9406,
'uni21E9': 8681,
'uni2060': 8288,
'Egrave': 200,
'uni255D': 9565,
'uni24CD': 9421,
'uni21E1': 8673,
'uni21B9': 8633,
'hyphen': 45,
'uni01BE': 446,
'uni01BB': 443,
'period': 46,
'igrave': 236,
'uni01BA': 442,
'uni2296': 8854,
'uni2297': 8855,
'uni2294': 8852,
'uni2295': 8853,
'colon': 58,
'uni2293': 8851,
'uni2290': 8848,
'uni2291': 8849,
'uni032D': 813,
'uni032E': 814,
'uni032F': 815,
'uni032A': 810,
'uni032B': 811,
'uni032C': 812,
'uni231D': 8989,
'Ecircumflex': 202,
'uni24D7': 9431,
'uni25DD': 9693,
'trademark': 8482,
'Aacute': 193,
'cent': 162,
'uni0445': 1093,
'uni266E': 9838,
'uni266D': 9837,
'uni266B': 9835,
'uni03C9': 969,
'uni2003': 8195,
'uni2047': 8263,
'lslash': 322,
'uni03A6': 934,
'uni2043': 8259,
'uni250C': 9484,
'uni2040': 8256,
'uni255F': 9567,
'uni24CB': 9419,
'uni0472': 1138,
'uni0446': 1094,
'uni0474': 1140,
'uni0475': 1141,
'uni2508': 9480,
'uni2660': 9824,
'uni2506': 9478,
'uni2502': 9474,
'c': 99,
'uni2500': 9472,
'N': 78,
'uni22A6': 8870,
'uni21E7': 8679,
'uni2130': 8496,
'uni2002': 8194,
'breve': 728,
'uni0442': 1090,
'Oacute': 211,
'uni229F': 8863,
'uni25C7': 9671,
'uni229D': 8861,
'uni229E': 8862,
'guillemotleft': 171,
'uni0329': 809,
'uni24E5': 9445,
'uni011F': 287,
'uni0324': 804,
'uni0325': 805,
'uni0326': 806,
'uni0327': 807,
'uni0321': 801,
'uni0322': 802,
'n': 110,
'uni2032': 8242,
'uni2269': 8809,
'uni2268': 8808,
'uni0306': 774,
'uni226B': 8811,
'uni21EA': 8682,
'uni0166': 358,
'uni203B': 8251,
'uni01B5': 437,
'idieresis': 239,
'uni02BC': 700,
'uni01B0': 432,
'braceright': 125,
'seven': 55,
'uni02BB': 699,
'uni011A': 282,
'uni29FB': 10747,
'brokenbar': 166,
'uni2036': 8246,
'uni25C0': 9664,
'uni0156': 342,
'uni22D5': 8917,
'uni0258': 600,
'ugrave': 249,
'uni22D6': 8918,
'uni22D1': 8913,
'uni2034': 8244,
'uni22D3': 8915,
'uni22D2': 8914,
'uni203C': 8252,
'uni223E': 8766,
'uni02BF': 703,
'uni22D9': 8921,
'uni22D8': 8920,
'uni25BD': 9661,
'uni25BE': 9662,
'uni25BF': 9663,
'uni041B': 1051,
'periodcentered': 183,
'uni25BC': 9660,
'uni019E': 414,
'uni019B': 411,
'uni019A': 410,
'uni2007': 8199,
'uni0391': 913,
'uni0390': 912,
'uni0393': 915,
'uni0392': 914,
'uni0395': 917,
'uni0394': 916,
'uni0397': 919,
'uni0396': 918,
'uni0399': 921,
'uni0398': 920,
'uni25C8': 9672,
'uni2468': 9320,
'sterling': 163,
'uni22EB': 8939,
'uni039C': 924,
'uni039B': 923,
'uni039E': 926,
'uni039D': 925,
'uni039F': 927,
'I': 73,
'uni03E1': 993,
'uni03E0': 992,
'uni2319': 8985,
'uni228B': 8843,
'uni25B5': 9653,
'uni25B6': 9654,
'uni22EA': 8938,
'uni24B9': 9401,
'uni044E': 1102,
'uni0199': 409,
'uni2266': 8806,
'Y': 89,
'uni22A2': 8866,
'Eth': 208,
'uni266F': 9839,
'emdash': 8212,
'uni263B': 9787,
'uni24BD': 9405,
'uni22DE': 8926,
'uni0360': 864,
'uni2557': 9559,
'uni22DF': 8927,
'uni22DA': 8922,
'uni22DC': 8924,
'uni0361': 865,
'i': 105,
'uni24BF': 9407,
'uni0362': 866,
'uni263E': 9790,
'uni028D': 653,
'uni2259': 8793,
'uni0323': 803,
'uni2265': 8805,
'daggerdbl': 8225,
'y': 121,
'uni010A': 266,
'plusminus': 177,
'less': 60,
'uni21AE': 8622,
'uni0315': 789,
'uni230B': 8971,
'uni21AF': 8623,
'uni21AA': 8618,
'uni21AC': 8620,
'uni21AB': 8619,
'uni01FB': 507,
'uni01FC': 508,
'uni223A': 8762,
'uni01FA': 506,
'uni01FF': 511,
'uni01FD': 509,
'uni01FE': 510,
'uni2567': 9575,
'uni25E0': 9696,
'uni0104': 260,
'uni0105': 261,
'uni0106': 262,
'uni0107': 263,
'uni0100': 256,
'uni0101': 257,
'uni0102': 258,
'uni0103': 259,
'uni2038': 8248,
'uni2009': 8201,
'uni2008': 8200,
'uni0108': 264,
'uni0109': 265,
'uni02A1': 673,
'uni223B': 8763,
'uni226C': 8812,
'uni25AC': 9644,
'uni24D3': 9427,
'uni21E0': 8672,
'uni21E3': 8675,
'Udieresis': 220,
'uni21E2': 8674,
'D': 68,
'uni21E5': 8677,
'uni2621': 9761,
'uni21D1': 8657,
'uni203E': 8254,
'uni22C6': 8902,
'uni21E4': 8676,
'uni010D': 269,
'uni010E': 270,
'uni010F': 271,
'five': 53,
'T': 84,
'uni010B': 267,
'uni010C': 268,
'uni2605': 9733,
'uni2663': 9827,
'uni21E6': 8678,
'uni24B6': 9398,
'uni22C1': 8897,
'oslash': 248,
'acute': 180,
'uni01F0': 496,
'd': 100,
'OE': 338,
'uni22E3': 8931,
'Igrave': 204,
'uni2308': 8968,
'uni2309': 8969,
'uni21A9': 8617,
't': 116,
'uni2313': 8979,
'uni03A3': 931,
'uni21A4': 8612,
'uni21A7': 8615,
'uni21A6': 8614,
'uni21A1': 8609,
'uni21A0': 8608,
'uni21A3': 8611,
'uni21A2': 8610,
'parenright': 41,
'uni256A': 9578,
'uni25DC': 9692,
'uni24CE': 9422,
'uni042C': 1068,
'uni24E0': 9440,
'uni042B': 1067,
'uni0409': 1033,
'uni0408': 1032,
'uni24E7': 9447,
'uni25B4': 9652,
'uni042A': 1066,
'uni228E': 8846,
'uni0401': 1025,
'adieresis': 228,
'uni0403': 1027,
'quotesingle': 39,
'uni0405': 1029,
'uni0404': 1028,
'uni0407': 1031,
'uni0406': 1030,
'uni229C': 8860,
'uni2306': 8966,
'uni2253': 8787,
'twodotenleader': 8229,
'uni2131': 8497,
'uni21DA': 8666,
'uni2234': 8756,
'uni2235': 8757,
'uni01A5': 421,
'uni2237': 8759,
'uni2230': 8752,
'uni02CC': 716,
'slash': 47,
'uni01A0': 416,
'ellipsis': 8230,
'uni2299': 8857,
'uni2238': 8760,
'numbersign': 35,
'uni21A8': 8616,
'uni223D': 8765,
'uni01AF': 431,
'uni223F': 8767,
'uni01AD': 429,
'uni01AB': 427,
'odieresis': 246,
'uni223C': 8764,
'uni227D': 8829,
'uni0280': 640,
'O': 79,
'uni227E': 8830,
'uni21A5': 8613,
'uni22D4': 8916,
'uni25D4': 9684,
'uni227F': 8831,
'uni0435': 1077,
'uni2302': 8962,
'uni2669': 9833,
'uni24E3': 9443,
'uni2720': 10016,
'uni22A8': 8872,
'uni22A9': 8873,
'uni040A': 1034,
'uni22A7': 8871,
'oe': 339,
'uni040B': 1035,
'uni040E': 1038,
'uni22A3': 8867,
'o': 111,
'uni040F': 1039,
'Edieresis': 203,
'uni25D5': 9685,
'plus': 43,
'uni044D': 1101,
'uni263C': 9788,
'uni22E6': 8934,
'uni2283': 8835,
'uni258C': 9612,
'uni219E': 8606,
'uni24E4': 9444,
'uni2136': 8502,
'dagger': 8224,
'uni24B7': 9399,
'uni219B': 8603,
'uni22E5': 8933,
'three': 51,
'uni210B': 8459,
'uni2534': 9524,
'uni24B8': 9400,
'uni230A': 8970,
'hungarumlaut': 733,
'parenleft': 40,
'uni0148': 328,
'uni0149': 329,
'uni2124': 8484,
'uni2125': 8485,
'uni2126': 8486,
'uni2127': 8487,
'uni0140': 320,
'uni2129': 8489,
'uni25C5': 9669,
'uni0143': 323,
'uni0144': 324,
'uni0145': 325,
'uni0146': 326,
'uni0147': 327,
'uni210D': 8461,
'fraction': 8260,
'uni2031': 8241,
'uni2196': 8598,
'uni2035': 8245,
'uni24E6': 9446,
'uni016B': 363,
'uni24BA': 9402,
'uni266A': 9834,
'uni0116': 278,
'uni2115': 8469,
'registered': 174,
'J': 74,
'uni25DF': 9695,
'uni25CE': 9678,
'uni273D': 10045,
'dieresis': 168,
'uni212B': 8491,
'uni0114': 276,
'uni212D': 8493,
'uni212E': 8494,
'uni212F': 8495,
'uni014A': 330,
'uni014B': 331,
'uni014C': 332,
'uni014D': 333,
'uni014E': 334,
'uni014F': 335,
'uni025E': 606,
'uni24E8': 9448,
'uni0111': 273,
'uni24E9': 9449,
'Ograve': 210,
'j': 106,
'uni2195': 8597,
'uni2194': 8596,
'uni2197': 8599,
'uni2037': 8247,
'uni2191': 8593,
'uni2190': 8592,
'uni2193': 8595,
'uni2192': 8594,
'uni29FA': 10746,
'uni2713': 10003,
'z': 122,
'uni2199': 8601,
'uni2198': 8600,
'uni2667': 9831,
'ae': 230,
'uni0448': 1096,
'semicolon': 59,
'uni2666': 9830,
'uni038F': 911,
'uni0444': 1092,
'uni0447': 1095,
'uni038E': 910,
'uni0441': 1089,
'uni038C': 908,
'uni0443': 1091,
'uni038A': 906,
'uni0250': 592,
'uni0251': 593,
'uni0252': 594,
'uni0253': 595,
'uni0254': 596,
'at': 64,
'uni0256': 598,
'uni0257': 599,
'uni0167': 359,
'uni0259': 601,
'uni228C': 8844,
'uni2662': 9826,
'uni0319': 793,
'uni0318': 792,
'uni24BC': 9404,
'uni0402': 1026,
'uni22EF': 8943,
'Iacute': 205,
'uni22ED': 8941,
'uni22EE': 8942,
'uni0311': 785,
'uni0310': 784,
'uni21E8': 8680,
'uni0312': 786,
'percent': 37,
'uni0317': 791,
'uni0316': 790,
'uni21D6': 8662,
'uni21D7': 8663,
'uni21D4': 8660,
'uni21D5': 8661,
'uni21D2': 8658,
'uni21D3': 8659,
'uni21D0': 8656,
'uni2138': 8504,
'uni2270': 8816,
'uni2271': 8817,
'uni2272': 8818,
'uni2273': 8819,
'uni2274': 8820,
'uni2275': 8821,
'bracketright': 93,
'uni21D9': 8665,
'uni21DF': 8671,
'uni21DD': 8669,
'uni21DE': 8670,
'AE': 198,
'uni03AE': 942,
'uni227A': 8826,
'uni227B': 8827,
'uni227C': 8828,
'asterisk': 42,
'aacute': 225,
'uni226F': 8815,
'uni22E2': 8930,
'uni0386': 902,
'uni22E0': 8928,
'uni22E1': 8929,
'U': 85,
'uni22E7': 8935,
'uni22E4': 8932,
'uni0387': 903,
'uni031A': 794,
'eacute': 233,
'uni22E8': 8936,
'uni22E9': 8937,
'uni24D8': 9432,
'uni025A': 602,
'uni025B': 603,
'uni025C': 604,
'e': 101,
'uni0128': 296,
'uni025F': 607,
'uni2665': 9829,
'thorn': 254,
'uni0129': 297,
'uni253C': 9532,
'uni25D7': 9687,
'u': 117,
'uni0388': 904,
'uni0389': 905,
'uni0255': 597,
'uni0171': 369,
'uni0384': 900,
'uni0385': 901,
'uni044A': 1098,
'uni252C': 9516,
'uni044C': 1100,
'uni044B': 1099}
uni2type1 = dict([(v,k) for k,v in type12uni.items()])
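# Illustration (added; not part of the original file): uni2type1 simply
# inverts type12uni, mapping Unicode code points back to Type-1 glyph names,
# e.g. uni2type1[229] == 'aring' and uni2type1[8721] == 'uni2211'.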
tex2uni = {
'widehat': 0x0302,
'widetilde': 0x0303,
'langle': 0x27e8,
'rangle': 0x27e9,
'perp': 0x27c2,
'neq': 0x2260,
'Join': 0x2a1d,
'leqslant': 0x2a7d,
'geqslant': 0x2a7e,
'lessapprox': 0x2a85,
'gtrapprox': 0x2a86,
'lesseqqgtr': 0x2a8b,
'gtreqqless': 0x2a8c,
'triangleeq': 0x225c,
'eqslantless': 0x2a95,
'eqslantgtr': 0x2a96,
'backepsilon': 0x03f6,
'precapprox': 0x2ab7,
'succapprox': 0x2ab8,
'fallingdotseq': 0x2252,
'subseteqq': 0x2ac5,
'supseteqq': 0x2ac6,
'varpropto': 0x221d,
'precnapprox': 0x2ab9,
'succnapprox': 0x2aba,
'subsetneqq': 0x2acb,
'supsetneqq': 0x2acc,
'lnapprox': 0x2ab9,
'gnapprox': 0x2aba,
'longleftarrow': 0x27f5,
'longrightarrow': 0x27f6,
'longleftrightarrow': 0x27f7,
'Longleftarrow': 0x27f8,
'Longrightarrow': 0x27f9,
'Longleftrightarrow': 0x27fa,
'longmapsto': 0x27fc,
'leadsto': 0x21dd,
'dashleftarrow': 0x290e,
'dashrightarrow': 0x290f,
'circlearrowleft': 0x21ba,
'circlearrowright': 0x21bb,
'leftrightsquigarrow': 0x21ad,
'leftsquigarrow': 0x219c,
'rightsquigarrow': 0x219d,
'Game': 0x2141,
'hbar': 0x0127,
'hslash': 0x210f,
'ldots': 0x22ef,
'vdots': 0x22ee,
'doteqdot': 0x2251,
'doteq': 8784,
'partial': 8706,
'gg': 8811,
'asymp': 8781,
'blacktriangledown': 9662,
'otimes': 8855,
'nearrow': 8599,
'varpi': 982,
'vee': 8744,
'vec': 8407,
'smile': 8995,
'succnsim': 8937,
'gimel': 8503,
'vert': 124,
'|': 124,
'varrho': 1009,
'P': 182,
'approxident': 8779,
'Swarrow': 8665,
'textasciicircum': 94,
'imageof': 8887,
'ntriangleleft': 8938,
'nleq': 8816,
'div': 247,
'nparallel': 8742,
'Leftarrow': 8656,
'lll': 8920,
'oiint': 8751,
'ngeq': 8817,
'Theta': 920,
'origof': 8886,
'blacksquare': 9632,
'solbar': 9023,
'neg': 172,
'sum': 8721,
'Vdash': 8873,
'coloneq': 8788,
'degree': 176,
'bowtie': 8904,
'blacktriangleright': 9654,
'varsigma': 962,
'leq': 8804,
'ggg': 8921,
'lneqq': 8808,
'scurel': 8881,
'stareq': 8795,
'BbbN': 8469,
'nLeftarrow': 8653,
'nLeftrightarrow': 8654,
'k': 808,
'bot': 8869,
'BbbC': 8450,
'Lsh': 8624,
'leftleftarrows': 8647,
'BbbZ': 8484,
'digamma': 989,
'BbbR': 8477,
'BbbP': 8473,
'BbbQ': 8474,
'vartriangleright': 8883,
'succsim': 8831,
'wedge': 8743,
'lessgtr': 8822,
'veebar': 8891,
'mapsdown': 8615,
'Rsh': 8625,
'chi': 967,
'prec': 8826,
'nsubseteq': 8840,
'therefore': 8756,
'eqcirc': 8790,
'textexclamdown': 161,
'nRightarrow': 8655,
'flat': 9837,
'notin': 8713,
'llcorner': 8990,
'varepsilon': 949,
'bigtriangleup': 9651,
'aleph': 8501,
'dotminus': 8760,
'upsilon': 965,
'Lambda': 923,
'cap': 8745,
'barleftarrow': 8676,
'mu': 956,
'boxplus': 8862,
'mp': 8723,
'circledast': 8859,
'tau': 964,
'in': 8712,
'backslash': 92,
'varnothing': 8709,
'sharp': 9839,
'eqsim': 8770,
'gnsim': 8935,
'Searrow': 8664,
'updownarrows': 8645,
'heartsuit': 9825,
'trianglelefteq': 8884,
'ddag': 8225,
'sqsubseteq': 8849,
'mapsfrom': 8612,
'boxbar': 9707,
'sim': 8764,
'Nwarrow': 8662,
'nequiv': 8802,
'succ': 8827,
'vdash': 8866,
'Leftrightarrow': 8660,
'parallel': 8741,
'invnot': 8976,
'natural': 9838,
'ss': 223,
'uparrow': 8593,
'nsim': 8769,
'hookrightarrow': 8618,
'Equiv': 8803,
'approx': 8776,
'Vvdash': 8874,
'nsucc': 8833,
'leftrightharpoons': 8651,
'Re': 8476,
'boxminus': 8863,
'equiv': 8801,
'Lleftarrow': 8666,
'thinspace': 8201,
'll': 8810,
'Cup': 8915,
'measeq': 8798,
'upharpoonleft': 8639,
'lq': 8216,
'Upsilon': 933,
'subsetneq': 8842,
'greater': 62,
'supsetneq': 8843,
'Cap': 8914,
'L': 321,
'spadesuit': 9824,
'lrcorner': 8991,
'not': 824,
'bar': 772,
'rightharpoonaccent': 8401,
'boxdot': 8865,
'l': 322,
'leftharpoondown': 8637,
'bigcup': 8899,
'iint': 8748,
'bigwedge': 8896,
'downharpoonleft': 8643,
'textasciitilde': 126,
'subset': 8834,
'leqq': 8806,
'mapsup': 8613,
'nvDash': 8877,
'looparrowleft': 8619,
'nless': 8814,
'rightarrowbar': 8677,
'Vert': 8214,
'downdownarrows': 8650,
'uplus': 8846,
'simeq': 8771,
'napprox': 8777,
'ast': 8727,
'twoheaduparrow': 8607,
'doublebarwedge': 8966,
'Sigma': 931,
'leftharpoonaccent': 8400,
'ntrianglelefteq': 8940,
'nexists': 8708,
'times': 215,
'measuredangle': 8737,
'bumpeq': 8783,
'carriagereturn': 8629,
'adots': 8944,
'checkmark': 10003,
'lambda': 955,
'xi': 958,
'rbrace': 125,
'rbrack': 93,
'Nearrow': 8663,
'maltese': 10016,
'clubsuit': 9827,
'top': 8868,
'overarc': 785,
'varphi': 966,
'Delta': 916,
'iota': 953,
'nleftarrow': 8602,
'candra': 784,
'supset': 8835,
'triangleleft': 9665,
'gtreqless': 8923,
'ntrianglerighteq': 8941,
'quad': 8195,
'Xi': 926,
'gtrdot': 8919,
'leftthreetimes': 8907,
'minus': 8722,
'preccurlyeq': 8828,
'nleftrightarrow': 8622,
'lambdabar': 411,
'blacktriangle': 9652,
'kernelcontraction': 8763,
'Phi': 934,
'angle': 8736,
'spadesuitopen': 9828,
'eqless': 8924,
'mid': 8739,
'varkappa': 1008,
'Ldsh': 8626,
'updownarrow': 8597,
'beta': 946,
'textquotedblleft': 8220,
'rho': 961,
'alpha': 945,
'intercal': 8890,
'beth': 8502,
'grave': 768,
'acwopencirclearrow': 8634,
'nmid': 8740,
'nsupset': 8837,
'sigma': 963,
'dot': 775,
'Rightarrow': 8658,
'turnednot': 8985,
'backsimeq': 8909,
'leftarrowtail': 8610,
'approxeq': 8778,
'curlyeqsucc': 8927,
'rightarrowtail': 8611,
'Psi': 936,
'copyright': 169,
'yen': 165,
'vartriangleleft': 8882,
'rasp': 700,
'triangleright': 9655,
'precsim': 8830,
'infty': 8734,
'geq': 8805,
'updownarrowbar': 8616,
'precnsim': 8936,
'H': 779,
'ulcorner': 8988,
'looparrowright': 8620,
'ncong': 8775,
'downarrow': 8595,
'circeq': 8791,
'subseteq': 8838,
'bigstar': 9733,
'prime': 8242,
'lceil': 8968,
'Rrightarrow': 8667,
'oiiint': 8752,
'curlywedge': 8911,
'vDash': 8872,
'lfloor': 8970,
'ddots': 8945,
'exists': 8707,
'underbar': 817,
'Pi': 928,
'leftrightarrows': 8646,
'sphericalangle': 8738,
'coprod': 8720,
'circledcirc': 8858,
'gtrsim': 8819,
'gneqq': 8809,
'between': 8812,
'theta': 952,
'complement': 8705,
'arceq': 8792,
'nVdash': 8878,
'S': 167,
'wr': 8768,
'wp': 8472,
'backcong': 8780,
'lasp': 701,
'c': 807,
'nabla': 8711,
'dotplus': 8724,
'eta': 951,
'forall': 8704,
'eth': 240,
'colon': 58,
'sqcup': 8852,
'rightrightarrows': 8649,
'sqsupset': 8848,
'mapsto': 8614,
'bigtriangledown': 9661,
'sqsupseteq': 8850,
'propto': 8733,
'pi': 960,
'pm': 177,
'dots': 8230,
'nrightarrow': 8603,
'textasciiacute': 180,
'Doteq': 8785,
'breve': 774,
'sqcap': 8851,
'twoheadrightarrow': 8608,
'kappa': 954,
'vartriangle': 9653,
'diamondsuit': 9826,
'pitchfork': 8916,
'blacktriangleleft': 9664,
'nprec': 8832,
'vdots': 8942,
'curvearrowright': 8631,
'barwedge': 8892,
'multimap': 8888,
'textquestiondown': 191,
'cong': 8773,
'rtimes': 8906,
'rightzigzagarrow': 8669,
'rightarrow': 8594,
'leftarrow': 8592,
'__sqrt__': 8730,
'twoheaddownarrow': 8609,
'oint': 8750,
'bigvee': 8897,
'eqdef': 8797,
'sterling': 163,
'phi': 981,
'Updownarrow': 8661,
'backprime': 8245,
'emdash': 8212,
'Gamma': 915,
'i': 305,
'rceil': 8969,
'leftharpoonup': 8636,
'Im': 8465,
'curvearrowleft': 8630,
'wedgeq': 8793,
'fallingdotseq': 8786,
'curlyeqprec': 8926,
'questeq': 8799,
'less': 60,
'upuparrows': 8648,
'tilde': 771,
'textasciigrave': 96,
'smallsetminus': 8726,
'ell': 8467,
'cup': 8746,
'danger': 9761,
'nVDash': 8879,
'cdotp': 183,
'cdots': 8943,
'hat': 770,
'eqgtr': 8925,
'enspace': 8194,
'psi': 968,
'frown': 8994,
'acute': 769,
'downzigzagarrow': 8623,
'ntriangleright': 8939,
'cupdot': 8845,
'circleddash': 8861,
'oslash': 8856,
'mho': 8487,
'd': 803,
'sqsubset': 8847,
'cdot': 8901,
'Omega': 937,
'OE': 338,
'veeeq': 8794,
'Finv': 8498,
't': 865,
'leftrightarrow': 8596,
'swarrow': 8601,
'rightthreetimes': 8908,
'rightleftharpoons': 8652,
'lesssim': 8818,
'searrow': 8600,
'because': 8757,
'gtrless': 8823,
'star': 8902,
'nsubset': 8836,
'zeta': 950,
'dddot': 8411,
'bigcirc': 9675,
'Supset': 8913,
'circ': 8728,
'slash': 8725,
'ocirc': 778,
'prod': 8719,
'twoheadleftarrow': 8606,
'daleth': 8504,
'upharpoonright': 8638,
'odot': 8857,
'Uparrow': 8657,
'O': 216,
'hookleftarrow': 8617,
'trianglerighteq': 8885,
'nsime': 8772,
'oe': 339,
'nwarrow': 8598,
'o': 248,
'ddddot': 8412,
'downharpoonright': 8642,
'succcurlyeq': 8829,
'gamma': 947,
'scrR': 8475,
'dag': 8224,
'thickspace': 8197,
'frakZ': 8488,
'lessdot': 8918,
'triangledown': 9663,
'ltimes': 8905,
'scrB': 8492,
'endash': 8211,
'scrE': 8496,
'scrF': 8497,
'scrH': 8459,
'scrI': 8464,
'rightharpoondown': 8641,
'scrL': 8466,
'scrM': 8499,
'frakC': 8493,
'nsupseteq': 8841,
'circledR': 174,
'circledS': 9416,
'ngtr': 8815,
'bigcap': 8898,
'scre': 8495,
'Downarrow': 8659,
'scrg': 8458,
'overleftrightarrow': 8417,
'scro': 8500,
'lnsim': 8934,
'eqcolon': 8789,
'curlyvee': 8910,
'urcorner': 8989,
'lbrace': 123,
'Bumpeq': 8782,
'delta': 948,
'boxtimes': 8864,
'overleftarrow': 8406,
'prurel': 8880,
'clubsuitopen': 9831,
'cwopencirclearrow': 8635,
'geqq': 8807,
'rightleftarrows': 8644,
'ac': 8766,
'ae': 230,
'int': 8747,
'rfloor': 8971,
'risingdotseq': 8787,
'nvdash': 8876,
'diamond': 8900,
'ddot': 776,
'backsim': 8765,
'oplus': 8853,
'triangleq': 8796,
'check': 780,
'ni': 8715,
'iiint': 8749,
'ne': 8800,
'lesseqgtr': 8922,
'obar': 9021,
'supseteq': 8839,
'nu': 957,
'AA': 8491,
'AE': 198,
'models': 8871,
'ominus': 8854,
'dashv': 8867,
'omega': 969,
'rq': 8217,
'Subset': 8912,
'rightharpoonup': 8640,
'Rdsh': 8627,
'bullet': 8729,
'divideontimes': 8903,
'lbrack': 91,
'textquotedblright': 8221,
'Colon': 8759,
'%': 37,
'$': 36,
'{': 123,
'}': 125,
'_': 95,
'imath': 0x131,
'circumflexaccent' : 770,
'combiningbreve' : 774,
'combiningoverline' : 772,
'combininggraveaccent' : 768,
'combiningacuteaccent' : 769,
'combiningdiaeresis' : 776,
'combiningtilde' : 771,
'combiningrightarrowabove' : 8407,
'combiningdotabove' : 775,
'to': 8594,
'succeq': 8829,
'emptyset': 8709,
'leftparen': 40,
'rightparen': 41,
'bigoplus': 10753,
'leftangle': 10216,
'rightangle': 10217,
'leftbrace': 123,
'rightbrace': 125,
'jmath': 567,
'bigodot': 10752,
'preceq': 8828,
'biguplus': 10756,
'epsilon': 949,
'vartheta': 977,
'bigotimes': 10754
}
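# Illustration (added; not part of the original table): the keys of tex2uni
# are TeX command names without the leading backslash and the values are
# Unicode code points, e.g. tex2uni['alpha'] == 0x3b1 and
# tex2uni['infty'] == 0x221e.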
# Each element is a 4-tuple of the form:
# src_start, src_end, dst_font, dst_start
#
stix_virtual_fonts = {
'bb':
{
'rm':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x0042, 'rm', 0x1d538), # A-B
(0x0043, 0x0043, 'rm', 0x2102), # C
(0x0044, 0x0047, 'rm', 0x1d53b), # D-G
(0x0048, 0x0048, 'rm', 0x210d), # H
(0x0049, 0x004d, 'rm', 0x1d540), # I-M
(0x004e, 0x004e, 'rm', 0x2115), # N
(0x004f, 0x004f, 'rm', 0x1d546), # O
(0x0050, 0x0051, 'rm', 0x2119), # P-Q
(0x0052, 0x0052, 'rm', 0x211d), # R
(0x0053, 0x0059, 'rm', 0x1d54a), # S-Y
(0x005a, 0x005a, 'rm', 0x2124), # Z
(0x0061, 0x007a, 'rm', 0x1d552), # a-z
(0x0393, 0x0393, 'rm', 0x213e), # \Gamma
(0x03a0, 0x03a0, 'rm', 0x213f), # \Pi
(0x03a3, 0x03a3, 'rm', 0x2140), # \Sigma
(0x03b3, 0x03b3, 'rm', 0x213d), # \gamma
(0x03c0, 0x03c0, 'rm', 0x213c), # \pi
],
'it':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x0042, 'it', 0xe154), # A-B
(0x0043, 0x0043, 'it', 0x2102), # C (missing in beta STIX fonts)
(0x0044, 0x0044, 'it', 0x2145), # D
(0x0045, 0x0047, 'it', 0xe156), # E-G
(0x0048, 0x0048, 'it', 0x210d), # H (missing in beta STIX fonts)
(0x0049, 0x004d, 'it', 0xe159), # I-M
(0x004e, 0x004e, 'it', 0x2115), # N (missing in beta STIX fonts)
(0x004f, 0x004f, 'it', 0xe15e), # O
(0x0050, 0x0051, 'it', 0x2119), # P-Q (missing in beta STIX fonts)
(0x0052, 0x0052, 'it', 0x211d), # R (missing in beta STIX fonts)
(0x0053, 0x0059, 'it', 0xe15f), # S-Y
(0x005a, 0x005a, 'it', 0x2124), # Z (missing in beta STIX fonts)
(0x0061, 0x0063, 'it', 0xe166), # a-c
(0x0064, 0x0065, 'it', 0x2146), # d-e
(0x0066, 0x0068, 'it', 0xe169), # f-h
(0x0069, 0x006a, 'it', 0x2148), # i-j
(0x006b, 0x007a, 'it', 0xe16c), # k-z
(0x0393, 0x0393, 'it', 0x213e), # \Gamma (missing in beta STIX fonts)
(0x03a0, 0x03a0, 'it', 0x213f), # \Pi
(0x03a3, 0x03a3, 'it', 0x2140), # \Sigma (missing in beta STIX fonts)
(0x03b3, 0x03b3, 'it', 0x213d), # \gamma (missing in beta STIX fonts)
(0x03c0, 0x03c0, 'it', 0x213c), # \pi
],
'bf':
[
(0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9
(0x0041, 0x005a, 'bf', 0xe38a), # A-Z
(0x0061, 0x007a, 'bf', 0xe39d), # a-z
(0x0393, 0x0393, 'bf', 0x213e), # \Gamma
(0x03a0, 0x03a0, 'bf', 0x213f), # \Pi
(0x03a3, 0x03a3, 'bf', 0x2140), # \Sigma
(0x03b3, 0x03b3, 'bf', 0x213d), # \gamma
(0x03c0, 0x03c0, 'bf', 0x213c), # \pi
],
},
'cal':
[
(0x0041, 0x005a, 'it', 0xe22d), # A-Z
],
'circled':
{
'rm':
[
(0x0030, 0x0030, 'rm', 0x24ea), # 0
(0x0031, 0x0039, 'rm', 0x2460), # 1-9
(0x0041, 0x005a, 'rm', 0x24b6), # A-Z
(0x0061, 0x007a, 'rm', 0x24d0) # a-z
],
'it':
[
(0x0030, 0x0030, 'rm', 0x24ea), # 0
(0x0031, 0x0039, 'rm', 0x2460), # 1-9
(0x0041, 0x005a, 'it', 0x24b6), # A-Z
(0x0061, 0x007a, 'it', 0x24d0) # a-z
],
'bf':
[
(0x0030, 0x0030, 'bf', 0x24ea), # 0
(0x0031, 0x0039, 'bf', 0x2460), # 1-9
(0x0041, 0x005a, 'bf', 0x24b6), # A-Z
(0x0061, 0x007a, 'bf', 0x24d0) # a-z
],
},
'frak':
{
'rm':
[
(0x0041, 0x0042, 'rm', 0x1d504), # A-B
(0x0043, 0x0043, 'rm', 0x212d), # C
(0x0044, 0x0047, 'rm', 0x1d507), # D-G
(0x0048, 0x0048, 'rm', 0x210c), # H
(0x0049, 0x0049, 'rm', 0x2111), # I
(0x004a, 0x0051, 'rm', 0x1d50d), # J-Q
(0x0052, 0x0052, 'rm', 0x211c), # R
(0x0053, 0x0059, 'rm', 0x1d516), # S-Y
(0x005a, 0x005a, 'rm', 0x2128), # Z
(0x0061, 0x007a, 'rm', 0x1d51e), # a-z
],
'it':
[
(0x0041, 0x0042, 'rm', 0x1d504), # A-B
(0x0043, 0x0043, 'rm', 0x212d), # C
(0x0044, 0x0047, 'rm', 0x1d507), # D-G
(0x0048, 0x0048, 'rm', 0x210c), # H
(0x0049, 0x0049, 'rm', 0x2111), # I
(0x004a, 0x0051, 'rm', 0x1d50d), # J-Q
(0x0052, 0x0052, 'rm', 0x211c), # R
(0x0053, 0x0059, 'rm', 0x1d516), # S-Y
(0x005a, 0x005a, 'rm', 0x2128), # Z
(0x0061, 0x007a, 'rm', 0x1d51e), # a-z
],
'bf':
[
(0x0041, 0x005a, 'bf', 0x1d56c), # A-Z
(0x0061, 0x007a, 'bf', 0x1d586), # a-z
],
},
'scr':
[
(0x0041, 0x0041, 'it', 0x1d49c), # A
(0x0042, 0x0042, 'it', 0x212c), # B
(0x0043, 0x0044, 'it', 0x1d49e), # C-D
(0x0045, 0x0046, 'it', 0x2130), # E-F
(0x0047, 0x0047, 'it', 0x1d4a2), # G
(0x0048, 0x0048, 'it', 0x210b), # H
(0x0049, 0x0049, 'it', 0x2110), # I
(0x004a, 0x004b, 'it', 0x1d4a5), # J-K
(0x004c, 0x004c, 'it', 0x2112), # L
(0x004d, 0x004d, 'it', 0x2133), # M
(0x004e, 0x0051, 'it', 0x1d4a9), # N-Q
(0x0052, 0x0052, 'it', 0x211b), # R
(0x0053, 0x005a, 'it', 0x1d4ae), # S-Z
(0x0061, 0x0064, 'it', 0x1d4b6), # a-d
(0x0065, 0x0065, 'it', 0x212f), # e
(0x0066, 0x0066, 'it', 0x1d4bb), # f
(0x0067, 0x0067, 'it', 0x210a), # g
(0x0068, 0x006e, 'it', 0x1d4bd), # h-n
(0x006f, 0x006f, 'it', 0x2134), # o
(0x0070, 0x007a, 'it', 0x1d4c5), # p-z
],
'sf':
{
'rm':
[
(0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9
(0x0041, 0x005a, 'rm', 0x1d5a0), # A-Z
(0x0061, 0x007a, 'rm', 0x1d5ba), # a-z
(0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega
(0x03b1, 0x03c9, 'rm', 0xe196), # \alpha-\omega
(0x03d1, 0x03d1, 'rm', 0xe1b0), # theta variant
(0x03d5, 0x03d5, 'rm', 0xe1b1), # phi variant
(0x03d6, 0x03d6, 'rm', 0xe1b3), # pi variant
(0x03f1, 0x03f1, 'rm', 0xe1b2), # rho variant
(0x03f5, 0x03f5, 'rm', 0xe1af), # lunate epsilon
(0x2202, 0x2202, 'rm', 0xe17c), # partial differential
],
'it':
[
# These numerals are actually upright. We don't actually
# want italic numerals ever.
(0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9
(0x0041, 0x005a, 'it', 0x1d608), # A-Z
(0x0061, 0x007a, 'it', 0x1d622), # a-z
(0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega
(0x03b1, 0x03c9, 'it', 0xe1d8), # \alpha-\omega
(0x03d1, 0x03d1, 'it', 0xe1f2), # theta variant
(0x03d5, 0x03d5, 'it', 0xe1f3), # phi variant
(0x03d6, 0x03d6, 'it', 0xe1f5), # pi variant
(0x03f1, 0x03f1, 'it', 0xe1f4), # rho variant
(0x03f5, 0x03f5, 'it', 0xe1f1), # lunate epsilon
],
'bf':
[
(0x0030, 0x0039, 'bf', 0x1d7ec), # 0-9
(0x0041, 0x005a, 'bf', 0x1d5d4), # A-Z
(0x0061, 0x007a, 'bf', 0x1d5ee), # a-z
(0x0391, 0x03a9, 'bf', 0x1d756), # \Alpha-\Omega
(0x03b1, 0x03c9, 'bf', 0x1d770), # \alpha-\omega
(0x03d1, 0x03d1, 'bf', 0x1d78b), # theta variant
(0x03d5, 0x03d5, 'bf', 0x1d78d), # phi variant
(0x03d6, 0x03d6, 'bf', 0x1d78f), # pi variant
(0x03f0, 0x03f0, 'bf', 0x1d78c), # kappa variant
(0x03f1, 0x03f1, 'bf', 0x1d78e), # rho variant
(0x03f5, 0x03f5, 'bf', 0x1d78a), # lunate epsilon
(0x2202, 0x2202, 'bf', 0x1d789), # partial differential
(0x2207, 0x2207, 'bf', 0x1d76f), # \Nabla
],
},
'tt':
[
(0x0030, 0x0039, 'rm', 0x1d7f6), # 0-9
(0x0041, 0x005a, 'rm', 0x1d670), # A-Z
(0x0061, 0x007a, 'rm', 0x1d68a) # a-z
],
}
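# Illustrative sketch (added; not part of the original module): given the
# (src_start, src_end, dst_font, dst_start) convention documented above, a
# lookup helper could walk the ranges as below.  The function name is
# hypothetical; matplotlib performs this remapping elsewhere.
def _example_stix_remap(uniindex, fontclass='bb', fontstyle='rm'):
    """Return (dst_font, dst_codepoint) for uniindex, or None if unmapped."""
    mapping = stix_virtual_fonts[fontclass]
    if isinstance(mapping, dict):
        # 'bb', 'circled', 'frak' and 'sf' are further keyed by style.
        mapping = mapping[fontstyle]
    for src_start, src_end, dst_font, dst_start in mapping:
        if src_start <= uniindex <= src_end:
            return dst_font, dst_start + (uniindex - src_start)
    return None
# For example, _example_stix_remap(ord('C'), 'bb', 'rm') yields ('rm', 0x2102),
# the double-struck capital C.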
| gpl-3.0 |
vybstat/scikit-learn | sklearn/externals/joblib/parallel.py | 79 | 35628 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
import gc
import warnings
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This settings was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
# Under Python 3.4+ use the 'forkserver' start method by default: this makes it
# possible to avoid crashing 3rd party libraries that manage an internal thread
# pool that does not tolerate forking
if hasattr(mp, 'get_start_method'):
method = os.environ.get('JOBLIB_START_METHOD')
if (method is None and mp.get_start_method() == 'fork'
and 'forkserver' in mp.get_all_start_methods()):
method = 'forkserver'
DEFAULT_MP_CONTEXT = mp.get_context(method=method)
else:
DEFAULT_MP_CONTEXT = None
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
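# Usage sketch (added; not in the original source): a BatchedCalls instance
# simply replays the captured (func, args, kwargs) triples in order, e.g.
#
#     batch = BatchedCalls([(abs, (-1,), {}), (pow, (2, 3), {})])
#     batch()      # -> [1, 8]
#     len(batch)   # -> 2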
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
""" Return the number of CPUs.
"""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag that grows quadratically with the number of messages already reported.
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
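# Worked example (added for illustration): with verbose=5 the filter lets
# messages through at indices 0, 17, 71, 161, ..., because
# .5 * (11 - 5) ** 2 == 18 and int(sqrt((index + 1) / 18)) first differs from
# int(sqrt(index / 18)) at those indices, so reports get sparser as the
# iteration index grows.  With verbose > 10 every index is reported.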
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function so that any exception it raises carries the full
traceback in its representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
if issubclass(e_type, TransportableException):
raise
else:
raise TransportableException(text, e_type)
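# Usage sketch (added; not in the original source): wrapping a callable in
# SafeFunction means an exception raised inside it is re-raised as a
# TransportableException whose text embeds the fully formatted traceback, e.g.
#
#     safe_div = SafeFunction(lambda a, b: a / b)
#     safe_div(1, 0)   # raises TransportableException around ZeroDivisionError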
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
- when used in conjunction with `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
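# Usage sketch (added; not in the original source): delayed only captures the
# call, it does not execute it, e.g.
#
#     from math import sqrt
#     delayed(sqrt)(4)   # -> (sqrt, (4,), {}), to be executed later by Parallel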
###############################################################################
class ImmediateComputeBatch(object):
"""Sequential computation of a batch of tasks.
This replicates the async computation API but actually does not delay
the computations when joblib.Parallel runs in sequential mode.
"""
def __init__(self, batch):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = batch()
def get(self):
return self.results
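# Usage sketch (added; not in the original source): the batch runs eagerly in
# the constructor and get() merely returns the stored results, e.g.
#
#     ImmediateComputeBatch(BatchedCalls([(abs, (-2,), {})])).get()   # -> [2]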
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
It is used for progress reporting, to update the estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
if (self.parallel.batch_size == 'auto'
and self.batch_size == self.parallel._effective_batch_size):
# Update the smoothed streaming estimate of the duration of a batch
# from dispatch to completion
old_duration = self.parallel._smoothed_batch_duration
if old_duration == 0:
# First record of duration for this batch size after the last
# reset.
new_duration = this_batch_duration
else:
# Update the exponentially weighted average of the duration of
# batch for the current effective size.
new_duration = 0.8 * old_duration + 0.2 * this_batch_duration
self.parallel._smoothed_batch_duration = new_duration
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
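# Worked example (added for illustration) of the smoothing above: with the
# exponentially weighted average 0.8 * old + 0.2 * new, a previous estimate of
# 1.0 s and a freshly observed batch duration of 0.5 s give
# 0.8 * 1.0 + 0.2 * 0.5 == 0.9 s as the new smoothed batch duration.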
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None, default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose: int, optional
The verbosity level: if non-zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
max_nbytes: int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings, in addition to
the raw multiprocessing API, is (see examples for details):
* More readable code, in particular since it avoids
constructing lists of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
- using 'n_jobs=1' makes it possible to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocessing jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example: note how the line of the error is indicated,
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
called 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0,
pre_dispatch='2 * n_jobs', batch_size='auto',
temp_folder=None, max_nbytes='1M', mmap_mode='r'):
self.verbose = verbose
self._mp_context = DEFAULT_MP_CONTEXT
if backend is None:
# `backend=None` was supported in 0.8.2 with this effect
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
if (batch_size == 'auto'
or isinstance(batch_size, Integral) and batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self.pre_dispatch = pre_dispatch
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it
# unless they choose to use the context manager API with a with block.
self._pool = None
self._output = None
self._jobs = list()
self._managed_pool = False
# This lock is used to coordinate the main thread of this process with
# the async callback thread of the pool.
self._lock = threading.Lock()
def __enter__(self):
self._managed_pool = True
self._initialize_pool()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._terminate_pool()
self._managed_pool = False
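# Usage sketch (added for illustration, not part of the original source):
# the context manager API above lets one pool be reused across several
# calls instead of being rebuilt each time, e.g.
#
#     with Parallel(n_jobs=2) as parallel:
#         for chunk in chunks:          # `chunks` is a hypothetical iterable
#             results = parallel(delayed(process)(item) for item in chunk)
#
# where `process` is any picklable function; only the pool-reuse pattern is
# the point here.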
def _effective_n_jobs(self):
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
elif mp is None or n_jobs is None:
# multiprocessing is not available or disabled, fallback
# to sequential mode
return 1
elif n_jobs < 0:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
return n_jobs
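# Added note (illustrative, not in the original source): with the formula
# above, on a hypothetical 8-core machine n_jobs=-1 yields
# max(8 + 1 - 1, 1) = 8 workers and n_jobs=-2 yields 7, i.e. "all CPUs
# but one".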
def _initialize_pool(self):
"""Build a process or thread pool and return the number of workers"""
n_jobs = self._effective_n_jobs()
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
if n_jobs == 1:
# Sequential mode: do not use a pool instance to avoid any
# useless dispatching overhead
self._pool = None
elif self.backend == 'threading':
self._pool = ThreadPool(n_jobs)
elif self.backend == 'multiprocessing':
if mp.current_process().daemon:
# Daemonic processes cannot have children
self._pool = None
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=3)
return 1
elif threading.current_thread().name != 'MainThread':
# Prevent posix fork in non-main posix threads
self._pool = None
warnings.warn(
'Multiprocessing backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=3)
return 1
else:
already_forked = int(os.environ.get(JOBLIB_SPAWNED_PROCESS, 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Set an environment variable to avoid infinite loops
os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
# Make sure to free as much memory as possible before forking
gc.collect()
poolargs = dict(
max_nbytes=self._max_nbytes,
mmap_mode=self._mmap_mode,
temp_folder=self._temp_folder,
verbose=max(0, self.verbose - 50),
context_id=0, # the pool is used only for one call
)
if self._mp_context is not None:
# Use Python 3.4+ multiprocessing context isolation
poolargs['context'] = self._mp_context
self._pool = MemmapingPool(n_jobs, **poolargs)
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
else:
raise ValueError("Unsupported backend: %s" % self.backend)
return n_jobs
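# Added note (not in the original source): the 'threading' pool above only
# helps when the dispatched functions release the GIL (e.g. I/O or large
# native/NumPy operations); otherwise the 'multiprocessing' branch with the
# memmapping pool is the one that provides real CPU parallelism.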
def _terminate_pool(self):
if self._pool is not None:
self._pool.close()
self._pool.terminate() # terminate does a join()
self._pool = None
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
WARNING: this method is not thread-safe: it should be only called
indirectly via dispatch_one_batch.
"""
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
if self._pool is None:
job = ImmediateComputeBatch(batch)
self._jobs.append(job)
self.n_dispatched_batches += 1
self.n_dispatched_tasks += len(batch)
self.n_completed_tasks += len(batch)
if not _verbosity_filter(self.n_dispatched_batches, self.verbose):
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(time.time() - self._start_time)
))
else:
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._pool.apply_async(SafeFunction(batch), callback=cb)
self._jobs.append(job)
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching is protected by the same
lock so calling this function should be thread safe.
"""
if self.batch_size == 'auto' and self.backend == 'threading':
# Batching is never beneficial with the threading backend
batch_size = 1
elif self.batch_size == 'auto':
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if (batch_duration > 0 and
batch_duration < MIN_IDEAL_BATCH_DURATION):
# The current batch size is too small: the duration of the
# processing of a batch of task is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(
old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
# Multiply by two to limit oscillations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
self._effective_batch_size = batch_size
if self.verbose >= 10:
self._print("Batch computation too fast (%.4fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
elif (batch_duration > MAX_IDEAL_BATCH_DURATION and
old_batch_size >= 2):
# The current batch size is too big. If we schedule overly long
# running batches some CPUs might wait with nothing left to do
# while a couple of CPUs are left processing a few long running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
self._effective_batch_size = batch_size = old_batch_size // 2
if self.verbose >= 10:
self._print("Batch computation too slow (%.2fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = 0
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if not tasks:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
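# Worked example of the auto batch-size tuning above (numbers are
# illustrative, not from the original source): if the previous batch of 4
# tasks ran in 0.05s and MIN_IDEAL_BATCH_DURATION were 0.2s, the ideal size
# would be int(4 * 0.2 / 0.05) = 16, doubled to 32 to damp oscillation;
# conversely a batch slower than MAX_IDEAL_BATCH_DURATION is halved.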
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
# This is heuristic code to print a message only 'verbose' times
# The challenge is that we may not know the queue length
if self._original_iterator:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time),
))
else:
index = self.n_dispatched_batches
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1
- self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched_tasks - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job list can be filling up as
# we empty it and Python lists are not thread-safe by default hence
# the use of the lock
with self._lock:
job = self._jobs.pop(0)
try:
self._output.extend(job.get())
except tuple(self.exceptions) as exception:
# Stop dispatching any new job in the async callback thread
self._aborting = True
if isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
exception = exception_type(report)
# Kill remaining running processes without waiting for
# the results as we will raise the exception we got back
# to the caller instead of returning any result.
with self._lock:
self._terminate_pool()
if self._managed_pool:
# In case we had to terminate a managed pool, let
# us start a new one to ensure that subsequent calls
# to __call__ on the same Parallel instance will get
# a working pool as they expect.
self._initialize_pool()
raise exception
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
if not self._managed_pool:
n_jobs = self._initialize_pool()
else:
n_jobs = self._effective_n_jobs()
if self.batch_size == 'auto':
self._effective_batch_size = 1
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
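# Added note (not in the original source): when pre_dispatch is a string
# such as the default '2 * n_jobs', the eval() above runs with the local
# n_jobs in scope, so with n_jobs=4 this pre-dispatches 8 tasks before the
# workers start consuming the rest lazily.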
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
self._smoothed_batch_duration = 0.0
try:
self._iterating = True
while self.dispatch_one_batch(iterator):
pass
if pre_dispatch == "all" or n_jobs == 1:
# The iterable was consumed all at once by the above while loop.
# No need to wait for async callbacks to trigger to
# consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output), len(self._output),
short_format_time(elapsed_time)))
finally:
if not self._managed_pool:
self._terminate_pool()
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
| bsd-3-clause |
glemaitre/UnbalancedDataset | imblearn/under_sampling/prototype_generation/cluster_centroids.py | 2 | 7740 | """Class to perform under-sampling by generating centroids based on
clustering."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Fernando Nogueira
# Christos Aridas
# License: MIT
from __future__ import division, print_function
import numpy as np
from scipy import sparse
from sklearn.cluster import KMeans
from sklearn.neighbors import NearestNeighbors
from sklearn.utils import safe_indexing
from ..base import BaseUnderSampler
VOTING_KIND = ('auto', 'hard', 'soft')
class ClusterCentroids(BaseUnderSampler):
"""Perform under-sampling by generating centroids based on
clustering methods.
Method that under samples the majority class by replacing a
cluster of majority samples by the cluster centroid of a KMeans
algorithm. This algorithm keeps N majority samples by fitting the
KMeans algorithm with N cluster to the majority class and using
the coordinates of the N cluster centroids as the new majority
samples.
Read more in the :ref:`User Guide <cluster_centroids>`.
Parameters
----------
ratio : str, dict, or callable, optional (default='auto')
Ratio to use for resampling the data set.
- If ``str``, has to be one of: (i) ``'minority'``: resample the
minority class; (ii) ``'majority'``: resample the majority class,
(iii) ``'not minority'``: resample all classes apart of the minority
class, (iv) ``'all'``: resample all classes, and (v) ``'auto'``:
corresponds to ``'all'`` for over-sampling methods and ``'not
minority'`` for under-sampling methods. The classes targeted will be
over-sampled or under-sampled to achieve an equal number of samples
with the majority or minority class.
- If ``dict``, the keys correspond to the targeted classes. The values
correspond to the desired number of samples.
- If callable, function taking ``y`` and returns a ``dict``. The keys
correspond to the targeted classes. The values correspond to the
desired number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, ``random_state`` is the seed used by the random number
generator; If ``RandomState`` instance, random_state is the random
number generator; If ``None``, the random number generator is the
``RandomState`` instance used by ``np.random``.
estimator : object, optional(default=KMeans())
Pass a :class:`sklearn.cluster.KMeans` estimator.
voting : str, optional (default='auto')
Voting strategy to generate the new samples:
- If ``'hard'``, the nearest-neighbors of the centroids found using the
clustering algorithm will be used.
- If ``'soft'``, the centroids found by the clustering algorithm will
be used.
- If ``'auto'``, if the input is sparse, it will default on ``'hard'``
otherwise, ``'soft'`` will be used.
.. versionadded:: 0.3.0
n_jobs : int, optional (default=1)
The number of threads to open if possible.
Notes
-----
Supports multi-class resampling by sampling each class independently.
See :ref:`sphx_glr_auto_examples_under-sampling_plot_cluster_centroids.py`.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.under_sampling import \
ClusterCentroids # doctest: +NORMALIZE_WHITESPACE
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape {}'.format(Counter(y)))
Original dataset shape Counter({1: 900, 0: 100})
>>> cc = ClusterCentroids(random_state=42)
>>> X_res, y_res = cc.fit_sample(X, y)
>>> print('Resampled dataset shape {}'.format(Counter(y_res)))
... # doctest: +ELLIPSIS
Resampled dataset shape Counter({...})
"""
def __init__(self,
ratio='auto',
random_state=None,
estimator=None,
voting='auto',
n_jobs=1):
super(ClusterCentroids, self).__init__(
ratio=ratio)
self.random_state = random_state
self.estimator = estimator
self.voting = voting
self.n_jobs = n_jobs
def _validate_estimator(self):
"""Private function to create the KMeans estimator"""
if self.estimator is None:
self.estimator_ = KMeans(
random_state=self.random_state, n_jobs=self.n_jobs)
elif isinstance(self.estimator, KMeans):
self.estimator_ = self.estimator
else:
raise ValueError('`estimator` has to be a KMeans clustering.'
' Got {} instead.'.format(type(self.estimator)))
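# Usage sketch (added for illustration, not part of the original module):
# the `estimator` parameter validated above accepts a pre-configured KMeans,
# e.g.
#
#     from sklearn.cluster import KMeans
#     cc = ClusterCentroids(estimator=KMeans(n_init=5), voting='hard')
#     X_res, y_res = cc.fit_sample(X, y)
#
# `n_init=5` is an arbitrary example value; any KMeans configuration works.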
def _generate_sample(self, X, y, centroids, target_class):
if self.voting_ == 'hard':
nearest_neighbors = NearestNeighbors(n_neighbors=1)
nearest_neighbors.fit(X, y)
indices = nearest_neighbors.kneighbors(centroids,
return_distance=False)
X_new = safe_indexing(X, np.squeeze(indices))
else:
if sparse.issparse(X):
X_new = sparse.csr_matrix(centroids)
else:
X_new = centroids
y_new = np.array([target_class] * centroids.shape[0])
return X_new, y_new
def _sample(self, X, y):
"""Resample the dataset.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : array-like, shape (n_samples,)
Corresponding label for each sample in X.
Returns
-------
X_resampled : {ndarray, sparse matrix}, shape \
(n_samples_new, n_features)
The array containing the resampled data.
y_resampled : ndarray, shape (n_samples_new,)
The corresponding label of `X_resampled`
"""
self._validate_estimator()
if self.voting == 'auto':
if sparse.issparse(X):
self.voting_ = 'hard'
else:
self.voting_ = 'soft'
else:
if self.voting in VOTING_KIND:
self.voting_ = self.voting
else:
raise ValueError("'voting' needs to be one of {}. Got {}"
" instead.".format(VOTING_KIND, self.voting))
X_resampled, y_resampled = [], []
for target_class in np.unique(y):
if target_class in self.ratio_.keys():
n_samples = self.ratio_[target_class]
self.estimator_.set_params(**{'n_clusters': n_samples})
self.estimator_.fit(X[y == target_class])
X_new, y_new = self._generate_sample(
X, y, self.estimator_.cluster_centers_, target_class)
X_resampled.append(X_new)
y_resampled.append(y_new)
else:
target_class_indices = np.flatnonzero(y == target_class)
X_resampled.append(safe_indexing(X, target_class_indices))
y_resampled.append(safe_indexing(y, target_class_indices))
if sparse.issparse(X):
X_resampled = sparse.vstack(X_resampled)
else:
X_resampled = np.vstack(X_resampled)
y_resampled = np.hstack(y_resampled)
return X_resampled, np.array(y_resampled)
| mit |
gkoh/pynab | scripts/nzedb_pre_import.py | 2 | 7655 | """
Pynab nzedb pre import
Imports pre files from nzedb dropbox
Usage:
nzedb_pre_import.py large|small
Options:
-h --help Show this screen.
--version Show version.
"""
# This is quite possibly the most hilariously complex import process...
# What I can gather as the column names from the csv, in case anyone else wants to do this.
# title 1, nfo, size, files, filename 9, nuked 11, nukereason, category 15 , predate 17, source 19, requestid 21, groupname 23
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
from pynab.db import db_session, engine, Pre, copy_file
from pynab import releases
import urllib.request
import regex
import json
import io
from docopt import docopt
from pySmartDL import SmartDL
# Pandas is required
try:
import pandas
except:
print("pandas is required to use nzedb pre import: pip install pandas")
# BeautifulSoup is required
try:
from bs4 import BeautifulSoup
except:
print("BeautifulSoup is required to use nzedb pre import: pip install beautifulsoup4")
# Regex used to strip out the file name
FILENAME_REGEX = regex.compile(
"https:\/\/raw.githubusercontent.com\/nZEDb\/nZEDbPre_Dumps\/master\/dumps\/(?P<lastfile>.+)_.+_.+")
COLNAMES = ["name", "filename", "nuked", "category", "pretime", "source", "requestid", "requestgroup"]
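# Added clarification (based on the column description near the top of this
# file): the 0-based usecols [0, 8, 10, 14, 16, 18, 20, 22] used below map
# onto COLNAMES as 0->name, 8->filename, 10->nuked, 14->category,
# 16->pretime, 18->source, 20->requestid, 22->requestgroup.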
INSERTFAILS = []
def nzedbPre():
downloadLinks = []
try:
rawpreJSON = urllib.request.urlopen("https://api.github.com/repositories/45781004/contents/dumps").read()
except:
print("pre-import: Error connecting to dropbox, try again later")
try:
data = open('lastfile.json')
lastFileFromDisk = json.load(data)
except:
print("pre-import: No existinfg file found, will attempt to download and insert all pres")
lastFileFromDisk = None
preJSON = json.loads(rawpreJSON.decode('utf8'))
for x in preJSON:
if x["name"] != "0README.txt":
downloadLinks.append(x["download_url"])
# Try and process each of the csv's, if they are newer than the last file already imported.
for preCSV in downloadLinks:
processingFile = FILENAME_REGEX.search(preCSV).groupdict()
if lastFileFromDisk is None or int(processingFile['lastfile']) > lastFileFromDisk['lastfile']:
try:
print("pre-import: Attempting to download file: {}".format(processingFile['lastfile']))
urllib.request.urlretrieve(preCSV, "unformattedDL.gz")
except:
print("pre-import: Error downloading: {} - Please run the process again".format(preCSV))
INSERTFAILS.append(processingFile['lastfile'])
# The assumption here is, if one fails, you should probably just start again at that file.
break
# Get the data into datatable, much easier to work with.
dirtyFile = pandas.read_csv('unformattedDL.gz', sep='\t', compression='gzip', header=None, na_values='\\N',
usecols=[0, 8, 10, 14, 16, 18, 20, 22], names=COLNAMES)
# Clean and process the file
process(dirtyFile, processingFile)
else:
print("pre-import: More than likely {} has already been imported".format(processingFile['lastfile']))
pass
if INSERTFAILS is not None:
print("pre-import: Failures: {}".format(INSERTFAILS))
def largeNzedbPre():
if os.path.isfile('predb_dump-062714.csv.gz'):
fileExists = True
else:
try:
url = "https://www.dropbox.com/s/btr42dtzzyu3hh3/predb_dump-062714.csv.gz?dl=1"
dest = "."
print("pre-import: File predb_dump-062714.csv not found, attempt to download - may take a while, its 300mb")
obj = SmartDL(url, dest)
obj.start()
fileExists = True
except:
print("pre-import: Error downloading/unzipping. Please try again.")
exit(0)
if fileExists:
dirtyChunk = pandas.read_table('predb_dump-062714.csv.gz', compression='gzip', sep='\t', header=None,
na_values='\\N', usecols=[0, 8, 10, 14, 16, 18, 20, 22], names=COLNAMES,
chunksize=10000, engine='c', error_bad_lines=False, warn_bad_lines=False)
else:
print("pre-import: File predb_dump-062714.csv not found, please try again.")
exit(0)
i = 0
for chunk in dirtyChunk:
process(chunk)
print("pre-import: Imported chunk {}".format(i))
i += 1
def process(precsv, processingFile=None):
ordering = ['name', 'filename', 'nuked', 'category', 'pretime', 'source', 'requestid', 'requestgroup', 'searchname']
# Clean up the file a bit.
precsv.replace("'", "", inplace=True, regex=True)
precsv["nuked"].replace("2", "0", inplace=True)
precsv["nuked"].replace("3", "1", inplace=True)
precsv["nuked"].replace("4", "1", inplace=True)
precsv["nuked"].replace("5", "1", inplace=True)
precsv["nuked"].replace("69", "0", inplace=True)
precsv.replace(".\\N$", '', inplace=True, regex=True)
# Sometimes there are duplicates within the table itself, remove them
precsv.drop_duplicates(subset='name', take_last=True, inplace=True)
# Add clean searchname column
precsv['searchname'] = precsv['name'].map(lambda name: releases.clean_release_name(name))
# Drop the pres without requestid's
precsv = precsv[precsv.requestid != '0']
# Create a list of names to check if they exist
names = list(precsv.name)
# Query to find any existing pres, we need to delete them so COPY doesn't fail
prenamelist = []
with db_session() as db:
if names:
pres = db.query(Pre).filter(Pre.name.in_(names)).all()
for pre in pres:
prenamelist.append(pre.name)
data = io.StringIO()
precsv.to_csv(data, index=False, header=False)
# Delete any pres found as we are essentially going to update them
if prenamelist:
for pre in pres:
db.delete(pre)
db.commit()
print("pre-import: Deleted {} pres that will re-inserted".format(len(prenamelist)))
else:
print("pre-import: File clean, no pres need to be deleted before re-insert")
try:
if processingFile is not None:
print("pre-import: Attempting to add {} to the database".format(processingFile['lastfile']))
data.seek(0)
copy_file(engine, data, ordering, Pre)
# Write out the last pre csv name so it can be restarted later without downloading all the pres.
with open('lastfile.json', 'w') as outfile:
json.dump({'lastfile': int(processingFile['lastfile'])}, outfile)
else:
data.seek(0)
copy_file(engine, data, ordering, Pre)
data.close()
print("pre-import: Chunk import successful")
except Exception as e:
print("pre-import: Error inserting into database - {}".format(e))
if processingFile is not None:
INSERTFAILS.append(processingFile['lastfile'])
else:
print("pre-import: Error processing chunk")
if __name__ == '__main__':
arguments = docopt(__doc__)
if arguments['small']:
nzedbPre()
elif arguments['large']:
largeNzedbPre()
| gpl-2.0 |
JackKelly/neuralnilm_prototype | scripts/e288.py | 2 | 5039 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import scaled_cost, mdn_nll
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
# max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
# skip_probability=0.7,
n_seq_per_batch=16,
subsample_target=4,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
standardise_input=True,
standardise_targets=True,
input_padding=0,
lag=0,
reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: mse(x, t).mean(),
updates_func=momentum,
learning_rate=1e-03,
learning_rate_changes_by_iteration={
100: 5e-04,
500: 1e-04,
4000: 5e-05,
8000: 1e-05
# 3000: 5e-06,
# 4000: 1e-06,
# 10000: 5e-07,
# 50000: 1e-07
},
plotter=MDNPlotter
)
def exp_a(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 4, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
etraiger/PCWG | plots.py | 2 | 15226 | import os
import pandas as pd
from Analysis import chckMake
np = pd.np
class MatplotlibPlotter(object):
def __init__(self,path, analysis):
self.path = path
self.analysis = analysis
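# Usage sketch (added for illustration; the variable names are assumptions
# based on attributes referenced later in this class):
#
#     plotter = MatplotlibPlotter(output_path, analysis)
#     plotter.plotPowerCurve(analysis.inputHubWindSpeed, analysis.actualPower,
#                            analysis.allMeasuredPowerCurve)
#
# i.e. the plotter is built once from a completed analysis and each method
# writes a png into `output_path`.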
def plot_multiple(self, windSpeedCol, powerCol, meanPowerCurveObj):
try:
from matplotlib import pyplot as plt
plt.ioff()
plotTitle = "Power Curve"
meanPowerCurve = meanPowerCurveObj.powerCurveLevels[[windSpeedCol,powerCol,'Data Count']][meanPowerCurveObj.powerCurveLevels['Data Count'] > 0 ].reset_index().set_index(windSpeedCol)
ax = meanPowerCurve[powerCol].plot(color='#00FF00',alpha=0.95,linestyle='--',label='Mean Power Curve')
colourmap = plt.cm.gist_ncar
colours = [colourmap(i) for i in np.linspace(0, 0.9, len(self.analysis.dataFrame[self.analysis.nameColumn].unique()))]
for i,name in enumerate(self.analysis.dataFrame[self.analysis.nameColumn].unique()):
ax = self.analysis.dataFrame[self.analysis.dataFrame[self.analysis.nameColumn] == name].plot(ax = ax, kind='scatter', x=windSpeedCol, y=powerCol, title=plotTitle, alpha=0.2, label=name, color = colours[i])
ax.legend(loc=4, scatterpoints = 1)
ax.set_xlim([min(self.analysis.dataFrame[windSpeedCol].min(),meanPowerCurve.index.min()), max(self.analysis.dataFrame[windSpeedCol].max(),meanPowerCurve.index.max()+2.0)])
ax.set_xlabel(windSpeedCol + ' (m/s)')
ax.set_ylabel(powerCol + ' (kW)')
file_out = self.path + "/Multiple Dataset PowerCurve - " + powerCol + " vs " + windSpeedCol + ".png"
chckMake(self.path)
plt.savefig(file_out)
plt.close()
return file_out
except:
print "Tried to make a power curve scatter chart for multiple data source (%s). Couldn't." % meanPowerCurveObj.name
def plotPowerCurveSensitivityVariationMetrics(self):
try:
from matplotlib import pyplot as plt
plt.ioff()
(self.analysis.powerCurveSensitivityVariationMetrics*100.).plot(kind = 'bar', title = 'Summary of Power Curve Variation by Variable. Significance Threshold = %.2f%%' % (self.analysis.sensitivityAnalysisThreshold * 100), figsize = (12,8))
plt.ylabel('Variation Metric (%)')
file_out = self.path + os.sep + 'Power Curve Sensitivity Analysis Variation Metric Summary.png'
plt.savefig(file_out)
plt.close('all')
except:
print "Tried to plot summary of Power Curve Sensitivity Analysis Variation Metric. Couldn't."
self.analysis.powerCurveSensitivityVariationMetrics.to_csv(self.path + os.sep + 'Power Curve Sensitivity Analysis Variation Metric.csv')
def plotPowerCurveSensitivity(self, sensCol):
try:
df = self.analysis.powerCurveSensitivityResults[sensCol].reset_index()
from matplotlib import pyplot as plt
plt.ioff()
fig = plt.figure(figsize = (12,5))
fig.suptitle('Power Curve Sensitivity to %s' % sensCol)
ax1 = fig.add_subplot(121)
ax1.hold(True)
ax2 = fig.add_subplot(122)
ax2.hold(True)
power_column = self.analysis.measuredTurbulencePower if self.analysis.turbRenormActive else self.analysis.actualPower
for label in self.analysis.sensitivityLabels.keys():
filt = df['Bin'] == label
ax1.plot(df['Wind Speed Bin'][filt], df[power_column][filt], label = label, color = self.analysis.sensitivityLabels[label])
ax2.plot(df['Wind Speed Bin'][filt], df['Energy Delta MWh'][filt], label = label, color = self.analysis.sensitivityLabels[label])
ax1.set_xlabel('Wind Speed (m/s)')
ax1.set_ylabel('Power (kW)')
ax2.set_xlabel('Wind Speed (m/s)')
ax2.set_ylabel('Energy Difference from Mean (MWh)')
box1 = ax1.get_position()
box2 = ax2.get_position()
ax1.set_position([box1.x0 - 0.05 * box1.width, box1.y0 + box1.height * 0.17,
box1.width * 0.95, box1.height * 0.8])
ax2.set_position([box2.x0 + 0.05 * box2.width, box2.y0 + box2.height * 0.17,
box2.width * 1.05, box2.height * 0.8])
handles, labels = ax1.get_legend_handles_labels()
fig.legend(handles, labels, loc='lower center', ncol = len(self.analysis.sensitivityLabels.keys()), fancybox = True, shadow = True)
file_out = self.path + os.sep + 'Power Curve Sensitivity to %s.png' % sensCol
chckMake(self.path)
fig.savefig(file_out)
plt.close()
except:
print "Tried to make a plot of power curve sensitivity to %s. Couldn't." % sensCol
def plotBy(self,by,variable,df):
import turbine
if not isinstance(df,turbine.PowerCurve):
kind = 'scatter'
else:
kind = 'line'
df=df.powerCurveLevels[df.powerCurveLevels['Input Hub Wind Speed'] <= self.analysis.allMeasuredPowerCurve.cutOutWindSpeed]
try:
from matplotlib import pyplot as plt
plt.ioff()
ax = df.plot(kind=kind,x=by ,y=variable,title=variable+" By " +by,alpha=0.6,legend=None)
ax.set_xlim([df[by].min()-1,df[by].max()+1])
ax.set_xlabel(by)
ax.set_ylabel(variable)
file_out = self.path + "/"+variable.replace(" ","_")+"_By_"+by.replace(" ","_")+".png"
chckMake(self.path)
plt.savefig(file_out)
plt.close()
return file_out
except:
print "Tried to make a " + variable.replace(" ","_") + "_By_"+by.replace(" ","_")+" chart. Couldn't."
def plotPowerCurve(self, windSpeedCol, powerCol, meanPowerCurveObj, anon = False, row_filt = None, fname = None, show_analysis_pc = True, mean_title = 'Mean Power Curve', mean_pc_color = '#00FF00'):
try:
from matplotlib import pyplot as plt
plt.ioff()
df = self.analysis.dataFrame.loc[row_filt, :] if row_filt is not None else self.analysis.dataFrame
if (windSpeedCol == self.analysis.densityCorrectedHubWindSpeed) or ((windSpeedCol == self.analysis.inputHubWindSpeed) and (self.analysis.densityCorrectionActive)):
plotTitle = "Power Curve (corrected to {dens} kg/m^3)".format(dens=self.analysis.referenceDensity)
else:
plotTitle = "Power Curve"
ax = df.plot(kind='scatter', x=windSpeedCol, y=powerCol, title=plotTitle, alpha=0.15, label='Filtered Data')
if self.analysis.specifiedPowerCurve is not None:
has_spec_pc = len(self.analysis.specifiedPowerCurve.powerCurveLevels.index) != 0
else:
has_spec_pc = False
if has_spec_pc:
ax = self.analysis.specifiedPowerCurve.powerCurveLevels.sort_index()['Specified Power'].plot(ax = ax, color='#FF0000',alpha=0.9,label='Specified')
if self.analysis.specifiedPowerCurve != self.analysis.powerCurve:
if ((self.analysis.powerCurve.name != 'All Measured') and show_analysis_pc):
ax = self.analysis.powerCurve.powerCurveLevels.sort_index()['Actual Power'].plot(ax = ax, color='#A37ACC',alpha=0.9,label=self.analysis.powerCurve.name)
meanPowerCurve = meanPowerCurveObj.powerCurveLevels[[windSpeedCol,powerCol,'Data Count']][self.analysis.allMeasuredPowerCurve.powerCurveLevels.loc[meanPowerCurveObj.powerCurveLevels.index, 'Data Count'] > 0].reset_index().set_index(windSpeedCol)
ax = meanPowerCurve[powerCol].plot(ax = ax,color=mean_pc_color,alpha=0.95,linestyle='--',
label=mean_title)
ax.legend(loc=4, scatterpoints = 1)
if has_spec_pc:
ax.set_xlim([self.analysis.specifiedPowerCurve.powerCurveLevels.index.min(), self.analysis.specifiedPowerCurve.powerCurveLevels.index.max()+2.0])
else:
ax.set_xlim([min(df[windSpeedCol].min(),meanPowerCurve.index.min()), max(df[windSpeedCol].max(),meanPowerCurve.index.max()+2.0)])
ax.set_xlabel(self.analysis.inputHubWindSpeedSource + ' (m/s)')
ax.set_ylabel(powerCol + ' (kW)')
if anon:
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
fname = ("PowerCurve - " + powerCol + " vs " + windSpeedCol + ".png") if fname is None else fname
file_out = self.path + os.sep + fname
chckMake(self.path)
plt.savefig(file_out)
plt.close()
return file_out
except:
print "Tried to make a power curve scatter chart for %s. Couldn't." % meanPowerCurveObj.name
raise
def plotTurbCorrectedPowerCurve(self, windSpeedCol, powerCol, meanPowerCurveObj):
try:
from matplotlib import pyplot as plt
plt.ioff()
if (windSpeedCol == self.analysis.densityCorrectedHubWindSpeed) or ((windSpeedCol == self.analysis.inputHubWindSpeed) and (self.analysis.densityCorrectionActive)):
plotTitle = "Power Curve (corrected to {dens} kg/m^3)".format(dens=self.analysis.referenceDensity)
else:
plotTitle = "Power Curve"
ax = self.analysis.dataFrame.plot(kind='scatter', x=windSpeedCol, y=powerCol, title=plotTitle, alpha=0.15, label='Filtered Data')
if self.analysis.specifiedPowerCurve is not None:
has_spec_pc = len(self.analysis.specifiedPowerCurve.powerCurveLevels.index) != 0
else:
has_spec_pc = False
if has_spec_pc:
ax = self.analysis.specifiedPowerCurve.powerCurveLevels.sort_index()['Specified Power'].plot(ax = ax, color='#FF0000',alpha=0.9,label='Specified')
meanPowerCurve = meanPowerCurveObj.powerCurveLevels[[windSpeedCol,powerCol,'Data Count']][self.analysis.allMeasuredPowerCurve.powerCurveLevels['Data Count'] > 0 ].reset_index().set_index(windSpeedCol)
ax = meanPowerCurve[powerCol].plot(ax = ax,color='#00FF00',alpha=0.95,linestyle='--',
label='Mean Power Curve')
ax2 = ax.twinx()
if has_spec_pc:
ax.set_xlim([self.analysis.specifiedPowerCurve.powerCurveLevels.index.min(), self.analysis.specifiedPowerCurve.powerCurveLevels.index.max()+2.0])
ax2.set_xlim([self.analysis.specifiedPowerCurve.powerCurveLevels.index.min(), self.analysis.specifiedPowerCurve.powerCurveLevels.index.max()+2.0])
else:
ax.set_xlim([min(self.analysis.dataFrame[windSpeedCol].min(),meanPowerCurve.index.min()), max(self.analysis.dataFrame[windSpeedCol].max(),meanPowerCurve.index.max()+2.0)])
ax2.set_xlim([min(self.analysis.dataFrame[windSpeedCol].min(),meanPowerCurve.index.min()), max(self.analysis.dataFrame[windSpeedCol].max(),meanPowerCurve.index.max()+2.0)])
ax.set_xlabel(self.analysis.inputHubWindSpeedSource + ' (m/s)')
ax.set_ylabel(powerCol + ' (kW)')
refTurbCol = 'Specified Turbulence' if self.analysis.powerCurveMode == 'Specified' else self.analysis.hubTurbulence
ax2.plot(self.analysis.powerCurve.powerCurveLevels.sort_index().index, self.analysis.powerCurve.powerCurveLevels.sort_index()[refTurbCol] * 100., 'm--', label = 'Reference TI')
ax2.set_ylabel('Reference TI (%)')
h1, l1 = ax.get_legend_handles_labels()
h2, l2 = ax2.get_legend_handles_labels()
ax.legend(h1+h2, l1+l2, loc=4, scatterpoints = 1)
file_out = self.path + "/PowerCurve TI Corrected - " + powerCol + " vs " + windSpeedCol + ".png"
chckMake(self.path)
plt.savefig(file_out)
plt.close()
return file_out
except:
print "Tried to make a TI corrected power curve scatter chart for %s. Couldn't." % meanPowerCurveObj.name
def plotPowerLimits(self):
try:
from matplotlib import pyplot as plt
plt.ioff()
windSpeedCol = self.analysis.densityCorrectedHubWindSpeed
ax = self.analysis.dataFrame.plot(kind='scatter',x=windSpeedCol,y=self.analysis.actualPower ,title="Power Values Corrected to {dens} kg/m^3".format(dens=self.analysis.referenceDensity),alpha=0.5,label='Power Mean')
ax = self.analysis.dataFrame.plot(ax=ax,kind='scatter',x=windSpeedCol,y="Power Min",alpha=0.2,label='Power Min',color = 'orange')
ax = self.analysis.dataFrame.plot(ax=ax,kind='scatter',x=windSpeedCol,y="Power Max",alpha=0.2,label='Power Max',color = 'green')
ax = self.analysis.dataFrame.plot(ax=ax,kind='scatter',x=windSpeedCol,y="Power SD",alpha=0.2,label='Power SD',color = 'purple')
ax = self.analysis.specifiedPowerCurve.powerCurveLevels.sort_index()['Specified Power'].plot(ax = ax, color='#FF0000',alpha=0.9,label='Specified')
ax.set_xlim([self.analysis.specifiedPowerCurve.powerCurveLevels.index.min(), self.analysis.specifiedPowerCurve.powerCurveLevels.index.max()+2.0])
ax.legend(loc=4, scatterpoints = 1)
ax.set_xlabel(windSpeedCol)
ax.set_ylabel("Power [kW]")
file_out = self.path + "/PowerValues.png"
chckMake(self.path)
plt.savefig(file_out)
plt.close()
return file_out
except:
print "Tried to make a full power scatter chart. Couldn't."
def plotCalibrationSectors(self):
for datasetConf in self.analysis.datasetConfigs:
try:
from matplotlib import pyplot as plt
plt.ioff()
df = datasetConf.data.calibrationCalculator.calibrationSectorDataframe[['pctSpeedUp','LowerLimit','UpperLimit']].rename(columns={'pctSpeedUp':'% Speed Up','LowerLimit':"IEC Lower",'UpperLimit':"IEC Upper"})
df.plot(kind = 'line', title = 'Variation of wind speed ratio with direction', figsize = (12,8))
plt.ylabel('Wind Speed Ratio (Vturb/Vref) as %')
file_out = self.path + os.sep + 'Wind Speed Ratio with Direction - All Sectors {nm}.png'.format(nm=datasetConf.name)
plt.savefig(file_out)
df = df.loc[np.logical_and(df.index > datasetConf.data.fullDataFrame[datasetConf.data.referenceDirectionBin].min()-5.0 , df.index < datasetConf.data.fullDataFrame[datasetConf.data.referenceDirectionBin].max()+5.0),:]
df.plot(kind = 'line', title = 'Variation of wind speed ratio with direction', figsize = (12,8))
plt.ylabel('Wind Speed Ratio (Vturb/Vref) as %')
file_out = self.path + os.sep + 'Wind Speed Ratio with Direction - Selected Sectors {nm}.png'.format(nm=datasetConf.name)
chckMake(self.path)
plt.savefig(file_out)
plt.close('all')
except:
print "Tried to plot variation of wind speed ratio with direction. Couldn't."
| mit |
herberthudson/pynance | pynance/common.py | 2 | 5485 | """
.. Copyright (c) 2014, 2015 Marshall Farrier
license http://opensource.org/licenses/MIT
Common - generic functions (:mod:`pynance.common`)
==================================================
.. currentmodule:: pynance.common
"""
import numpy as np
import pandas as pd
def featurize(equity_data, n_sessions, **kwargs):
"""
Generate a raw (unnormalized) feature set from the input data.
The value at `column` on the given date is taken
as a feature, and each row contains values for n_sessions
Parameters
-----------
equity_data : DataFrame
data from which to generate features
n_sessions : int
number of sessions to use as features
selection : str, default: 'Adj Close'
column of `equity_data` from which to generate features.
columns : list, default: ``map(str, range((-n_sessions + 1), 1))``
column names for output DataFrame. Default will look like:
['-5', '-4', '-3', '-2', '-1', '0'].
Returns
----------
out : DataFrame
Each row is a sequence of `n_sessions` session values where
the last column matches the value on the date specified by
the DataFrame index.
Examples
--------
>>> pn.featurize(equity_data, n_sessions, **kwargs)
"""
#Benchmarking
#>>> s = 'from __main__ import data\nimport datetime as dt\n'
#>>> timeit.timeit('data.featurize(data.get("ge", dt.date(1960, 1, 1),
# dt.date(2014, 12, 31)), 256)', setup=s, number=1)
#1.6771750450134277
columns = kwargs.get('columns', map(str, range(-n_sessions + 1, 1)))
selection = kwargs.get('selection', 'Adj Close')
# empty DataFrame with desired index and column labels
features = pd.DataFrame(index=equity_data.index[(n_sessions - 1):],
columns=columns, dtype='float64')
values = equity_data[selection].values
for i in range(n_sessions - 1):
features.iloc[:, i] = values[i:(-n_sessions + i + 1)]
features.iloc[:, n_sessions - 1] = values[(n_sessions - 1):]
return features
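# Illustrative example (added; `eqdata` is a hypothetical price frame with an
# 'Adj Close' column): with n_sessions=3 the default output columns are
# ['-2', '-1', '0'] and each row holds the three most recent values ending
# on that row's date, e.g.
#
#     feat = featurize(eqdata, 3)
#     feat.shape == (len(eqdata) - 2, 3)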
def decorate(fn, *args, **kwargs):
"""
Return a new function that replicates the behavior of the input
but also returns an additional value. Used for creating functions
of the proper type to pass to `labeledfeatures()`.
Parameters
----------
fn : function
*args : any
Additional parameters that the returned function will return
**kwargs : dict
Each element in `kwargs` will become an attribute of the output
function.
Returns
----------
wrapped : function
New function that acts like `fn` except that it also returns
an additional value.
Examples
----------
>>> from functools import partial
>>> forecast_interval = 32
>>> features, labels = pn.data.labeledfeatures(eqdata, 256, featurefn,
... decorate(partial(pn.data.lab.growth, forecast_interval, 'Adj Close'), forecast_interval))
>>> def f():
... return 0, 1
...
>>> pn.decorate(f, 3, 4, 5)()
(0, 1, 3, 4, 5)
>>> pn.decorate(lambda x: x * .5, 3, 4, 5)(1.)
(0.5, 3, 4, 5)
>>> pn.decorate(lambda x: x, 1, 2)('foo')
('foo', 1, 2)
>>> pn.decorate(f, 'foo')()
(0, 1, 'foo')
>>> pn.decorate(f, 0, foo='bar').foo
'bar'
Notes
----------
If `fn` returns multiple values, these will be returned in sequence
as the first values returned by `decorate(fn, arg0, arg1, arg2)`. See example
above.
"""
def _wrapper(*_args, **kwargs):
_ret = fn(*_args, **kwargs)
if isinstance(_ret, tuple):
return _ret + args
if len(args) == 0:
return _ret
return (_ret,) + args
for key, value in kwargs.items():
_wrapper.__dict__[key] = value
return _wrapper
def expand(fn, col, inputtype=pd.DataFrame):
"""
Wrap a function applying to a single column to make a function
applying to a multi-dimensional dataframe or ndarray
Parameters
----------
fn : function
Function that applies to a series or vector.
col : str or int
Index of column to which to apply `fn`.
inputtype : class or type
Type of input to be expected by the wrapped function.
Normally pd.DataFrame or np.ndarray. Defaults to pd.DataFrame.
Returns
----------
wrapped : function
Function that takes an input of type `inputtype` and applies
`fn` to the specified `col`.
"""
if inputtype == pd.DataFrame:
if isinstance(col, int):
def _wrapper(*args, **kwargs):
return fn(args[0].iloc[:, col], *args[1:], **kwargs)
return _wrapper
def _wrapper(*args, **kwargs):
return fn(args[0].loc[:, col], *args[1:], **kwargs)
return _wrapper
elif inputtype == np.ndarray:
def _wrapper(*args, **kwargs):
return fn(args[0][:, col], *args[1:], **kwargs)
return _wrapper
raise TypeError("invalid input type")
def has_na(eqdata):
"""
Return false if `eqdata` contains no missing values.
Parameters
----------
eqdata : DataFrame or ndarray
Data to check for missing values (NaN, None)
Returns
----------
answer : bool
False iff `eqdata` contains no missing values.
"""
if isinstance(eqdata, pd.DataFrame):
_values = eqdata.values
else:
_values = eqdata
return len(_values[pd.isnull(_values)]) > 0
| mit |
herilalaina/scikit-learn | sklearn/decomposition/tests/test_factor_analysis.py | 112 | 3203 | # Author: Christian Osendorfer <osendorf@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.exceptions import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
from sklearn.utils.testing import ignore_warnings
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_factor_analysis():
# Test FactorAnalysis ability to recover the data covariance structure
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 20, 5, 3
# Some random settings for the generative model
W = rng.randn(n_components, n_features)
# latent variable of dim 3, 20 of it
h = rng.randn(n_samples, n_components)
# using gamma to model different noise variance
# per component
noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
# generate observations
# wlog, mean is 0
X = np.dot(h, W) + noise
assert_raises(ValueError, FactorAnalysis, svd_method='foo')
fa_fail = FactorAnalysis()
fa_fail.svd_method = 'foo'
assert_raises(ValueError, fa_fail.fit, X)
fas = []
for method in ['randomized', 'lapack']:
fa = FactorAnalysis(n_components=n_components, svd_method=method)
fa.fit(X)
fas.append(fa)
X_t = fa.transform(X)
assert_equal(X_t.shape, (n_samples, n_components))
assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))
diff = np.all(np.diff(fa.loglike_))
assert_greater(diff, 0., 'Log likelihood did not increase')
# Sample Covariance
scov = np.cov(X, rowvar=0., bias=1.)
# Model Covariance
mcov = fa.get_covariance()
diff = np.sum(np.abs(scov - mcov)) / W.size
assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
fa = FactorAnalysis(n_components=n_components,
noise_variance_init=np.ones(n_features))
assert_raises(ValueError, fa.fit, X[:, :2])
f = lambda x, y: np.abs(getattr(x, y)) # sign will not be equal
fa1, fa2 = fas
for attr in ['loglike_', 'components_', 'noise_variance_']:
assert_almost_equal(f(fa1, attr), f(fa2, attr))
fa1.max_iter = 1
fa1.verbose = True
assert_warns(ConvergenceWarning, fa1.fit, X)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
fa.n_components = n_components
fa.fit(X)
cov = fa.get_covariance()
precision = fa.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
| bsd-3-clause |
BillyLiggins/fitting | first.py | 1 | 7031 | import copy
import echidna
import echidna.output.plot as plot
import echidna.core.spectra as spectra
from echidna.output import store
import matplotlib.pyplot as plt
import argparse
import glob
import numpy as np
import os
def convertor(path):
flist=np.array(glob.glob(path))
for ntuple in flist:
os.system("python ~/echidna/echidna/scripts/dump_spectra_ntuple.py -c ~/workspace/PhD/fitting/config.yml -f "+ str(ntuple)+" -s hdf5/")
def combinerNtuple(path,filename):
flist=np.array(glob.glob(path))
print flist
first = True
for hdf5 in flist:
print hdf5
if first:
spectrum1 = store.fill_from_ntuple(hdf5)
first = False
else:
spectrum2 = store.fill_from_ntuple(hdf5)
spectrum1.add(spectrum2)
store.dump(filename, spectrum1)
def combiner(path,filename):
flist=np.array(glob.glob(path))
print flist
first = True
for hdf5 in flist:
print hdf5
if first:
spectrum1 = store.load(hdf5)
first = False
else:
spectrum2 = store.load(hdf5)
spectrum1.add(spectrum2)
store.dump(filename, spectrum1)
"""The way you should do it is to define a lot of spectra and then plot them.
You don't really know how to normalise the histogram or indeed whether that is of any use in the first
place.
"""
def slicer(spectrumPath,filler,nslice):
for i in range(nslice):
spectrum=store.load(spectrumPath)
print spectrum.sum()
shrink_dict = {"energy_reco_low": 0.,
"energy_reco_high": 0.6,
"radial_reco_low": i*6000.0/nslice,
"radial_reco_high": (i+1)*6000/nslice}
spectrum.cut(**shrink_dict)
spectrum.scale(1)
spec2=copy.copy(spectrum)
spec2._name=str(i*1000)+"mm to "+str((i+1)*1000)+"mm"
print type(spec2)
filler.append(spec2)
def slicerMC(spectrumPath,filler,nslice):
for i in range(nslice):
spectrum=store.load(spectrumPath)
print spectrum.sum()
shrink_dict = {"energy_mc_low": 0.,
"energy_mc_high": 1,
"radial_mc_low": i*6000.0/nslice,
"radial_mc_high": (i+1)*6000/nslice}
spectrum.cut(**shrink_dict)
spectrum.scale(1)
spec2=copy.copy(spectrum)
spec2._name="MC"
print type(spec2)
print "This gives the number os events in each window:"
print "mc : "+str(i*6000.0/nslice)+"mm to "+str((i+1)*6000.0/nslice)+"mm : "+str(spec2.sum())
filler.append(spec2)
def slicerReco(spectrumPath,filler,nslice):
for i in range(nslice):
spectrum=store.load(spectrumPath)
print spectrum.sum()
shrink_dict = {"energy_reco_low": 0.,
"energy_reco_high": 1.,
"radial_reco_low": i*6000.0/nslice,
"radial_reco_high": (i+1)*6000/nslice}
spectrum.cut(**shrink_dict)
spectrum.scale(1)
spec2=copy.copy(spectrum)
spec2._name="Reco"
print type(spec2)
print "This gives the number os events in each window:"
print "reco : "+str(i*6000.0/nslice)+"mm to "+str((i+1)*6000.0/nslice)+"mm : "+str(spec2.sum())
filler.append(spec2)
def signalPlotter(spectra,dim,name):
i=0
for spec in spectra:
fig = plt.figure()
ax= fig.add_subplot(1,1,1)
par = spec.get_config().get_par(dim)
width = par.get_width()
bins = np.linspace(par._low,par._high, par._bins+1)
x = bins[:-1] + 0.5*width
plt.xlabel(str(dim)+ " [" + par.get_unit() + "]")
plt.ylabel("Events per " + str(width) + " " + par.get_unit() + " bin")
ax.set(title="Normalised energy spectrum in "+str(i*1000)+"mm to "+str((i+1)*1000)+"mm ",ylabel="Events per " + str(width) + " " + par.get_unit() + " bin", xlabel=str(dim)+" [" + par.get_unit() + "]")
ax.hist(x,bins,weights=spec.project(dim),histtype="stepfilled", color="RoyalBlue",label=spec._name)
fig.savefig("slice_"+str(name)+"_"+str(i*1000)+"_"+str((i+1)*1000)+".png")
i=1+i
def combiPlotter(spectra,dim,name):
i=0
fig = plt.figure()
ax= fig.add_subplot(1,1,1)
for spec in spectra:
par = spec.get_config().get_par(dim)
width = par.get_width()
bins = np.linspace(par._low,par._high, par._bins+1)
x = bins[:-1] + 0.5*width
plt.xlabel(str(dim)+ " [" + par.get_unit() + "]")
plt.ylabel("Events per " + str(width) + " " + par.get_unit() + " bin")
ax.set(title="Normalised energy spectrum in 1000mm slices",ylabel="Events per " + str(width) + " " + par.get_unit() + " bin", xlabel="energy_reco"+ " [" + par.get_unit() + "]")
ax.hist(x,bins,weights=spec.project("energy_reco"),label=spec._name,histtype='step')
ax.set_ylim([0,0.03])
ax.set_xlim([0.2,0.7])
ax.legend(loc="best")
fig.savefig("combined_"+str(name)+".png")
def func(path,nslice,name):
spectra=[]
slicer(path,spectra,nslice)
signalPlotter(spectra,"energy_reco",name)
combiPlotter(spectra,"energy_reco",name)
def po210():
convertor("po210_ntuple/*")
combiner("hdf5/SolarPo**ntuple*","hdf5/SolarPo210_combined.hdf5")
plotpath="plots/"
func("hdf5/SolarPo210_combined.hdf5",6,"po210")
def bi210():
convertor("bi210_ntuple/*")
combiner("hdf5/SolarBi**ntuple*","hdf5/SolarBi210_combined.hdf5")
plotpath="plots/"
func("hdf5/SolarBi210_combined.hdf5",6,"bi210")
def compair(spectrumPathReco,spectrumPathMC,name):
spectraReco=[]
spectraMC=[]
slicerReco(spectrumPathReco,spectraReco,6)
slicerMC(spectrumPathMC,spectraMC,6)
for i in range(0,len(spectraReco)):
fig = plt.figure()
ax= fig.add_subplot(1,1,1)
par = spectraReco[i].get_config().get_par("energy_reco")
width = par.get_width()
bins = np.linspace(par._low,par._high, par._bins+1)
x = bins[:-1] + 0.5*width
ax.set(title="Normalised energy spectrum in "+str(i*1000)+"mm to "+str((i+1)*1000)+"mm ",ylabel="Events per " + str(width) + " " + par.get_unit() + " bin", xlabel="Energy [" + par.get_unit() + "]")
ax.hist(x,bins,weights=spectraReco[i].project("energy_reco"),histtype="stepfilled",label=spectraReco[i]._name)
par = spectraMC[i].get_config().get_par("energy_mc")
width = par.get_width()
bins = np.linspace(par._low,par._high, par._bins+1)
x = bins[:-1] + 0.5*width
ax.hist(x,bins,weights=spectraMC[i].project("energy_mc"),histtype="stepfilled",label=spectraMC[i]._name,alpha=0.75)
ax.legend(loc=2)
fig.savefig("compare_"+str(name)+"_"+str(i*1000)+"_"+str((i+1)*1000)+".png")
if __name__=="__main__":
print "You need to compare the recon against the mc"
print "You should bin in bigger bins becuase you could then bin in 4d"
"""You need to plot the standard spectra"""
| mit |
jobelenus/thegreco | ignore/tracegen.py | 1 | 1364 | #!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:et:wrap:ai:fileencoding=utf-8:
import collections
#import matplotlib.pyplot as plt
factor = 1/4
class TraceGenerator():
def __init__(self):
fname='/Users/jobelenus/work/thegreco/cpu.entries'
self.fname = fname
with open(self.fname) as f:
self.lines = f.readlines()
self.cpu = map(int, self.lines)
def gen_cpu_trace(self):
return self.cpu
def gen_mem_trace(self):
self.mem = collections.deque(self.cpu)
self.mem.rotate(len(self.cpu)/4)
return self.mem
def gen_disk_trace(self):
self.disk = collections.deque(self.cpu)
self.disk.rotate(2*len(self.cpu)/4)
return self.disk
def gen_net_trace(self):
self.net = collections.deque(self.cpu)
self.net.rotate(3*len(self.cpu)/4)
return self.net
def gen_trace(self):
self.gen_cpu_trace()
self.gen_mem_trace()
self.gen_disk_trace()
self.gen_net_trace()
self.trace = zip(self.cpu, self.mem, self.disk, self.net)
return self.trace
#tg = TraceGenerator()
#cpu = tg.gen_cpu_trace()
#mem = tg.gen_mem_trace()
#disk = tg.gen_disk_trace()
#net = tg.gen_net_trace()
#trace = zip(cpu, mem, disk, net)
#print trace
#plt.bar(range(0,len(cpu)), cpu)
#plt.show()
| gpl-3.0 |
seckcoder/lang-learn | python/sklearn/sklearn/decomposition/tests/test_kernel_pca.py | 1 | 7069 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.utils.testing import assert_less
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=True)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
#X_pred2 = kpca.inverse_transform(X_pred_transformed)
#assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(4, eigen_solver=eigen_solver,
kernel='precomputed').fit(np.dot(X_fit,
X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(4, eigen_solver=eigen_solver,
kernel='precomputed').fit(np.dot(X_fit,
X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
"""Test the linear separability of the first 2D KPCA transform"""
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| unlicense |
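A minimal standalone sketch of the nested-circles recipe exercised by the tests above, assuming the same scikit-learn API they import: an RBF kernel PCA makes the two circles linearly separable for a perceptron. The gamma value mirrors the data-dependent choice noted in test_nested_circles.

from sklearn.datasets import make_circles
from sklearn.decomposition import KernelPCA
from sklearn.linear_model import Perceptron

X, y = make_circles(n_samples=400, factor=.3, noise=.05, random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2, gamma=2.)
X_kpca = kpca.fit_transform(X)
print(Perceptron().fit(X, y).score(X, y))            # low: raw circles are not linearly separable
print(Perceptron().fit(X_kpca, y).score(X_kpca, y))  # ~1.0 in the kernel PCA space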
fzalkow/scikit-learn | sklearn/linear_model/tests/test_omp.py | 272 | 7752 | # Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
| bsd-3-clause |
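A standalone sketch of the recovery behaviour these tests check, using the same fixture construction (and positional arguments) as the module header above: orthogonal matching pursuit recovers the support of the generating sparse code.

import numpy as np
from sklearn.datasets import make_sparse_coded_signal
from sklearn.linear_model import orthogonal_mp

y, X, gamma = make_sparse_coded_signal(3, 30, 20, 5, random_state=0)
coef = orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5)
print(np.flatnonzero(coef))         # recovered support
print(np.flatnonzero(gamma[:, 0]))  # true support; equal, as asserted in test_perfect_signal_recovery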
mining/mining | mining/models/cube.py | 4 | 3819 | # -*- coding: utf-8 -*-
import gc
import pandas
from datetime import datetime
from pandas import DataFrame
from sqlalchemy import create_engine
from sqlalchemy.sql import text
from sqlalchemy.orm import sessionmaker
from mining.utils import conf, log_it
from mining.utils._pandas import fix_render
from mining.db import DataWarehouse
from bottle.ext.mongo import MongoPlugin
class Cube(object):
def __init__(self, _cube):
log_it("START: {}".format(_cube['slug']), "bin-mining")
self.mongo = MongoPlugin(
uri=conf("mongodb")["uri"],
db=conf("mongodb")["db"],
json_mongo=True).get_mongo()
try:
del _cube['_id']
except KeyError:
pass
self.cube = _cube
self.slug = self.cube['slug']
def load(self):
self.cube['run'] = 'run'
self.mongo['cube'].update({'slug': self.slug}, self.cube)
self.cube['start_process'] = datetime.now()
_sql = self.cube['sql']
if _sql[-1] == ';':
_sql = _sql[:-1]
self.sql = u"""SELECT * FROM ({}) AS CUBE;""".format(_sql)
self.connection = self.mongo['connection'].find_one({
'slug': self.cube['connection']})['connection']
log_it("CONNECT IN RELATION DATA BASE: {}".format(self.slug),
"bin-mining")
if 'sqlite' in self.connection:
e = create_engine(self.connection)
else:
e = create_engine(self.connection,
**conf('openmining')['sql_conn_params'])
Session = sessionmaker(bind=e)
session = Session()
resoverall = session.execute(text(self.sql))
self.data = resoverall.fetchall()
self.keys = resoverall.keys()
def environment(self, t):
if t not in ['relational']:
self.sql = t
def _data(self, data):
self.data = data
def _keys(self, keys):
if type(keys) == list:
self.keys = keys
self.keys = list(keys)
def frame(self, data_type=None):
log_it("LOAD DATA ON DATAWAREHOUSE via {}: {}".format(
data_type or 'dict', self.slug), "bin-mining")
if data_type:
self.df = getattr(pandas, "read_{}".format(data_type))(self.data)
else:
self.df = DataFrame(self.data)
if self.df.empty:
self.pdict = {}
log_it('[warning]Empty cube: {}!!'.format(self.cube),
"bin-mining")
return
try:
self.df.columns = self.keys
except AttributeError:
self._keys(self.df.columns.tolist())
        # If OML is enabled and the cube defines a script, run it over the data
if conf("oml").get("on") and self.cube.get("oml"):
from oml import RunTime
self.df.columns = self.keys
df = RunTime(conf("oml").get("language", "lua"),
self.df.to_dict(orient='records'),
self.cube.get("oml"),
conf("oml").get("class", {"OML": "oml.base.OMLBase"}))
self.df = DataFrame(df)
self._keys(self.df.columns.tolist())
self.df.head()
self.pdict = map(fix_render, self.df.to_dict(orient='records'))
def save(self):
log_it("SAVE DATA (JSON) ON DATA WAREHOUSE: {}".format(self.slug),
"bin-mining")
data = {'data': self.pdict, 'columns': self.keys}
DW = DataWarehouse()
DW.save(self.slug, data)
self.cube['status'] = True
self.cube['lastupdate'] = datetime.now()
self.cube['run'] = True
self.mongo['cube'].update({'slug': self.cube['slug']}, self.cube)
log_it("CLEAN MEMORY: {}".format(self.slug), "bin-mining")
gc.collect()
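# Typical lifecycle, as a sketch pieced together from the methods above (the
# names below are illustrative; in production the mining scheduler drives this):
#
#     cube_doc = mongo['cube'].find_one({'slug': 'my-cube'})   # hypothetical slug
#     c = Cube(cube_doc)
#     c.load()    # run the cube's SQL against its configured connection
#     c.frame()   # build the DataFrame and the serialisable dict payload
#     c.save()    # persist to the data warehouse and mark the cube as run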
| mit |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/ipython-2.2.0-py2.7.egg/IPython/testing/iptestcontroller.py | 7 | 21202 | # -*- coding: utf-8 -*-
"""IPython Test Process Controller
This module runs one or more subprocesses which will actually run the IPython
test suite.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2009-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import argparse
import json
import multiprocessing.pool
import os
import shutil
import signal
import sys
import subprocess
import time
from .iptest import have, test_group_names as py_test_group_names, test_sections, StreamCapturer
from IPython.utils.path import compress_user
from IPython.utils.py3compat import bytes_to_str
from IPython.utils.sysinfo import get_sys_info
from IPython.utils.tempdir import TemporaryDirectory
class TestController(object):
"""Run tests in a subprocess
"""
#: str, IPython test suite to be executed.
section = None
#: list, command line arguments to be executed
cmd = None
#: dict, extra environment variables to set for the subprocess
env = None
#: list, TemporaryDirectory instances to clear up when the process finishes
dirs = None
#: subprocess.Popen instance
process = None
#: str, process stdout+stderr
stdout = None
def __init__(self):
self.cmd = []
self.env = {}
self.dirs = []
def setup(self):
"""Create temporary directories etc.
This is only called when we know the test group will be run. Things
created here may be cleaned up by self.cleanup().
"""
pass
def launch(self, buffer_output=False):
# print('*** ENV:', self.env) # dbg
# print('*** CMD:', self.cmd) # dbg
env = os.environ.copy()
env.update(self.env)
output = subprocess.PIPE if buffer_output else None
stdout = subprocess.STDOUT if buffer_output else None
self.process = subprocess.Popen(self.cmd, stdout=output,
stderr=stdout, env=env)
def wait(self):
self.stdout, _ = self.process.communicate()
return self.process.returncode
def print_extra_info(self):
"""Print extra information about this test run.
If we're running in parallel and showing the concise view, this is only
called if the test group fails. Otherwise, it's called before the test
group is started.
The base implementation does nothing, but it can be overridden by
subclasses.
"""
return
def cleanup_process(self):
"""Cleanup on exit by killing any leftover processes."""
subp = self.process
if subp is None or (subp.poll() is not None):
return # Process doesn't exist, or is already dead.
try:
print('Cleaning up stale PID: %d' % subp.pid)
subp.kill()
except: # (OSError, WindowsError) ?
# This is just a best effort, if we fail or the process was
# really gone, ignore it.
pass
else:
for i in range(10):
if subp.poll() is None:
time.sleep(0.1)
else:
break
if subp.poll() is None:
# The process did not die...
print('... failed. Manual cleanup may be required.')
def cleanup(self):
"Kill process if it's still alive, and clean up temporary directories"
self.cleanup_process()
for td in self.dirs:
td.cleanup()
__del__ = cleanup
class PyTestController(TestController):
"""Run Python tests using IPython.testing.iptest"""
#: str, Python command to execute in subprocess
pycmd = None
def __init__(self, section, options):
"""Create new test runner."""
TestController.__init__(self)
self.section = section
# pycmd is put into cmd[2] in PyTestController.launch()
self.cmd = [sys.executable, '-c', None, section]
self.pycmd = "from IPython.testing.iptest import run_iptest; run_iptest()"
self.options = options
def setup(self):
ipydir = TemporaryDirectory()
self.dirs.append(ipydir)
self.env['IPYTHONDIR'] = ipydir.name
self.workingdir = workingdir = TemporaryDirectory()
self.dirs.append(workingdir)
self.env['IPTEST_WORKING_DIR'] = workingdir.name
# This means we won't get odd effects from our own matplotlib config
self.env['MPLCONFIGDIR'] = workingdir.name
# From options:
if self.options.xunit:
self.add_xunit()
if self.options.coverage:
self.add_coverage()
self.env['IPTEST_SUBPROC_STREAMS'] = self.options.subproc_streams
self.cmd.extend(self.options.extra_args)
@property
def will_run(self):
try:
return test_sections[self.section].will_run
except KeyError:
return True
def add_xunit(self):
xunit_file = os.path.abspath(self.section + '.xunit.xml')
self.cmd.extend(['--with-xunit', '--xunit-file', xunit_file])
def add_coverage(self):
try:
sources = test_sections[self.section].includes
except KeyError:
sources = ['IPython']
coverage_rc = ("[run]\n"
"data_file = {data_file}\n"
"source =\n"
" {source}\n"
).format(data_file=os.path.abspath('.coverage.'+self.section),
source="\n ".join(sources))
config_file = os.path.join(self.workingdir.name, '.coveragerc')
with open(config_file, 'w') as f:
f.write(coverage_rc)
self.env['COVERAGE_PROCESS_START'] = config_file
self.pycmd = "import coverage; coverage.process_startup(); " + self.pycmd
def launch(self, buffer_output=False):
self.cmd[2] = self.pycmd
super(PyTestController, self).launch(buffer_output=buffer_output)
js_prefix = 'js/'
def get_js_test_dir():
import IPython.html.tests as t
return os.path.join(os.path.dirname(t.__file__), '')
def all_js_groups():
import glob
test_dir = get_js_test_dir()
all_subdirs = glob.glob(test_dir + '*/')
return [js_prefix+os.path.relpath(x, test_dir) for x in all_subdirs if os.path.relpath(x, test_dir) != '__pycache__']
class JSController(TestController):
"""Run CasperJS tests """
def __init__(self, section):
"""Create new test runner."""
TestController.__init__(self)
self.section = section
js_test_dir = get_js_test_dir()
includes = '--includes=' + os.path.join(js_test_dir,'util.js')
test_cases = os.path.join(js_test_dir, self.section[len(js_prefix):])
self.cmd = ['casperjs', 'test', includes, test_cases]
def setup(self):
self.ipydir = TemporaryDirectory()
self.nbdir = TemporaryDirectory()
self.dirs.append(self.ipydir)
self.dirs.append(self.nbdir)
os.makedirs(os.path.join(self.nbdir.name, os.path.join(u'sub ∂ir1', u'sub ∂ir 1a')))
os.makedirs(os.path.join(self.nbdir.name, os.path.join(u'sub ∂ir2', u'sub ∂ir 1b')))
# start the ipython notebook, so we get the port number
self.server_port = 0
self._init_server()
if self.server_port:
self.cmd.append("--port=%i" % self.server_port)
else:
# don't launch tests if the server didn't start
self.cmd = [sys.executable, '-c', 'raise SystemExit(1)']
def print_extra_info(self):
print("Running tests with notebook directory %r" % self.nbdir.name)
@property
def will_run(self):
return all(have[a] for a in ['zmq', 'tornado', 'jinja2', 'casperjs', 'sqlite3'])
def _init_server(self):
"Start the notebook server in a separate process"
self.server_command = command = [sys.executable,
'-m', 'IPython.html',
'--no-browser',
'--ipython-dir', self.ipydir.name,
'--notebook-dir', self.nbdir.name,
]
# ipc doesn't work on Windows, and darwin has crazy-long temp paths,
# which run afoul of ipc's maximum path length.
if sys.platform.startswith('linux'):
command.append('--KernelManager.transport=ipc')
self.stream_capturer = c = StreamCapturer()
c.start()
self.server = subprocess.Popen(command, stdout=c.writefd, stderr=subprocess.STDOUT)
self.server_info_file = os.path.join(self.ipydir.name,
'profile_default', 'security', 'nbserver-%i.json' % self.server.pid
)
self._wait_for_server()
def _wait_for_server(self):
"""Wait 30 seconds for the notebook server to start"""
for i in range(300):
if self.server.poll() is not None:
return self._failed_to_start()
if os.path.exists(self.server_info_file):
self._load_server_info()
return
time.sleep(0.1)
print("Notebook server-info file never arrived: %s" % self.server_info_file,
file=sys.stderr
)
def _failed_to_start(self):
"""Notebook server exited prematurely"""
captured = self.stream_capturer.get_buffer().decode('utf-8', 'replace')
print("Notebook failed to start: ", file=sys.stderr)
print(self.server_command)
print(captured, file=sys.stderr)
def _load_server_info(self):
"""Notebook server started, load connection info from JSON"""
with open(self.server_info_file) as f:
info = json.load(f)
self.server_port = info['port']
def cleanup(self):
try:
self.server.terminate()
except OSError:
# already dead
pass
self.server.wait()
self.stream_capturer.halt()
TestController.cleanup(self)
def prepare_controllers(options):
"""Returns two lists of TestController instances, those to run, and those
not to run."""
testgroups = options.testgroups
if testgroups:
py_testgroups = [g for g in testgroups if (g in py_test_group_names) \
or g.startswith('IPython.')]
if 'js' in testgroups:
js_testgroups = all_js_groups()
else:
js_testgroups = [g for g in testgroups if g not in py_testgroups]
else:
py_testgroups = py_test_group_names
js_testgroups = all_js_groups()
if not options.all:
test_sections['parallel'].enabled = False
c_js = [JSController(name) for name in js_testgroups]
c_py = [PyTestController(name, options) for name in py_testgroups]
controllers = c_py + c_js
to_run = [c for c in controllers if c.will_run]
not_run = [c for c in controllers if not c.will_run]
return to_run, not_run
def do_run(controller, buffer_output=True):
"""Setup and run a test controller.
If buffer_output is True, no output is displayed, to avoid it appearing
interleaved. In this case, the caller is responsible for displaying test
output on failure.
Returns
-------
controller : TestController
The same controller as passed in, as a convenience for using map() type
APIs.
exitcode : int
The exit code of the test subprocess. Non-zero indicates failure.
"""
try:
try:
controller.setup()
if not buffer_output:
controller.print_extra_info()
controller.launch(buffer_output=buffer_output)
except Exception:
import traceback
traceback.print_exc()
return controller, 1 # signal failure
exitcode = controller.wait()
return controller, exitcode
except KeyboardInterrupt:
return controller, -signal.SIGINT
finally:
controller.cleanup()
def report():
"""Return a string with a summary report of test-related variables."""
inf = get_sys_info()
out = []
def _add(name, value):
out.append((name, value))
_add('IPython version', inf['ipython_version'])
_add('IPython commit', "{} ({})".format(inf['commit_hash'], inf['commit_source']))
_add('IPython package', compress_user(inf['ipython_path']))
_add('Python version', inf['sys_version'].replace('\n',''))
_add('sys.executable', compress_user(inf['sys_executable']))
_add('Platform', inf['platform'])
width = max(len(n) for (n,v) in out)
out = ["{:<{width}}: {}\n".format(n, v, width=width) for (n,v) in out]
avail = []
not_avail = []
for k, is_avail in have.items():
if is_avail:
avail.append(k)
else:
not_avail.append(k)
if avail:
out.append('\nTools and libraries available at test time:\n')
avail.sort()
out.append(' ' + ' '.join(avail)+'\n')
if not_avail:
out.append('\nTools and libraries NOT available at test time:\n')
not_avail.sort()
out.append(' ' + ' '.join(not_avail)+'\n')
return ''.join(out)
def run_iptestall(options):
"""Run the entire IPython test suite by calling nose and trial.
This function constructs :class:`IPTester` instances for all IPython
modules and package and then runs each of them. This causes the modules
and packages of IPython to be tested each in their own subprocess using
nose.
Parameters
----------
All parameters are passed as attributes of the options object.
testgroups : list of str
Run only these sections of the test suite. If empty, run all the available
sections.
fast : int or None
Run the test suite in parallel, using n simultaneous processes. If None
is passed, one process is used per CPU core. Default 1 (i.e. sequential)
inc_slow : bool
Include slow tests, like IPython.parallel. By default, these tests aren't
run.
xunit : bool
Produce Xunit XML output. This is written to multiple foo.xunit.xml files.
coverage : bool or str
Measure code coverage from tests. True will store the raw coverage data,
or pass 'html' or 'xml' to get reports.
extra_args : list
Extra arguments to pass to the test subprocesses, e.g. '-v'
"""
to_run, not_run = prepare_controllers(options)
def justify(ltext, rtext, width=70, fill='-'):
ltext += ' '
rtext = (' ' + rtext).rjust(width - len(ltext), fill)
return ltext + rtext
# Run all test runners, tracking execution time
failed = []
t_start = time.time()
print()
if options.fast == 1:
# This actually means sequential, i.e. with 1 job
for controller in to_run:
print('Test group:', controller.section)
sys.stdout.flush() # Show in correct order when output is piped
controller, res = do_run(controller, buffer_output=False)
if res:
failed.append(controller)
if res == -signal.SIGINT:
print("Interrupted")
break
print()
else:
# Run tests concurrently
try:
pool = multiprocessing.pool.ThreadPool(options.fast)
for (controller, res) in pool.imap_unordered(do_run, to_run):
res_string = 'OK' if res == 0 else 'FAILED'
print(justify('Test group: ' + controller.section, res_string))
if res:
controller.print_extra_info()
print(bytes_to_str(controller.stdout))
failed.append(controller)
if res == -signal.SIGINT:
print("Interrupted")
break
except KeyboardInterrupt:
return
for controller in not_run:
print(justify('Test group: ' + controller.section, 'NOT RUN'))
t_end = time.time()
t_tests = t_end - t_start
nrunners = len(to_run)
nfail = len(failed)
# summarize results
print('_'*70)
print('Test suite completed for system with the following information:')
print(report())
took = "Took %.3fs." % t_tests
print('Status: ', end='')
if not failed:
print('OK (%d test groups).' % nrunners, took)
else:
# If anything went wrong, point out what command to rerun manually to
# see the actual errors and individual summary
failed_sections = [c.section for c in failed]
print('ERROR - {} out of {} test groups failed ({}).'.format(nfail,
nrunners, ', '.join(failed_sections)), took)
print()
print('You may wish to rerun these, with:')
print(' iptest', *failed_sections)
print()
if options.coverage:
from coverage import coverage
cov = coverage(data_file='.coverage')
cov.combine()
cov.save()
# Coverage HTML report
if options.coverage == 'html':
html_dir = 'ipy_htmlcov'
shutil.rmtree(html_dir, ignore_errors=True)
print("Writing HTML coverage report to %s/ ... " % html_dir, end="")
sys.stdout.flush()
# Custom HTML reporter to clean up module names.
from coverage.html import HtmlReporter
class CustomHtmlReporter(HtmlReporter):
def find_code_units(self, morfs):
super(CustomHtmlReporter, self).find_code_units(morfs)
for cu in self.code_units:
nameparts = cu.name.split(os.sep)
if 'IPython' not in nameparts:
continue
ix = nameparts.index('IPython')
cu.name = '.'.join(nameparts[ix:])
# Reimplement the html_report method with our custom reporter
cov._harvest_data()
cov.config.from_args(omit='*{0}tests{0}*'.format(os.sep), html_dir=html_dir,
html_title='IPython test coverage',
)
reporter = CustomHtmlReporter(cov, cov.config)
reporter.report(None)
print('done.')
# Coverage XML report
elif options.coverage == 'xml':
cov.xml_report(outfile='ipy_coverage.xml')
if failed:
# Ensure that our exit code indicates failure
sys.exit(1)
argparser = argparse.ArgumentParser(description='Run IPython test suite')
argparser.add_argument('testgroups', nargs='*',
help='Run specified groups of tests. If omitted, run '
'all tests.')
argparser.add_argument('--all', action='store_true',
help='Include slow tests not run by default.')
argparser.add_argument('-j', '--fast', nargs='?', const=None, default=1, type=int,
help='Run test sections in parallel. This starts as many '
'processes as you have cores, or you can specify a number.')
argparser.add_argument('--xunit', action='store_true',
help='Produce Xunit XML results')
argparser.add_argument('--coverage', nargs='?', const=True, default=False,
help="Measure test coverage. Specify 'html' or "
"'xml' to get reports.")
argparser.add_argument('--subproc-streams', default='capture',
help="What to do with stdout/stderr from subprocesses. "
"'capture' (default), 'show' and 'discard' are the options.")
def default_options():
"""Get an argparse Namespace object with the default arguments, to pass to
:func:`run_iptestall`.
"""
options = argparser.parse_args([])
options.extra_args = []
return options
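# Programmatic use, as a sketch (the group name below is only an example):
#
#     opts = default_options()
#     opts.testgroups = ['IPython.core']
#     run_iptestall(opts)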
def main():
# iptest doesn't work correctly if the working directory is the
# root of the IPython source tree. Tell the user to avoid
# frustration.
if os.path.exists(os.path.join(os.getcwd(),
'IPython', 'testing', '__main__.py')):
print("Don't run iptest from the IPython source directory",
file=sys.stderr)
sys.exit(1)
# Arguments after -- should be passed through to nose. Argparse treats
# everything after -- as regular positional arguments, so we separate them
# first.
try:
ix = sys.argv.index('--')
except ValueError:
to_parse = sys.argv[1:]
extra_args = []
else:
to_parse = sys.argv[1:ix]
extra_args = sys.argv[ix+1:]
options = argparser.parse_args(to_parse)
options.extra_args = extra_args
run_iptestall(options)
if __name__ == '__main__':
main()
| apache-2.0 |
pprett/scikit-learn | sklearn/neural_network/rbm.py | 46 | 12291 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <dauphiya@iro.umontreal.ca>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hidden units. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float64)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
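    # For binary hidden units the sum over h factorises, giving the closed
    # form evaluated above:
    #     F(v) = -b_v . v - sum_j log(1 + exp(w_j . v + c_j))
    # where log(1 + exp(x)) is computed stably as np.logaddexp(0, x).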
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='F')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='F')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
| bsd-3-clause |
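The class docstring example above uses a single fit call; BernoulliRBM.partial_fit (defined above) also supports incremental training on minibatches. A minimal sketch on toy binary data, assuming the same import path:

import numpy as np
from sklearn.neural_network import BernoulliRBM

rng = np.random.RandomState(0)
X = (rng.random_sample((100, 6)) > 0.5).astype(np.float64)  # toy binary data
rbm = BernoulliRBM(n_components=2, learning_rate=0.05, batch_size=10, random_state=0)
for epoch in range(5):
    for start in range(0, X.shape[0], 10):
        rbm.partial_fit(X[start:start + 10])
print(rbm.transform(X[:3]))  # hidden-unit activation probabilities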
yavalvas/yav_com | build/matplotlib/lib/mpl_examples/widgets/slider_demo.py | 13 | 1179 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
fig, ax = plt.subplots()
plt.subplots_adjust(left=0.25, bottom=0.25)
t = np.arange(0.0, 1.0, 0.001)
a0 = 5
f0 = 3
s = a0*np.sin(2*np.pi*f0*t)
l, = plt.plot(t,s, lw=2, color='red')
plt.axis([0, 1, -10, 10])
axcolor = 'lightgoldenrodyellow'
axfreq = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axamp = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
sfreq = Slider(axfreq, 'Freq', 0.1, 30.0, valinit=f0)
samp = Slider(axamp, 'Amp', 0.1, 10.0, valinit=a0)
def update(val):
amp = samp.val
freq = sfreq.val
l.set_ydata(amp*np.sin(2*np.pi*freq*t))
fig.canvas.draw_idle()
sfreq.on_changed(update)
samp.on_changed(update)
resetax = plt.axes([0.8, 0.025, 0.1, 0.04])
button = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')
def reset(event):
sfreq.reset()
samp.reset()
button.on_clicked(reset)
rax = plt.axes([0.025, 0.5, 0.15, 0.15], axisbg=axcolor)
radio = RadioButtons(rax, ('red', 'blue', 'green'), active=0)
def colorfunc(label):
l.set_color(label)
fig.canvas.draw_idle()
radio.on_clicked(colorfunc)
plt.show()
| mit |
heliopython/heliopy | doc/source/conf.py | 1 | 11504 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# heliopy documentation build configuration file, created by
# sphinx-quickstart
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
"""
import os
import sys
import unittest.mock as mock
import matplotlib
import heliopy
matplotlib.use('agg')
sys.path.insert(0, os.path.abspath('../../'))
html_favicon = '../../artwork/favicon.ico'
html_sidebars = {'**': ['docsidebar.html']}
# Pretend these modules exits so readthedocs builds
MOCK_MODULES = []
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx_gallery.gen_gallery',
'sphinx_automodapi.automodapi',
'sphinx_issues'
]
intersphinx_mapping = {
'matplotlib': ('https://matplotlib.org', None),
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
'astropy': ('https://docs.astropy.org/en/stable', None),
'sunpy': ('https://docs.sunpy.org/en/stable', None)}
sphinx_gallery_conf = {
'default_thumb_file': os.path.abspath(os.path.join('..', '..', 'artwork', 'logo_circle.png')),
'examples_dirs': '../../examples',
'gallery_dirs': 'auto_examples',
'backreferences_dir': 'gen_modules/backreferences',
'doc_module': ('sphinx_gallery', 'heliopy'),
'min_reported_time': 0,
'abort_on_example_error': False,
}
issues_github_path = 'heliopython/heliopy'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'HelioPy'
author = 'David Stansby'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = heliopy.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = 'py:obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'heliopy_theme'
html_theme_path = ['../']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {'canonical_url': 'http://docs.heliopy.org/en/stable/',
# 'analytics_id': 'UA-112461508-1',
# 'prev_next_buttons_location': 'None'}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'heliopy v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'heliopydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'heliopy.tex', 'HelioPy Documentation',
'David Stansby', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'HelioPy', 'HelioPy Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
'''texinfo_documents = [
(master_doc, 'HelioPy', 'HelioPy Documentation',
author, 'HelioPy team', 'Python for space physics.',
'Miscellaneous'),
]
html_theme_options = {
"about_links": [
("About", "http://docs.heliopy.org/en/stable/guide/about.html", 1),
(
"Acknowledge HelioPy",
"http://docs.heliopy.org/en/stable/guide/citing.html",
1,
),
("Code of Conduct", "http://docs.heliopy.org/en/stable/guide/code-of-conduct.html", 1),
],
"navbar_links": [
("Documentation", "http://docs.heliopy.org/en/stable/index.html", 1),
("Get Help", "http://docs.heliopy.org/en/stable/index.html", 1),
],
}'''
| gpl-3.0 |
tfwillems/STRValidator | pedigree_analysis.py | 1 | 23183 | import matplotlib as mpl
mpl.use('Agg')
import collections
import sys
import numpy
import matplotlib.pyplot as plt
import vcf
from matplotlib.backends.backend_pdf import PdfPages
from fractions import Fraction
class TRIO:
def __init__(self, child, mother, father):
self.child = child
self.mother = mother
self.father = father
def __str__(self):
return "%s\t%s\t%s"%(self.child, self.mother, self.father)
class FATHER_SON_PAIR:
def __init__(self, son, father):
self.son = son
self.father = father
def __str__(self):
return "%s\t%s"%(self.son, self.father)
def read_1kg_pedigree_file(input_file, header=True):
data = open(input_file, "r")
if header:
data.readline()
trios, father_son_pairs = [], []
for line in data:
tokens = line.strip().split()
if tokens[2] != "0" and tokens[3] != "0":
child, dad, mom = tokens[1:4]
trios.append(TRIO(child, dad, mom))
if tokens[2] != "0" and tokens[4] == "1":
father_son_pairs.append(FATHER_SON_PAIR(tokens[1], tokens[2]))
data.close()
print("There are %d trios and %d father-son-pairs in the pedigree file"%(len(trios), len(father_son_pairs)))
return trios, father_son_pairs
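# Assumed input layout (standard 1000 Genomes .ped columns, whitespace separated):
#     FamilyID  IndividualID  PaternalID  MaternalID  Sex  Phenotype ...
# A row with both parental IDs non-zero yields a TRIO; a male row
# (Sex == "1") with a known father also yields a FATHER_SON_PAIR.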
# Find the index for the highest bin which is less than
# or equal to the provided value
def find_index(bins, value):
low = 0
high = len(bins)-1
while high > low + 1:
midval = bins[(low+high)/2]
if value > midval:
low = (low+high)/2
elif value < midval:
high = (low+high)/2 - 1
else:
return (low+high)/2
if value < bins[low]:
exit("Unable to find index. Exiting...")
if value >= bins[high]:
return high
else:
return low
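# Worked examples for find_index (bins must be sorted in ascending order):
#     find_index([0, 5, 10], 7)  -> 1   (7 lies in the [5, 10) bin)
#     find_index([0, 5, 10], 10) -> 2   (values >= the last edge map to it)
#     find_index([0, 5, 10], -1) -> exits: value is below the first edge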
def is_discordant(a11, a12, a21, a22):
if (a11 == a21 and a12 == a22) or (a11 == a22 and a12 == a21):
return False
else:
return True
def is_mendelian(a11, a12, a21, a22, a31, a32):
if (a31 == a11 or a31 == a12) and (a32 == a21 or a32 == a22):
return True
elif (a31 == a21 or a31 == a22) and (a32 == a11 or a32 == a12):
return True
else:
return False
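def _example_mendelian_check():
    # Added illustration (not part of the original script): a child genotype
    # is Mendelian-consistent when one allele can come from each parent.
    # Alleles are compared as strings, as produced by splitting the GT field.
    assert is_mendelian('12', '14', '16', '18', '12', '16')       # one allele per parent
    assert not is_mendelian('12', '12', '16', '16', '12', '12')   # second '12' has no source
    assert is_discordant('12', '14', '16', '18')                  # the two parental genotypes differ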
def draw_bp_histogram(discordant_counts, pdfpage):
# Create histogram of father-son differences
bp_diff_counts = [collections.defaultdict(int) for _ in xrange(6)]
repeat_diff_counts = [collections.defaultdict(int) for _ in xrange(6)]
out_frame_count = 0
in_frame_count = 0
for key,val in discordant_counts.items():
bp_diff_counts[key[2]-1][key[1]-key[0]] += val
repeat_diff_counts[key[2]-1][Fraction(key[1]-key[0], key[2])] += val
for xlabel,diff_counts,in_frame in zip(["bps", "repeats"],
[bp_diff_counts, repeat_diff_counts],
[lambda bp,period: bp%period == 0, lambda rep,period: int(rep)==float(rep) ]):
fig = plt.figure()
ax = fig.add_subplot(111)
diffs = sorted(list(set(reduce(lambda x,y:x+y, map(lambda z: z.keys(), diff_counts)))))
colors = ['c', 'r', 'g', 'y', 'b', 'm']
heights = numpy.zeros(len(diffs))
for i in xrange(6):
vals = [diff_counts[i][x] for x in diffs]
if sum(vals) == 0:
continue
in_frame_trips = filter(lambda x: in_frame(x[0], i+1), zip(diffs, vals, heights))
out_frame_trips = filter(lambda x: not in_frame(x[0], i+1), zip(diffs, vals, heights))
if len(in_frame_trips) != 0:
x,y,h = zip(*in_frame_trips)
in_frame_count += sum(y)
ax.bar(x, y, bottom=h, align='center', color=colors[i], width=0.25, label=str(i+1))
if len(out_frame_trips) != 0:
x,y,h = zip(*out_frame_trips)
out_frame_count += sum(y)
ax.bar(x, y, bottom=h, align='center', color=colors[i], width=0.25, label=str(i+1), hatch='//')
heights += vals
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_xlabel(r"$father-son ("+xlabel+")$")
ax.set_ylabel(r"$n_{calls}$")
ax.legend()
pdfpage.savefig(fig)
print("IN FRAME=%d, OUT FRAME=%d"%(in_frame_count/2, out_frame_count/2))
class CHRY_STATS:
def __init__(self, father_son_pairs, call_output):
self.pairs = father_son_pairs
self.output_calls = open(call_output, "w")
def initialize(self, vcf_reader):
sample_indices = dict(zip(vcf_reader.samples, range(len(vcf_reader.samples))))
self.pair_indices = []
for i in xrange(len(self.pairs)):
if self.pairs[i].son not in sample_indices:
exit("Unable to assess chrY inheritance because no data was found for " + self.pairs[i].son)
if self.pairs[i].father not in sample_indices:
exit("Unable to assess chrY inheritance because no data was found for " + self.pairs[i].father)
self.pair_indices.append([sample_indices[self.pairs[i].father], sample_indices[self.pairs[i].son]])
self.missing_data_skip_counts = numpy.zeros(len(self.pair_indices))
self.het_gt_skip_counts = numpy.zeros(len(self.pair_indices))
self.num_concordant = 0
self.num_discordant = 0
self.pair_info = {}
self.discordant_counts = collections.defaultdict(int)
self.call_count = 0
def process_record(self, record):
motif_len = len(record.INFO['MOTIF'])
for i in xrange(len(self.pair_indices)):
if any(map(lambda x: record.samples[x]['GT'] is None, self.pair_indices[i])):
self.missing_data_skip_counts[i] += 1
continue
self.call_count += 1
father = record.samples[self.pair_indices[i][0]]
son = record.samples[self.pair_indices[i][1]]
gb_1a, gb_1b = map(int, father['GB'].split("/"))
gb_2a, gb_2b = map(int, son['GB'].split("/"))
self.output_calls.write("%d\t%s\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%s\t%s\n"%(self.call_count, record.CHROM, record.POS, record.INFO['END'],
gb_1a + gb_1b, gb_2a + gb_2b,
gb_1a, gb_1b, gb_2a, gb_2b, father.sample, son.sample))
if gb_1a != gb_1b or gb_2a != gb_2b:
self.het_gt_skip_counts[i] += 1
if gb_1a != gb_1b:
print("chrY\t%d\t%d\t%s\t%s\t%s"%(record.POS, record.INFO["END"], father.sample, str(gb_1a) + "|" + str(gb_1b), "HET"))
if gb_2a != gb_2b:
print("chrY\t%d\t%d\t%s\t%s\t%s"%(record.POS, record.INFO["END"], father.sample, str(gb_2a) + "|" + str(gb_2b), "HET"))
continue
if gb_1a != gb_2a:
self.num_discordant += 1
self.discordant_counts[(gb_1a, gb_2a, motif_len)] +=1
print("chrY\t%d\t%d\t%s\t%s\t%s"%(record.POS, record.INFO["END"],
father.sample + "," + son.sample,
str(gb_1a) + "," + str(gb_2b), "DISCORDANT"))
else:
self.num_concordant += 1
if (gb_1a, gb_2a) not in self.pair_info:
self.pair_info[(gb_1a, gb_2a)] = []
self.pair_info[(gb_1a, gb_2a)].append((record.CHROM, record.POS, record.INFO['END'], father.sample+"-"+son.sample))
def finish(self, pdfpage, output_prefix):
print("WARNING: Skipped " + str(self.missing_data_skip_counts) + " comparisons due to missing data for one or more individuals")
print("WARNING: Skipped " + str(self.het_gt_skip_counts) + " comparisons due to heterozygous genotypes for one or more individuals")
if self.num_discordant + self.num_concordant != 0:
print("%d vs. %d = %f Percent"%(self.num_discordant, self.num_concordant, 100.0*self.num_discordant/(self.num_discordant+self.num_concordant)))
else:
print("WARNING: No chrY calls were applicable for comparison")
# Create bubble plot using all data
fig = plt.figure()
ax = fig.add_subplot(111)
x, y = zip(*self.pair_info.keys())
s = numpy.array(map(len, self.pair_info.values()))*10
ax.scatter(x, y, s=s, alpha=0.7)
ax.set_xlabel("Father's genotype (bp)")
ax.set_ylabel("Son's genotype (bp)")
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.plot(numpy.arange(min(x)-5, max(x)+5, 1.0), numpy.arange(min(y)-5, max(y)+5, 1.0), linestyle='--', color='k')
pdfpage.savefig(fig)
# Create histogram of father-son differences
draw_bp_histogram(self.discordant_counts, pdfpage)
viz_output = open(output_prefix+"_chrY.csv", "w")
viz_output.write(",".join(["X","Y", "CHROMS", "STARTS", "STOPS", "SAMPLES"]) + "\n")
for key,val in self.pair_info.items():
chroms, positions, ends, samples = map(list, zip(*val))
viz_output.write(",".join([str(key[0]), str(key[1]), "_".join(chroms), "_".join(map(str, positions)), "_".join(map(str, ends)), "_".join(map(str, samples))]) + "\n")
viz_output.close()
self.output_calls.close()
class MENDELIAN_STATS:
def __init__(self, trios, coverage_bins, quality_bins, max_coverage, quality_thresholds):
self.trios = trios
self.coverage_bins = coverage_bins
self.quality_bins = quality_bins
self.max_coverage = max_coverage
self.qual_thresh = quality_thresholds
def initialize(self, vcf_reader):
sample_indices = dict(zip(vcf_reader.samples, range(len(vcf_reader.samples))))
self.trio_indices = []
for i in xrange(len(self.trios)):
if self.trios[i].child not in sample_indices:
exit("Unable to calculate Mendelian inheritance because no data was found for " + self.trios[i].child)
if self.trios[i].father not in sample_indices:
exit("Unable to calculate Mendelian inheritance because no data was found for " + self.trios[i].father)
if self.trios[i].mother not in sample_indices:
exit("Unable to calculate Mendelian inheritance because no data was found for " + self.trios[i].mother)
# Father, Mother, Child
self.trio_indices.append(map(lambda x: sample_indices[x], [self.trios[i].father, self.trios[i].mother, self.trios[i].child]))
self.coverage_bins = numpy.concatenate(([-100000], self.coverage_bins))
self.quality_bins = numpy.concatenate(([-100000], self.quality_bins))
# Quality/Coverage x Trios x Period x Thresholds
self.all_loci_nstrs = [numpy.zeros((len(self.trios), 5, len(self.coverage_bins))), numpy.zeros((len(self.trios), 5, len(self.quality_bins)))]
self.all_loci_nmend = [numpy.zeros((len(self.trios), 5, len(self.coverage_bins))), numpy.zeros((len(self.trios), 5, len(self.quality_bins)))]
self.disc_loci_nstrs = [numpy.zeros((len(self.trios), 5, len(self.coverage_bins))), numpy.zeros((len(self.trios), 5, len(self.quality_bins)))]
self.disc_loci_nmend = [numpy.zeros((len(self.trios), 5, len(self.coverage_bins))), numpy.zeros((len(self.trios), 5, len(self.quality_bins)))]
self.missing_data_skip_counts = numpy.zeros(len(self.trios))
self.coverage_skip_counts = numpy.zeros(len(self.trios))
# Trios x Period x Thresholds
self.all_loci_nstrs_min_q = numpy.zeros((len(self.trios), 5, len(self.coverage_bins)))
self.all_loci_nmend_min_q = numpy.zeros((len(self.trios), 5, len(self.coverage_bins)))
self.disc_loci_nstrs_min_q = numpy.zeros((len(self.trios), 5, len(self.coverage_bins)))
self.disc_loci_nmend_min_q = numpy.zeros((len(self.trios), 5, len(self.coverage_bins)))
def process_record(self, record):
for i in xrange(len(self.trios)):
if any(map(lambda x: record.samples[x]['GT'] is None, self.trio_indices[i])):
self.missing_data_skip_counts[i] += 1
continue
if 'X' in record.CHROM or 'x' in record.CHROM or 'Y' in record.CHROM or 'y' in record.CHROM:
continue
q1, q2, q3 = map(lambda x: record.samples[x]["Q"], self.trio_indices[i])
c1, c2, c3 = map(lambda x: record.samples[x]["DP"], self.trio_indices[i])
a11, a12 = record.samples[self.trio_indices[i][0]]["GT"].split("/")
a21, a22 = record.samples[self.trio_indices[i][1]]["GT"].split("/")
a31, a32 = record.samples[self.trio_indices[i][2]]["GT"].split("/")
discordant = is_discordant(a11, a12, a21, a22)
mendelian = is_mendelian(a11, a12, a21, a22, a31, a32)
# Filter out loci with too high of coverage
if max(c1, c2, c3) > self.max_coverage:
self.coverage_skip_counts[i] += 1
continue
coverage = min(c1, c2, c3)
bin_idx = find_index(self.coverage_bins, coverage)
motif_len = len(record.INFO["MOTIF"])-2
self.all_loci_nstrs [0][i][motif_len][bin_idx] += 1
self.all_loci_nmend [0][i][motif_len][bin_idx] += mendelian*1
self.disc_loci_nstrs[0][i][motif_len][bin_idx] += discordant*1
self.disc_loci_nmend[0][i][motif_len][bin_idx] += discordant*mendelian*1
quality = min(q1, q2, q3)
bin_idx = find_index(self.quality_bins, quality)
self.all_loci_nstrs [1][i][motif_len][bin_idx] += 1
self.all_loci_nmend [1][i][motif_len][bin_idx] += mendelian*1
self.disc_loci_nstrs[1][i][motif_len][bin_idx] += discordant*1
self.disc_loci_nmend[1][i][motif_len][bin_idx] += discordant*mendelian*1
coverage = min(c1, c2, c3)
bin_idx = find_index(self.coverage_bins, coverage)
if quality > self.qual_thresh[motif_len]:
self.all_loci_nstrs_min_q [i][motif_len][bin_idx] += 1
self.all_loci_nmend_min_q [i][motif_len][bin_idx] += mendelian*1
self.disc_loci_nstrs_min_q [i][motif_len][bin_idx] += discordant*1
self.disc_loci_nmend_min_q [i][motif_len][bin_idx] += discordant*mendelian*1
def finish(self, pdfpage):
print("WARNING: Skipped " + str(self.missing_data_skip_counts) + " loci due to missing data for one or more individual")
print("WARNING: Skipped " + str(self.coverage_skip_counts) + " loci due to too high coverage")
# Iterate over coverage and quality stats
types = ['Coverage', 'Quality', 'Coverage']
bins = [self.coverage_bins, self.quality_bins, self.coverage_bins]
for n in xrange(3):
# Sum across all trios
if n == 0 or n == 1:
all_loci_nstrs = numpy.sum(self.all_loci_nstrs [n], axis=0)
all_loci_nmend = numpy.sum(self.all_loci_nmend [n], axis=0)
disc_loci_nstrs = numpy.sum(self.disc_loci_nstrs[n], axis=0)
disc_loci_nmend = numpy.sum(self.disc_loci_nmend[n], axis=0)
else:
all_loci_nstrs = numpy.sum(self.all_loci_nstrs_min_q, axis=0)
all_loci_nmend = numpy.sum(self.all_loci_nmend_min_q, axis=0)
disc_loci_nstrs = numpy.sum(self.disc_loci_nstrs_min_q, axis=0)
disc_loci_nmend = numpy.sum(self.disc_loci_nmend_min_q, axis=0)
# Create plots for individual periods
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax1.set_ylabel("Fraction Mendelian")
ax1.set_title("All sites")
ax2 = fig.add_subplot(222, sharey=ax1)
ax2.set_title("Discordant parental sites")
ax3 = fig.add_subplot(223, sharex=ax1)
ax3.set_xlabel(types[n] + " threshold")
ax3.set_ylabel("# genotypes")
ax3.set_yscale('log')
ax4 = fig.add_subplot(224, sharex=ax2, sharey=ax3)
ax4.set_xlabel(types[n] + " threshold")
ax4.set_yscale('log')
box1 = ax1.get_position()
ax1.set_position([box1.x0, box1.y0, box1.width*0.9, box1.height])
ax2.set_position([box1.x0 + box1.width*1.15, box1.y0, box1.width*0.9, box1.height])
box3 = ax3.get_position()
ax3.set_position([box3.x0, box3.y0, box3.width*0.9, box3.height])
ax4.set_position([box3.x0 + box3.width*1.15, box3.y0, box3.width*0.9, box3.height])
font_size = 9
for i in xrange(5):
nstrs_all = numpy.cumsum(all_loci_nstrs [i][::-1])[::-1]
nmend_all = numpy.cumsum(all_loci_nmend [i][::-1])[::-1]
nstrs_disc = numpy.cumsum(disc_loci_nstrs[i][::-1])[::-1]
nmend_disc = numpy.cumsum(disc_loci_nmend[i][::-1])[::-1]
all_fracs = (1.0*nmend_all/nstrs_all)[1:]
disc_fracs = (1.0*nmend_disc/nstrs_disc)[1:]
ax1.plot(bins[n][1:], all_fracs, '-o', label=str(i+1))
ax2.plot(bins[n][1:], disc_fracs, '-o', label=str(i+1))
ax3.plot(bins[n][1:], nstrs_all[1:], '-o', label=str(i+1))
ax4.plot(bins[n][1:], nstrs_disc[1:], '-o', label=str(i+1))
ax4.legend(bbox_to_anchor=(1.05, 0.9, 0.25, 0.2), loc='center left')
for ax in [ax1, ax2, ax3, ax4]:
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(font_size)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(font_size)
pdfpage.savefig(fig)
# Create plots using all periods
# Sum across all periods
all_loci_nstrs = numpy.sum(all_loci_nstrs, axis=0)
all_loci_nmend = numpy.sum(all_loci_nmend, axis=0)
disc_loci_nstrs = numpy.sum(disc_loci_nstrs, axis=0)
disc_loci_nmend = numpy.sum(disc_loci_nmend, axis=0)
# Transform into running sums
all_loci_nstrs = numpy.cumsum(all_loci_nstrs[::-1])[::-1]
all_loci_nmend = numpy.cumsum(all_loci_nmend[::-1])[::-1]
disc_loci_nstrs = numpy.cumsum(disc_loci_nstrs[::-1])[::-1]
disc_loci_nmend = numpy.cumsum(disc_loci_nmend[::-1])[::-1]
# Calculate the fraction of Mendelian inheritance for all loci and discordant loci
all_loci_fracs = (1.0*all_loci_nmend/all_loci_nstrs)[1:]
disc_loci_fracs = (1.0*disc_loci_nmend/disc_loci_nstrs)[1:]
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax1.set_ylabel("Fraction Mendelian")
ax1.set_title("All sites")
ax1.plot(bins[n][1:], all_loci_fracs, '-o')
ax2 = fig.add_subplot(222, sharey=ax1)
ax2.plot(bins[n][1:], disc_loci_fracs, '-o')
ax2.set_title("Discordant parental sites")
ax3 = fig.add_subplot(223, sharex=ax1)
ax3.set_xlabel(types[n] + " threshold")
ax3.set_ylabel("# genotypes")
ax3.set_yscale('log')
ax3.plot(bins[n][1:], all_loci_nstrs[1:], '-o')
ax4 = fig.add_subplot(224, sharex=ax2, sharey=ax3)
ax4.set_xlabel(types[n] + " threshold")
ax4.set_yscale('log')
ax4.plot(bins[n][1:], disc_loci_nstrs[1:], '-o')
for ax in [ax1, ax2, ax3, ax4]:
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(font_size)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(font_size)
pdfpage.savefig(fig)
mpl.rcParams['xtick.labelsize'] = 10
mpl.rcParams['ytick.labelsize'] = 10
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(bins[n][1:], all_loci_fracs, '-o', color='b')
ax1.set_ylabel("Fraction Mendelian")
ax1.set_xlabel(types[n] + " threshold")
ax2 = ax1.twinx()
ax2.set_yscale('log')
ax2.plot(bins[n][1:], all_loci_nstrs[1:], '-o', color='g')
pdfpage.savefig(fig)
ax1.axis('equal')
pdfpage.savefig(fig)
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
ax1.plot(bins[n][1:], all_loci_fracs, '-o', color='b')
ax1.set_ylabel("Fraction Mendelian")
ax1.xaxis.set_ticks_position('bottom')
ax1.yaxis.set_ticks_position('left')
ax2.set_xlabel(types[n] + " threshold")
ax2.plot(bins[n][1:], all_loci_nstrs[1:], '-o', color='g')
ax2.set_yscale('log')
ax2.set_ylabel("# Called loci across trios")
ax2.xaxis.set_ticks_position('bottom')
ax2.yaxis.set_ticks_position('left')
pdfpage.savefig(fig)
def main():
print("Invocation syntax: python pedigree_analysis.py 1kg_pedigree_file.txt vcf_file.vcf output_file.pdf")
trios, father_son_pairs = read_1kg_pedigree_file(sys.argv[1], header=True)
vcf_reader = vcf.Reader(filename=sys.argv[2])
call_stats = sys.argv[3]
samples = vcf_reader.samples
trios_with_data = []
pairs_with_data = []
for trio in trios:
if trio.child in samples and trio.mother in samples and trio.father in samples:
trios_with_data.append(trio)
print("There are %d trios with data"%len(trios_with_data))
for pair in father_son_pairs:
if pair.father in samples and pair.son in samples:
pairs_with_data.append(pair)
print("There are %d father-son pairs with data"%(len(pairs_with_data)))
coverage_bins = numpy.append(numpy.arange(1.001, 5.0011, 1.0), numpy.arange(6.001, 18.0011, 2.0))
quality_bins = numpy.arange(0.0, 1.0, 0.1)
quality_thresh = [0.9, 0.5, 0.5, 0.5, 0.5, 0.5]
max_coverage = 100
processors = [CHRY_STATS(pairs_with_data, call_stats)]
#mend_stats = MENDELIAN_STATS(trios_with_data, coverage_bins, quality_bins, max_coverage, quality_thresh)
for proc in processors:
proc.initialize(vcf_reader)
for record in vcf_reader:
for proc in processors:
proc.process_record(record)
pp = PdfPages(sys.argv[3]+".pdf")
for proc in processors:
proc.finish(pp, sys.argv[3])
pp.close()
return 0
if __name__ == "__main__":
main()
| gpl-3.0 |
DTOcean/dtocean-core | tests/test_data_definitions_xgrid2d.py | 1 | 4013 | import pytest
import numpy as np
import matplotlib.pyplot as plt
from aneris.control.factory import InterfaceFactory
from dtocean_core.core import (AutoFileInput,
AutoFileOutput,
AutoPlot,
Core)
from dtocean_core.data import CoreMetaData
from dtocean_core.data.definitions import XGrid2D
def test_XGrid2D_available():
new_core = Core()
all_objs = new_core.control._store._structures
assert "XGrid2D" in all_objs.keys()
def test_XGrid2D():
raw = {"values": np.random.randn(2, 3),
"coords": [['a', 'b'], [-2, 0, 2]]}
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ['x', 'y'],
"units": [None, 'm', 'POWER!']})
test = XGrid2D()
a = test.get_data(raw, meta)
b = test.get_value(a)
assert b.values.shape == (2,3)
assert b.units == 'POWER!'
assert b.y.units == 'm'
def test_get_None():
test = XGrid2D()
result = test.get_value(None)
assert result is None
@pytest.mark.parametrize("fext", [".nc"])
def test_XGrid2D_auto_file(tmpdir, fext):
test_path = tmpdir.mkdir("sub").join("test{}".format(fext))
test_path_str = str(test_path)
raw = {"values": np.random.randn(2, 3),
"coords": [['a', 'b'], [-2, 0, 2]]}
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ['x', 'y'],
"units": [None, 'm', 'POWER!']})
test = XGrid2D()
fout_factory = InterfaceFactory(AutoFileOutput)
FOutCls = fout_factory(meta, test)
fout = FOutCls()
fout._path = test_path_str
fout.data.result = test.get_data(raw, meta)
fout.connect()
assert len(tmpdir.listdir()) == 1
fin_factory = InterfaceFactory(AutoFileInput)
FInCls = fin_factory(meta, test)
fin = FInCls()
fin.meta.result = meta
fin._path = test_path_str
fin.connect()
result = test.get_data(fin.data.result, meta)
assert result.values.shape == (2,3)
assert result.units == 'POWER!'
assert result.y.units == 'm'
def test_XGrid2D_auto_plot(tmpdir):
raw = {"values": np.random.randn(2, 3),
"coords": [['a', 'b'], [-2, 0, 2]]}
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ['x', 'y'],
"units": ['\sum_{n=1}^{\infty} 2^{-n} = 1',
'm',
'POWER!']})
test = XGrid2D()
fout_factory = InterfaceFactory(AutoPlot)
PlotCls = fout_factory(meta, test)
plot = PlotCls()
plot.data.result = test.get_data(raw, meta)
plot.meta.result = meta
plot.connect()
assert len(plt.get_fignums()) == 1
plt.close("all")
def test_XGrid2D_auto_plot_reverse(tmpdir):
raw = {"values": np.random.randn(3, 2),
"coords": [[-2, 0, 2], ['a', 'b']]}
meta = CoreMetaData({"identifier": "test",
"structure": "test",
"title": "test",
"labels": ['x', 'y'],
"units": ['\sum_{n=1}^{\infty} 2^{-n} = 1',
'm',
'POWER!']})
test = XGrid2D()
fout_factory = InterfaceFactory(AutoPlot)
PlotCls = fout_factory(meta, test)
plot = PlotCls()
plot.data.result = test.get_data(raw, meta)
plot.meta.result = meta
plot.connect()
assert len(plt.get_fignums()) == 1
plt.close("all")
| gpl-3.0 |
aajtodd/zipline | tests/risk/answer_key.py | 39 | 11989 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import hashlib
import os
import numpy as np
import pandas as pd
import pytz
import xlrd
import requests
from six.moves import map
def col_letter_to_index(col_letter):
# Convert a spreadsheet column letter to a 1-based integer index.
# Multi-letter columns such as 'AA' are supported; the answer key uses them.
index = 0
for i, char in enumerate(reversed(col_letter)):
index += ((ord(char) - 65) + 1) * pow(26, i)
return index
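def _example_col_letter_to_index():
    # Added illustration (not part of the original module): the index is
    # 1-based and multi-letter columns (used by INDEXES below) are handled.
    assert col_letter_to_index('A') == 1
    assert col_letter_to_index('D') == 4
    assert col_letter_to_index('AA') == 27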
DIR = os.path.dirname(os.path.realpath(__file__))
ANSWER_KEY_CHECKSUMS_PATH = os.path.join(DIR, 'risk-answer-key-checksums')
ANSWER_KEY_CHECKSUMS = open(ANSWER_KEY_CHECKSUMS_PATH, 'r').read().splitlines()
ANSWER_KEY_FILENAME = 'risk-answer-key.xlsx'
ANSWER_KEY_PATH = os.path.join(DIR, ANSWER_KEY_FILENAME)
ANSWER_KEY_BUCKET_NAME = 'zipline-test_data'
ANSWER_KEY_DL_TEMPLATE = """
https://s3.amazonaws.com/zipline-test-data/risk/{md5}/risk-answer-key.xlsx
""".strip()
LATEST_ANSWER_KEY_URL = ANSWER_KEY_DL_TEMPLATE.format(
md5=ANSWER_KEY_CHECKSUMS[-1])
def answer_key_signature():
with open(ANSWER_KEY_PATH, 'rb') as f:
md5 = hashlib.md5()
buf = f.read(1024)
md5.update(buf)
while buf != b"":
buf = f.read(1024)
md5.update(buf)
return md5.hexdigest()
def ensure_latest_answer_key():
"""
Get the latest answer key from a publicly available location.
Logic for determining what and when to download is as such:
- If there is no local spreadsheet file, then get the latest answer key,
as defined by the last row in the checksum file.
- If there is a local spreadsheet file:
-- If the spreadsheet's checksum is in the checksum file:
--- If the spreadsheet's checksum does not match the latest, then grab the
latest checksum and replace the local checksum file.
--- If the spreadsheet's checksum matches the latest, then skip download,
and use the local spreadsheet as a cached copy.
-- If the spreadsheet's checksum is not in the checksum file, then leave
the local file alone, assuming that the local xls's md5 is not in the list
due to local modifications during development.
It is possible that md5s could collide; if that is ever the case, we should
then find an alternative naming scheme.
The spreadsheet answer sheet is not kept in SCM, as every edit would
increase the repo size by the file size, since it is treated as a binary.
"""
answer_key_dl_checksum = None
local_answer_key_exists = os.path.exists(ANSWER_KEY_PATH)
if local_answer_key_exists:
local_hash = answer_key_signature()
if local_hash in ANSWER_KEY_CHECKSUMS:
# Assume previously downloaded version.
# Check for latest.
if local_hash != ANSWER_KEY_CHECKSUMS[-1]:
# More recent checksum, download
answer_key_dl_checksum = ANSWER_KEY_CHECKSUMS[-1]
else:
# Assume local copy that is being developed on
answer_key_dl_checksum = None
else:
answer_key_dl_checksum = ANSWER_KEY_CHECKSUMS[-1]
if answer_key_dl_checksum:
res = requests.get(
ANSWER_KEY_DL_TEMPLATE.format(md5=answer_key_dl_checksum))
with open(ANSWER_KEY_PATH, 'wb') as f:
f.write(res.content)
# Get latest answer key on load.
ensure_latest_answer_key()
class DataIndex(object):
"""
Coordinates for the spreadsheet, using the values as seen in the notebook.
The python-excel libraries use 0 index, while the spreadsheet in a GUI
uses a 1 index.
"""
def __init__(self, sheet_name, col, row_start, row_end,
value_type='float'):
self.sheet_name = sheet_name
self.col = col
self.row_start = row_start
self.row_end = row_end
self.value_type = value_type
@property
def col_index(self):
return col_letter_to_index(self.col) - 1
@property
def row_start_index(self):
return self.row_start - 1
@property
def row_end_index(self):
return self.row_end - 1
def __str__(self):
return "'{sheet_name}'!{col}{row_start}:{col}{row_end}".format(
sheet_name=self.sheet_name,
col=self.col,
row_start=self.row_start,
row_end=self.row_end
)
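def _example_data_index():
    # Added illustration (not part of the original module): DataIndex keeps
    # 1-based spreadsheet coordinates and exposes 0-based indices for xlrd.
    idx = DataIndex('Sim Period', 'D', 4, 255)
    assert str(idx) == "'Sim Period'!D4:D255"
    assert (idx.col_index, idx.row_start_index, idx.row_end_index) == (3, 3, 254)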
class AnswerKey(object):
INDEXES = {
'RETURNS': DataIndex('Sim Period', 'D', 4, 255),
'BENCHMARK': {
'Dates': DataIndex('s_p', 'A', 4, 254, value_type='date'),
'Returns': DataIndex('s_p', 'H', 4, 254)
},
# Below matches the inconsistent capitalization in spreadsheet
'BENCHMARK_PERIOD_RETURNS': {
'Monthly': DataIndex('s_p', 'R', 8, 19),
'3-Month': DataIndex('s_p', 'S', 10, 19),
'6-month': DataIndex('s_p', 'T', 13, 19),
'year': DataIndex('s_p', 'U', 19, 19),
},
'BENCHMARK_PERIOD_VOLATILITY': {
'Monthly': DataIndex('s_p', 'V', 8, 19),
'3-Month': DataIndex('s_p', 'W', 10, 19),
'6-month': DataIndex('s_p', 'X', 13, 19),
'year': DataIndex('s_p', 'Y', 19, 19),
},
'ALGORITHM_PERIOD_RETURNS': {
'Monthly': DataIndex('Sim Period', 'Z', 23, 34),
'3-Month': DataIndex('Sim Period', 'AA', 25, 34),
'6-month': DataIndex('Sim Period', 'AB', 28, 34),
'year': DataIndex('Sim Period', 'AC', 34, 34),
},
'ALGORITHM_PERIOD_VOLATILITY': {
'Monthly': DataIndex('Sim Period', 'AH', 23, 34),
'3-Month': DataIndex('Sim Period', 'AI', 25, 34),
'6-month': DataIndex('Sim Period', 'AJ', 28, 34),
'year': DataIndex('Sim Period', 'AK', 34, 34),
},
'ALGORITHM_PERIOD_SHARPE': {
'Monthly': DataIndex('Sim Period', 'AL', 23, 34),
'3-Month': DataIndex('Sim Period', 'AM', 25, 34),
'6-month': DataIndex('Sim Period', 'AN', 28, 34),
'year': DataIndex('Sim Period', 'AO', 34, 34),
},
'ALGORITHM_PERIOD_BETA': {
'Monthly': DataIndex('Sim Period', 'AP', 23, 34),
'3-Month': DataIndex('Sim Period', 'AQ', 25, 34),
'6-month': DataIndex('Sim Period', 'AR', 28, 34),
'year': DataIndex('Sim Period', 'AS', 34, 34),
},
'ALGORITHM_PERIOD_ALPHA': {
'Monthly': DataIndex('Sim Period', 'AT', 23, 34),
'3-Month': DataIndex('Sim Period', 'AU', 25, 34),
'6-month': DataIndex('Sim Period', 'AV', 28, 34),
'year': DataIndex('Sim Period', 'AW', 34, 34),
},
'ALGORITHM_PERIOD_BENCHMARK_VARIANCE': {
'Monthly': DataIndex('Sim Period', 'BJ', 23, 34),
'3-Month': DataIndex('Sim Period', 'BK', 25, 34),
'6-month': DataIndex('Sim Period', 'BL', 28, 34),
'year': DataIndex('Sim Period', 'BM', 34, 34),
},
'ALGORITHM_PERIOD_COVARIANCE': {
'Monthly': DataIndex('Sim Period', 'BF', 23, 34),
'3-Month': DataIndex('Sim Period', 'BG', 25, 34),
'6-month': DataIndex('Sim Period', 'BH', 28, 34),
'year': DataIndex('Sim Period', 'BI', 34, 34),
},
'ALGORITHM_PERIOD_DOWNSIDE_RISK': {
'Monthly': DataIndex('Sim Period', 'BN', 23, 34),
'3-Month': DataIndex('Sim Period', 'BO', 25, 34),
'6-month': DataIndex('Sim Period', 'BP', 28, 34),
'year': DataIndex('Sim Period', 'BQ', 34, 34),
},
'ALGORITHM_PERIOD_SORTINO': {
'Monthly': DataIndex('Sim Period', 'BR', 23, 34),
'3-Month': DataIndex('Sim Period', 'BS', 25, 34),
'6-month': DataIndex('Sim Period', 'BT', 28, 34),
'year': DataIndex('Sim Period', 'BU', 34, 34),
},
'ALGORITHM_RETURN_VALUES': DataIndex(
'Sim Cumulative', 'D', 4, 254),
'ALGORITHM_CUMULATIVE_VOLATILITY': DataIndex(
'Sim Cumulative', 'P', 4, 254),
'ALGORITHM_CUMULATIVE_SHARPE': DataIndex(
'Sim Cumulative', 'R', 4, 254),
'CUMULATIVE_DOWNSIDE_RISK': DataIndex(
'Sim Cumulative', 'U', 4, 254),
'CUMULATIVE_SORTINO': DataIndex(
'Sim Cumulative', 'V', 4, 254),
'CUMULATIVE_INFORMATION': DataIndex(
'Sim Cumulative', 'AA', 4, 254),
'CUMULATIVE_BETA': DataIndex(
'Sim Cumulative', 'AD', 4, 254),
'CUMULATIVE_ALPHA': DataIndex(
'Sim Cumulative', 'AE', 4, 254),
'CUMULATIVE_MAX_DRAWDOWN': DataIndex(
'Sim Cumulative', 'AH', 4, 254),
}
def __init__(self):
self.workbook = xlrd.open_workbook(ANSWER_KEY_PATH)
self.sheets = {}
self.sheets['Sim Period'] = self.workbook.sheet_by_name('Sim Period')
self.sheets['Sim Cumulative'] = self.workbook.sheet_by_name(
'Sim Cumulative')
self.sheets['s_p'] = self.workbook.sheet_by_name('s_p')
for name, index in self.INDEXES.items():
if isinstance(index, dict):
subvalues = {}
for subkey, subindex in index.items():
subvalues[subkey] = self.get_values(subindex)
setattr(self, name, subvalues)
else:
setattr(self, name, self.get_values(index))
def parse_date_value(self, value):
return xlrd.xldate_as_tuple(value, 0)
def parse_float_value(self, value):
return value if value != '' else np.nan
def get_raw_values(self, data_index):
return self.sheets[data_index.sheet_name].col_values(
data_index.col_index,
data_index.row_start_index,
data_index.row_end_index + 1)
@property
def value_type_to_value_func(self):
return {
'float': self.parse_float_value,
'date': self.parse_date_value,
}
def get_values(self, data_index):
value_parser = self.value_type_to_value_func[data_index.value_type]
return [value for value in
map(value_parser, self.get_raw_values(data_index))]
ANSWER_KEY = AnswerKey()
BENCHMARK_DATES = ANSWER_KEY.BENCHMARK['Dates']
BENCHMARK_RETURNS = ANSWER_KEY.BENCHMARK['Returns']
DATES = [datetime.datetime(*x, tzinfo=pytz.UTC) for x in BENCHMARK_DATES]
BENCHMARK = pd.Series(dict(zip(DATES, BENCHMARK_RETURNS)))
ALGORITHM_RETURNS = pd.Series(
dict(zip(DATES, ANSWER_KEY.ALGORITHM_RETURN_VALUES)))
RETURNS_DATA = pd.DataFrame({'Benchmark Returns': BENCHMARK,
'Algorithm Returns': ALGORITHM_RETURNS})
RISK_CUMULATIVE = pd.DataFrame({
'volatility': pd.Series(dict(zip(
DATES, ANSWER_KEY.ALGORITHM_CUMULATIVE_VOLATILITY))),
'sharpe': pd.Series(dict(zip(
DATES, ANSWER_KEY.ALGORITHM_CUMULATIVE_SHARPE))),
'downside_risk': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_DOWNSIDE_RISK))),
'sortino': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_SORTINO))),
'information': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_INFORMATION))),
'alpha': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_ALPHA))),
'beta': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_BETA))),
'max_drawdown': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_MAX_DRAWDOWN))),
})
| apache-2.0 |
thp44/delphin_6_automation | data_process/2d_1d/archieve/temperature.py | 1 | 18075 | __author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import matplotlib.pyplot as plt
import numpy as np
import os
import datetime
import matplotlib.dates as mdates
import pandas as pd
# RiBuild Modules
from delphin_6_automation.file_parsing import delphin_parser
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
# Application
colors = {'top': '#FBBA00', 'mid': '#B81A5D', 'bottom': '#79C6C0', '1d_brick': '#000000', '1d_mortar': '#BDCCD4'}
project_dict = {'dresden_zp_high_ratio_uninsulated_4a':
{'map':
{'5ad9e0352e2cb22f2c4f15b4': 'brick_1d',
'5ad9e3bf2e2cb22f2c4f166b': 'mortar_1d',
'5adb0a102e2cb22f2c4f17e9': '2d'}
},
'dresden_zd_high_ratio_uninsulated_4a':
{'map':
{'5ad9e0ba2e2cb22f2c4f15f1': 'brick_1d',
'5ad9e3bf2e2cb22f2c4f166b': 'mortar_1d',
'5adb2dc02e2cb22f2c4f1873': '2d'}
},
'potsdam_high_ratio_uninsulated_4a':
{'map':
{'5ad9e3462e2cb22f2c4f162e': 'brick_1d',
'5ad9e3bf2e2cb22f2c4f166b': 'mortar_1d',
'5adcc9702e2cb22f2c4f18fd': '2d'}
},
'dresden_zp_low_ratio_uninsulated_4a':
{'map':
{'5ad9e6192e2cb22f2c4f175f': 'brick_1d',
'5ad9e5812e2cb22f2c4f1722': 'mortar_1d',
'5adda7172e2cb20baca57c6e': '2d'}
},
'dresden_zd_low_ratio_uninsulated_4a':
{'map':
{'5ad9e44f2e2cb22f2c4f16a8': 'brick_1d',
'5ad9e5812e2cb22f2c4f1722': 'mortar_1d',
'5adcd4402e2cb22f2c4f1987': '2d'}
},
'potsdam_low_ratio_uninsulated_4a':
{'map': {'5ad9e4f22e2cb22f2c4f16e5': 'brick_1d',
'5ad9e5812e2cb22f2c4f1722': 'mortar_1d',
'5add9b902e2cb20baca57be4': '2d'}
},
'dresden_zp_high_ratio_insulated_4a':
{'map': {'5ae824252e2cb22d48db5955': 'brick_1d',
'5ae82c222e2cb2156000902b': 'mortar_1d',
'5ae355cf2e2cb2201055c1a4': '2d'}
},
'dresden_zd_high_ratio_insulated_4a':
{'map': {'5ae824d82e2cb22d48db5998': 'brick_1d',
'5ae82c222e2cb2156000902b': 'mortar_1d',
'5ae398f12e2cb2201055c263': '2d'}
},
'potsdam_high_ratio_insulated_4a':
{'map':
{'5ae82bac2e2cb21560008fe8': 'brick_1d',
'5ae82c222e2cb2156000902b': 'mortar_1d',
'5ae6ca982e2cb2201055c322': '2d'}
},
'dresden_zp_low_ratio_insulated_4a':
{'map':
{'5ae82e5d2e2cb21560009137': 'brick_1d',
'5ae82dc02e2cb215600090f4': 'mortar_1d',
'5ae6fdbf2e2cb20d5891272f': '2d'}
},
'dresden_zd_low_ratio_insulated_4a':
{'map':
{'5ae82cb12e2cb2156000906e': 'brick_1d',
'5ae82dc02e2cb215600090f4': 'mortar_1d',
'5ae6d9bf2e2cb2201055c3e1': '2d'}
},
'potsdam_low_ratio_insulated_4a':
{'map':
{'5ae82d3b2e2cb215600090b1': 'brick_1d',
'5ae82dc02e2cb215600090f4': 'mortar_1d',
'5ae6edaf2e2cb20d58912670': '2d'}
},
}
result_folder = r'U:\RIBuild\2D_1D\Results'
files = ['temperature profile.d6o']
# Functions
def get_points(result: dict, geo: dict):
points = []
for index_ in result['indices']:
x_ = geo['element_geometry'][index_][1]
y_ = geo['element_geometry'][index_][2]
points.append({'cell': index_, 'x': x_, 'y': y_})
return points
def add_data_to_points(points: list, results: dict, result_name: str):
for cell_ in results['result'].keys():
cell_index = int(cell_.split('_')[1])
for point in points:
if point['cell'] == cell_index:
point[result_name] = np.array(results['result'][cell_][8760:])
break
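# Added illustration (not part of the original script): after get_points() and
# add_data_to_points() each entry looks like
#   {'cell': 17, 'x': 0.0425, 'y': 0.114, 'temperature': <numpy.ndarray>}
# where 'temperature' is the hourly series with the first simulation year
# (8760 hours) discarded above. The numbers shown here are made up.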
def main(project_):
projects = list(project_dict[project_]['map'].keys())
parsed_dicts = {'brick_1d': {'temp': {}, 'geo': {}},
'mortar_1d': {'temp': {}, 'geo': {}},
'2d': {'temp': {}, 'geo': {}}, }
for p_ in projects:
for mp_key in project_dict[project_]['map'].keys():
if p_ == mp_key:
key = project_dict[project_]['map'][mp_key]
folder = result_folder + f'/{p_}/results'
geo_file = [file
for file in os.listdir(folder)
if file.endswith('.g6a')][0]
parsed_dicts[key]['temp'], _ = delphin_parser.d6o_to_dict(folder, files[0])
parsed_dicts[key]['geo'] = delphin_parser.g6a_to_dict(folder, geo_file)
x_date = [datetime.datetime(2020, 1, 1) + datetime.timedelta(hours=i)
for i in range(len(parsed_dicts['brick_1d']['temp']['result']['cell_0'][8760:]))]
# Brick 1D
brick_1d = get_points(parsed_dicts['brick_1d']['temp'], parsed_dicts['brick_1d']['geo'])
brick_1d.sort(key=lambda point: point['x'])
add_data_to_points(brick_1d, parsed_dicts['brick_1d']['temp'], 'temperature')
# Mortar 1D
mortar_1d = get_points(parsed_dicts['mortar_1d']['temp'], parsed_dicts['mortar_1d']['geo'])
mortar_1d.sort(key=lambda point: point['x'])
add_data_to_points(mortar_1d, parsed_dicts['mortar_1d']['temp'], 'temperature')
# 2D
sim_2d = get_points(parsed_dicts['2d']['temp'], parsed_dicts['2d']['geo'])
sim_2d.sort(key=lambda point: (point['x'], point['y']))
add_data_to_points(sim_2d, parsed_dicts['2d']['temp'], 'temperature')
# Plots
def plot_locations(quantity):
# Axes 00
plt.figure()
plt.title(f"{quantity}\n1D-Location: {brick_1d[0]['x']:.4f} and 2D-Location: {sim_2d[0]['x']:.4f}")
plt.plot(x_date, brick_1d[0][quantity], color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_1d[0][quantity], color=colors['1d_mortar'], label=f"1D Mortar")
plt.plot(x_date, sim_2d[0][quantity], color=colors['bottom'], label=f"2D Bottom")
plt.plot(x_date, sim_2d[1][quantity], color=colors['mid'], label=f"2D Mid")
plt.plot(x_date, sim_2d[2][quantity], color=colors['top'], label=f"2D Top")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel(f'{quantity}')
# Axes 01
plt.figure()
plt.title(f"{quantity}\n1D-Location: {brick_1d[1]['x']:.4f} and 2D-Location: {sim_2d[3]['x']:.4f}")
plt.plot(x_date, brick_1d[1][quantity], color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_1d[1][quantity], color=colors['1d_mortar'], label=f"1D Mortar")
plt.plot(x_date, sim_2d[3][quantity], color=colors['bottom'], label=f"2D Bottom")
plt.plot(x_date, sim_2d[4][quantity], color=colors['mid'], label=f"2D Mid")
plt.plot(x_date, sim_2d[5][quantity], color=colors['top'], label=f"2D Top")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel(f'{quantity}')
# Axes 10
plt.figure()
plt.title(f"{quantity}\n1D-Location: {brick_1d[2]['x']:.4f} and 2D-Location: {sim_2d[6]['x']:.4f}")
plt.plot(x_date, brick_1d[2][quantity], color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_1d[2][quantity], color=colors['1d_mortar'], label=f"1D Mortar")
plt.plot(x_date, sim_2d[6][quantity], color=colors['bottom'], label=f"2D Bottom")
plt.plot(x_date, sim_2d[7][quantity], color=colors['mid'], label=f"2D Mid")
plt.plot(x_date, sim_2d[8][quantity], color=colors['top'], label=f"2D Top")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel(f'{quantity}')
# Axes 11
plt.figure()
plt.title(f"{quantity}\n1D-Location: {brick_1d[3]['x']:.4f} and 2D-Location: {sim_2d[9]['x']:.4f}")
plt.plot(x_date, brick_1d[3][quantity], color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_1d[3][quantity], color=colors['1d_mortar'], label=f"1D Mortar")
plt.plot(x_date, sim_2d[9][quantity], color=colors['bottom'], label=f"2D Bottom")
plt.plot(x_date, sim_2d[10][quantity], color=colors['mid'], label=f"2D Mid")
plt.plot(x_date, sim_2d[11][quantity], color=colors['top'], label=f"2D Top")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel(f'{quantity}')
# Axes 20
plt.figure()
plt.title(f"{quantity}\n1D-Location: {brick_1d[4]['x']:.4f} and 2D-Location: {sim_2d[12]['x']:.4f}")
plt.plot(x_date, brick_1d[4][quantity], color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_1d[4][quantity], color=colors['1d_mortar'], label=f"1D Mortar")
plt.plot(x_date, sim_2d[12][quantity], color=colors['bottom'], label=f"2D Bottom")
plt.plot(x_date, sim_2d[13][quantity], color=colors['mid'], label=f"2D Mid")
plt.plot(x_date, sim_2d[14][quantity], color=colors['top'], label=f"2D Top")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel(f'{quantity}')
# Axes 21
plt.figure()
plt.title(f"{quantity}\n1D-Location: {brick_1d[5]['x']:.4f} and 2D-Location: {sim_2d[15]['x']:.4f}")
plt.plot(x_date, brick_1d[5][quantity], color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_1d[5][quantity], color=colors['1d_mortar'], label=f"1D Mortar")
plt.plot(x_date, sim_2d[15][quantity], color=colors['bottom'], label=f"2D Bottom")
plt.plot(x_date, sim_2d[16][quantity], color=colors['mid'], label=f"2D Mid")
plt.plot(x_date, sim_2d[17][quantity], color=colors['top'], label=f"2D Top")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel(f'{quantity}')
#plot_locations(quantity='temperature')
#plt.show()
def abs_diff(x1, x2):
return x2 - x1
def rel_diff(x1, x2):
return (abs(x2 - x1))/abs(x2) * 100
def differences(i, plots=False):
avg_2d = np.mean([sim_2d[3*i]['temperature'], sim_2d[3*i+1]['temperature'], sim_2d[3*i+2]['temperature']], axis=0)  # bottom, mid, top cells for this 1D location
brick_abs = abs_diff(brick_1d[i]['temperature'], avg_2d)
mortar_abs = abs_diff(mortar_1d[i]['temperature'], avg_2d)
brick_rel = rel_diff(brick_1d[i]['temperature'], avg_2d)
mortar_rel = rel_diff(mortar_1d[i]['temperature'], avg_2d)
if plots:
# Plot
plt.figure()
plt.title(f"Temperature - Absolute Difference\n"
f"1D-Location: {brick_1d[i]['x']:.4f} and 2D-Location: {sim_2d[i*3]['x']:.4f}")
plt.plot(x_date, brick_abs, color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_abs, color=colors['1d_mortar'], label=f"1D Mortar")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel('C')
plt.figure()
plt.title(f"Temperature - Relative Difference\n"
f"1D-Location: {brick_1d[i]['x']:.4f} and 2D-Location: {sim_2d[i*3]['x']:.4f}")
plt.plot(x_date, brick_rel, color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_rel, color=colors['1d_mortar'], label=f"1D Mortar")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel('%')
local_df = pd.DataFrame(columns=[f"{brick_1d[i]['x']:.04f}", f"{brick_1d[i]['x']:.04f}",
f"{brick_1d[i]['x']:.04f}", f"{brick_1d[i]['x']:.04f}"],
index=pd.DatetimeIndex(start=datetime.datetime(2020, 1, 1),
freq='h', periods=len(brick_rel)),
data=np.vstack([brick_rel, brick_abs, mortar_rel, mortar_abs]).T)
local_df.columns = pd.MultiIndex.from_arrays([local_df.columns, ['brick', 'brick', 'mortar', 'mortar'],
['relative', 'absolute', 'relative', 'absolute']],
names=['location', 'material', 'type'])
return local_df
def differences_weighted(i, plots=False):
avg_2d = np.average(a=[sim_2d[3*i]['temperature'],
sim_2d[3*i+1]['temperature'],
sim_2d[3*i+2]['temperature']],
axis=0,
weights=[56, 24., 56])  # bottom, mid, top cells for this 1D location
brick_abs = abs_diff(brick_1d[i]['temperature'], avg_2d)
mortar_abs = abs_diff(mortar_1d[i]['temperature'], avg_2d)
brick_rel = rel_diff(brick_1d[i]['temperature'], avg_2d)
mortar_rel = rel_diff(mortar_1d[i]['temperature'], avg_2d)
if plots:
# Plot
plt.figure()
plt.title(f"Temperature - Weighted Absolute Difference\n"
f"1D-Location: {brick_1d[i]['x']:.4f} and 2D-Location: {sim_2d[i*3]['x']:.4f}")
plt.plot(x_date, brick_abs, color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_abs, color=colors['1d_mortar'], label=f"1D Mortar")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel('%')
plt.figure()
plt.title(f"Temperature - Weighted Relative Difference\n"
f"1D-Location: {brick_1d[i]['x']:.4f} and 2D-Location: {sim_2d[i*3]['x']:.4f}")
plt.plot(x_date, brick_rel, color=colors['1d_brick'], label=f"1D Brick")
plt.plot(x_date, mortar_rel, color=colors['1d_mortar'], label=f"1D Mortar")
plt.legend()
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
plt.ylabel('%')
local_df = pd.DataFrame(columns=[f"{brick_1d[i]['x']:.04f}", f"{brick_1d[i]['x']:.04f}",
f"{brick_1d[i]['x']:.04f}", f"{brick_1d[i]['x']:.04f}"],
index=pd.DatetimeIndex(start=datetime.datetime(2020, 1, 1),
freq='h', periods=len(brick_rel)),
data=np.vstack([brick_rel, brick_abs, mortar_rel, mortar_abs]).T)
local_df.columns = pd.MultiIndex.from_arrays([local_df.columns, ['brick', 'brick', 'mortar', 'mortar'],
['relative', 'absolute', 'relative', 'absolute']],
names=['location', 'material', 'type'])
return local_df
dataframes = []
weighted_dataframes = []
for index in range(len(brick_1d)):
dataframes.append(differences(index))
weighted_dataframes.append(differences_weighted(index))
#plt.show()
result_dataframe = pd.concat(dataframes, axis=1)
w_result_dataframe = pd.concat(weighted_dataframes, axis=1)
absolute_df = result_dataframe.loc[:, pd.IndexSlice[:, :, 'absolute']]
absolute_df.columns = absolute_df.columns.droplevel(level=2)
relative_df = result_dataframe.loc[:, pd.IndexSlice[:, :, 'relative']]
relative_df.columns = relative_df.columns.droplevel(level=2)
w_absolute_df = w_result_dataframe.loc[:, pd.IndexSlice[:, :, 'absolute']]
w_absolute_df.columns = w_absolute_df.columns.droplevel(level=2)
w_relative_df = w_result_dataframe.loc[:, pd.IndexSlice[:, :, 'relative']]
w_relative_df.columns = w_relative_df.columns.droplevel(level=2)
plt.figure()
ax = absolute_df.boxplot()
ax.set_ylim(-20, 20)
ax.set_ylabel('Temperature - C')
ax.set_title('Absolute Differences')
#plt.show()
out_folder = r'C:\Users\ocni\PycharmProjects\delphin_6_automation\data_process\2d_1d\processed_data'
def excel():
writer = pd.ExcelWriter(out_folder + '/temperature.xlsx')
relative_df.describe().to_excel(writer, 'relative')
absolute_df.describe().to_excel(writer, 'absolute')
writer.save()
#excel()
def save_relative():
hdf_file = out_folder + '/relative_temperature.h5'
w_relative_df.to_hdf(hdf_file, project_, append=True)
save_relative()
for project_key in project_dict.keys():
print(f'Processing {project_key}')
main(project_key)
| mit |
BiaDarkia/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 33 | 4174 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples. Note you can
increase this to label more than 30 by changing `max_iterations`. Labeling
more than 30 can be useful to get a sense for the speed of convergence of
this active learning technique.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
max_iterations = 5
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(max_iterations):
if len(unlabeled_indices) == 0:
print("No unlabeled items left to label.")
break
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print("Iteration %i %s" % (i, 70 * "_"))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points,
n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select up to 5 digit examples that the classifier is most uncertain about
uncertainty_index = np.argsort(pred_entropies)[::-1]
uncertainty_index = uncertainty_index[
np.in1d(uncertainty_index, unlabeled_indices)][:5]
# keep track of indices that we get labels for
delete_indices = np.array([])
# for more than 5 iterations, visualize the gain only on the first 5
if i < 5:
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" %
((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
# for more than 5 iterations, visualize the gain only on the first 5
if i < 5:
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r, interpolation='none')
sub.set_title("predict: %i\ntrue: %i" % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
# labeling 5 points, remote from labeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += len(uncertainty_index)
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.", y=1.15)
plt.subplots_adjust(left=0.2, bottom=0.03, right=0.9, top=0.9, wspace=0.2,
hspace=0.85)
plt.show()
| bsd-3-clause |
stharrold/ARCHIVED_bench_fastq | bench_fastq/utils.py | 2 | 15946 | #!/usr/bin/env python
"""Utils to parse the terminal output from bench_compress.sh
"""
from __future__ import print_function, division, absolute_import
import os
import sys
import json
import datetime as dt
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def parse_elapsed(elapsed):
"""Parse string of elapsed time from output of Unix 'time' command into
`datetime.timedelta`.
Parameters
----------
elapsed : string
Elapsed time field output from Unix 'time' command.
Format: [HH:]MM:SS[.SSS]
Returns
-------
elapsed_dt : datetime.timedelta
Elapsed time as ``datetime.timedelta``.
"""
elapsed_arr = elapsed.split(':')
if len(elapsed_arr) == 2:
hours = '0'
[minutes, seconds] = elapsed_arr
elif len(elapsed_arr) == 3:
[hours, minutes, seconds] = elapsed_arr
else:
raise AssertionError(("Program error. Elapsed time does not have 2 or 3 fields:\n" +
"{ea}").format(ea=elapsed_arr))
hours_int = int(float(hours))
minutes_int = int(float(minutes))
seconds_int = int(float(seconds))
milliseconds_int = int((float(seconds) - seconds_int) / 0.001)
elapsed_dt = dt.timedelta(hours=hours_int,
minutes=minutes_int,
seconds=seconds_int,
milliseconds=milliseconds_int)
return elapsed_dt
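def _example_parse_elapsed():
    # Added illustration (not part of the original module): both the MM:SS and
    # HH:MM:SS[.SSS] forms emitted by Unix `time` are accepted.
    assert parse_elapsed('2:30').total_seconds() == 150.0
    assert parse_elapsed('1:02:03') == dt.timedelta(hours=1, minutes=2, seconds=3)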
def recursive_timedelta_to_totsec(dobj):
"""Recursively convert ``datetime.timedelta`` elements to total seconds
in a ``dict``.
Call this function before writing the ``dict`` to JSON.
Parameters
----------
dobj : dict
``dict`` that may contain ``datetime.timedelta`` elements. ``dict`` may
be nested.
Returns
-------
dobj_converted : dict
``dict`` with ``datetime.timedelta`` elements converted to
total seconds.
"""
dobj_converted = {}
for key in dobj:
if isinstance(dobj[key], dt.timedelta):
dobj_converted[key] = dobj[key].total_seconds()
elif isinstance(dobj[key], dict):
dobj_converted[key] = recursive_timedelta_to_totsec(dobj=dobj[key])
else:
dobj_converted[key] = dobj[key]
return dobj_converted
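def _example_timedelta_to_totsec():
    # Added illustration (not part of the original module): nested dicts are
    # converted recursively; non-timedelta values pass through unchanged.
    nested = {'elapsed_time': dt.timedelta(minutes=1, seconds=30),
              'compress': {'elapsed_time': dt.timedelta(seconds=45)},
              'size_bytes': 1024}
    expected = {'elapsed_time': 90.0,
                'compress': {'elapsed_time': 45.0},
                'size_bytes': 1024}
    assert recursive_timedelta_to_totsec(dobj=nested) == expected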
def parse_compress(fin, fout=None):
"""Parse terminal output from bench_compress.sh
Parse by filename, file size, compression method, compression ratio, compression and decompression speed.
Note: This function is rigidly dependent upon bench_compress.sh.
Parameters
----------
fin : string
Path to text file with terminal output.
fout : {None}, string, optional
Path to output .json file of parsed terminal output.
Returns
-------
parsed : dict
``dict`` of parsed terminal output.
"""
# Check input.
fpath = os.path.abspath(fin)
if not os.path.isfile(fpath):
raise IOError("File does not exist:\n{fpath}".format(fpath=fpath))
if fout is not None:
if not os.path.splitext(fout)[1] == '.json':
raise IOError(("File extension is not '.json':\n" +
"{fout}").format(fout=fout))
# Parse text file into dict.
parsed = {}
skip_lines = None
catch_initial_size = None
catch_comp_cmd = None
catch_comp_time = None
catch_comp_size = None
catch_decomp_cmd = None
catch_decomp_time = None
catch_decomp_size = None
with open(fpath, 'rb') as fobj:
for line in fobj:
line = line.rstrip()
if line.startswith('Begin processing:'):
line_arr = line.split(':')
fname = os.path.splitext(os.path.basename(line_arr[1]))[0]
parsed[fname] = {}
continue
# Note: Typo in original script "Intial". Do not correct.
elif line.startswith('Intial .fastq size:'):
catch_initial_size = True
skip_lines = 1
continue
elif catch_initial_size and skip_lines >= 0:
if skip_lines > 0:
skip_lines -= 1
continue
elif skip_lines == 0:
line_arr = line.split()
parsed[fname]['size_bytes'] = int(line_arr[0])
assert os.path.basename(line_arr[1]) == fname
catch_initial_size = False
skip_lines = None
continue
elif line.startswith('Iteration:'):
line_arr = line.split(':')
iteration = int(line_arr[1])
parsed[fname][iteration] = {}
continue
elif line.startswith('Testing'):
line_arr = line.rstrip(':').split()
method = line_arr[1]
parsed[fname][iteration][method] = {}
catch_comp_cmd = True
continue
elif catch_comp_cmd and line.startswith('+ sudo time'):
parsed[fname][iteration][method]['compress'] = {}
parsed[fname][iteration][method]['compress']['command'] = line
catch_comp_cmd = False
catch_comp_time = True
continue
elif catch_comp_time and ('elapsed' in line) and ('CPU' in line):
line_arr = line.split()
elapsed = parse_elapsed(elapsed=line_arr[2].strip('elapsed'))
parsed[fname][iteration][method]['compress']['elapsed_time'] = elapsed
pct_cpu = line_arr[3].strip('%CPU')
if pct_cpu == '?':
pct_cpu = np.NaN
else:
pct_cpu = float(pct_cpu)
parsed[fname][iteration][method]['compress']['CPU_percent'] = pct_cpu
catch_comp_time = False
catch_comp_size = True
continue
elif catch_comp_size:
if line.startswith('+ du --bytes'):
skip_lines = 0
continue
elif skip_lines == 0:
line_arr = line.split()
parsed[fname][iteration][method]['compress']['size_bytes'] = int(line_arr[0])
catch_comp_size = False
skip_lines = None
catch_decomp_cmd = True
continue
elif catch_decomp_cmd and line.startswith('+ sudo time'):
parsed[fname][iteration][method]['decompress'] = {}
parsed[fname][iteration][method]['decompress']['command'] = line
catch_decomp_cmd = False
catch_decomp_time = True
continue
elif catch_decomp_time and ('elapsed' in line) and ('CPU' in line):
line_arr = line.split()
elapsed = parse_elapsed(elapsed=line_arr[2].strip('elapsed'))
parsed[fname][iteration][method]['decompress']['elapsed_time'] = elapsed
pct_cpu = line_arr[3].strip('%CPU')
if pct_cpu == '?':
pct_cpu = np.NaN
else:
pct_cpu = float(pct_cpu)
parsed[fname][iteration][method]['decompress']['CPU_percent'] = pct_cpu
catch_decomp_time = False
catch_decomp_size = True
continue
elif catch_decomp_size:
if line.startswith('+ du --bytes'):
skip_lines = 0
continue
elif skip_lines == 0:
line_arr = line.split()
parsed[fname][iteration][method]['decompress']['size_bytes'] = int(line_arr[0])
if parsed[fname]['size_bytes'] != parsed[fname][iteration][method]['decompress']['size_bytes']:
# noinspection PyPep8
print(("WARNING: File size before and after compression test do not match.\n" +
"file name = {fname}\n" +
"method = {method}\n" +
"initial size (bytes) = {init_size}\n" +
"final size (bytes) = {finl_size}").format(fname=fname, method=method,
init_size=parsed[fname]['size_bytes'],
finl_size=parsed[fname][iteration][method]['decompress']['size_bytes']),
file=sys.stderr)
catch_decomp_size = False
skip_lines = None
continue
# Write out dict as JSON.
if fout is not None:
parsed_converted = recursive_timedelta_to_totsec(dobj=parsed)
print("Writing parsed text to: {fout}".format(fout=fout))
with open(fout, "wb") as fobj:
json.dump(parsed_converted, fobj, indent=4, sort_keys=True)
return parsed
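# Hypothetical usage sketch (added for illustration; the file name and keys
# are made up). The parsed dict is keyed filename (extension stripped) ->
# iteration -> method -> process:
#
#   parsed = parse_compress('bench_compress_output.txt', fout='parsed.json')
#   parsed['sample'][0]['gzip']['compress']['elapsed_time']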
def parsed_dict_to_df(parsed_dict):
"""Convert ``dict`` from parse_compress to ``pandas.dataframe``.
Parameters
----------
parsed_dict : dict
``dict`` of parsed terminal output.
Returns
-------
parsed_df : pandas.dataframe
``pandas.DataFrame`` with a 'quantity' index and hierarchical columns
(filename, iteration, method, process).
"""
# TODO: make recursive method, e.g. http://stackoverflow.com/questions/9538875/recursive-depth-of-python-dictionary
filename_df_dict = {}
for filename in parsed_dict:
iteration_df_dict = {}
for iteration in parsed_dict[filename]:
method_df_dict = {}
# Skip size_bytes for file since not a nested dict.
if isinstance(parsed_dict[filename][iteration], dict):
for method in parsed_dict[filename][iteration]:
method_df_dict[method] = pd.DataFrame.from_dict(parsed_dict[filename][iteration][method],
orient='columns')
iteration_df_dict[iteration] = pd.concat(method_df_dict, axis=1)
filename_df_dict[filename] = pd.concat(iteration_df_dict, axis=1)
parsed_df = pd.concat(filename_df_dict, axis=1)
parsed_df.index.names = ['quantity']
parsed_df.columns.names = ['filename', 'iteration', 'method', 'process']
return parsed_df
def condense_parsed_df(parsed_df, parsed_dict):
"""Condense ``pandas.dataframe`` from parsed terminal output.
Calculate compression/decompression rate in GB per minute and compression ratio, averaging over iterations and
taking median of results.
Parameters
----------
parsed_df : pandas.DataFrame
``pandas.DataFrame`` from `parsed_dict_to_df`.
Index name: quantity
Hierarchical column names: filename, iteration, method, process
parsed_dict : dict
Nested ``dict`` from parse_compress.
Returns
-------
condensed_df : pandas.DataFrame
Hierarchical index names: method, process, quantity
Column name: quantity
See Also
--------
parsed_dict_to_df, parse_compress, reduce_condensed_df
"""
# Calculate compression/decompression rate in GB per minute and compression ratio.
# Drop quantities except for 'GB_per_minute' and 'compression_ratio'. Drop test files and incomplete tests.
# Average over iterations. Take median of results.
condensed_df = parsed_df.stack(['filename', 'method', 'process', 'iteration']).unstack('quantity').copy()
condensed_df['elapsed_seconds'] = condensed_df['elapsed_time'].apply(
lambda x: x.total_seconds() if isinstance(x, dt.timedelta) else x)
condensed_df['elapsed_seconds'] = condensed_df['elapsed_seconds'].apply(lambda x: np.NaN if x == 0.0 else x)
condensed_df['GB_per_minute'] = np.NaN
condensed_df['compression_ratio'] = np.NaN
# TODO: Use .values to vectorize
for fname in condensed_df.index.levels[0].values:
# TODO: remove SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame
condensed_df.loc[fname, 'GB_per_minute'].update(
(parsed_dict[fname]['size_bytes'] / condensed_df.loc[fname, 'elapsed_seconds']).multiply(60.0 / 1.0E9))
condensed_df.loc[fname, 'compression_ratio'].update(
condensed_df.loc[fname, 'size_bytes'].div(parsed_dict[fname]['size_bytes']))
return condensed_df
def reduce_condensed_df(condensed_df):
"""Reduce ``pandas.DataFrame`` from `condense_parsed_df` by averaging over iterations and taking the median over
file names.
Parameters
----------
condensed_df : pandas.DataFrame
        Hierarchical index names: filename, method, process, iteration
Column name: quantity
Returns
-------
    reduced_ser : pandas.Series
        ``pandas.Series`` reduced from the output of `condense_parsed_df`.
        Hierarchical index names: method, process, quantity
See Also
--------
condense_parsed_df, plot_rate, plot_ratio
"""
reduced_ser = condensed_df.stack().unstack(['filename', 'method', 'process', 'quantity']).mean()
reduced_ser = reduced_ser.unstack(['method', 'process', 'quantity']).median()
return reduced_ser
def plot_rate(reduced_ser, fout=None):
"""Plot processing rate vs compression method.
Parameters
----------
reduced_ser : pandas.Series
``pandas.Series`` from `reduce_condensed_df`.
        Hierarchical index names: method, process, quantity
fout : {None}, string, optional
Path to save plot as image. Extension must be supported by ``matplotlib.pyplot.savefig()``
Returns
-------
None
See Also
--------
reduce_condensed_df, plot_ratio
"""
plt.figure()
pd.DataFrame.plot(reduced_ser.unstack(['quantity'])['GB_per_minute'].unstack(['process']),
title="Processing rate vs compression method\nmedian results over all files",
sort_columns=True, kind='bar')
legend = plt.legend(loc='best', title="Process")
legend.get_texts()[0].set_text('Compress')
legend.get_texts()[1].set_text('Decompress')
xtick_labels = ('(bzip2, --fast)', '(fqz_comp, default)', '(gzip, --fast)', '(quip, default)')
plt.xticks(xrange(len(xtick_labels)), xtick_labels, rotation=45)
plt.xlabel("Compression method with options")
plt.ylabel("Processing rate (GB per minute)")
if fout is not None:
print("Writing plot to: {fout}".format(fout=fout))
plt.savefig(fout, bbox_inches='tight')
plt.show()
return None
def plot_ratio(reduced_ser, fout=None):
"""Plot compression ratio vs compression method.
Parameters
----------
reduced_ser : pandas.Series
``pandas.Series`` from `reduce_condensed_df`.
        Hierarchical index names: method, process, quantity
fout : {None}, string, optional
Path to save plot as image. Extension must be supported by ``matplotlib.pyplot.savefig()``
Returns
-------
None
See Also
--------
reduce_condensed_df, plot_rate
"""
plt.figure()
pd.Series.plot(reduced_ser.unstack(['quantity'])['compression_ratio'].unstack(['process'])['compress'],
title="Compression size ratio vs compression method\nmedian results over all files",
sort_columns=True, kind='bar')
xtick_labels = ('(bzip2, --fast)', '(fqz_comp, default)', '(gzip, --fast)', '(quip, default)')
plt.xticks(xrange(len(xtick_labels)), xtick_labels, rotation=45)
plt.xlabel("Compression method with options")
plt.ylabel("Compression size ratio\n(compressed size / decompressed size)")
if fout is not None:
print("Writing plot to: {fout}".format(fout=fout))
plt.savefig(fout, bbox_inches='tight')
plt.show()
return None
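# Illustrative usage sketch (not part of the original script): how the helpers
# above chain together, assuming `parsed` is the dict returned by
# parse_compress earlier in this module:
#
#   parsed_df = parsed_dict_to_df(parsed)
#   condensed_df = condense_parsed_df(parsed_df, parsed)
#   reduced_ser = reduce_condensed_df(condensed_df)
#   plot_rate(reduced_ser, fout='rate.pdf')
#   plot_ratio(reduced_ser, fout='ratio.pdf')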
| mit |
ESMG/ESMG-configs | CCS1/plot_vort.py | 1 | 2295 | import numpy as np
import netCDF4
import os
import sys
import subprocess
import pyroms
from pyroms_toolbox import jday2date
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
# Plot snapshots of surface relative vorticity from a daily ocean model output
# file on a Lambert conformal map and save each frame as a PNG for a movie.
#year = int(sys.argv[1])
#lst_year = [year]
lst_file = []
#for year in lst_year:
# year = np.str(year)
#lst = subprocess.getoutput('ls clima/*.nc')
lst = subprocess.getoutput('ls 19800110.ocean_daily.nc')
lst = lst.split()
lst_file = lst_file + lst
#grd = pyroms_toolbox.BGrid_GFDL.get_nc_BGrid_GFDL('prog.nc')
grd = netCDF4.Dataset('sea_ice_geometry.nc', "r")
clat = grd.variables["geolatb"][:]
clon = grd.variables["geolonb"][:]
m = Basemap(llcrnrlon=-121., llcrnrlat=17., urcrnrlon=-125.0, urcrnrlat=53.0,\
rsphere=(6378137.00,6356752.3142),\
resolution='h', projection='lcc',\
lat_0=30., lat_1=40.0, lon_0=-78.)
x, y = m(clon, clat)
levels = np.arange(-.6, 0.6, 0.01)
cmap = plt.cm.get_cmap("seismic")
for file in lst_file:
print("Plotting "+file)
nc = netCDF4.Dataset(file, "r")
time = nc.variables["time"][:]
ntim = len(time)
# for it in range(10):
for it in range(0,ntim,30):
fig = plt.figure(figsize=(4,9))
ax = fig.add_subplot(111)
ax.set_aspect('equal')
# ax.axis(xmin=-300,xmax=300)
# m.drawmapboundary(fill_color='0.3')
m.drawcoastlines()
ssh = nc.variables["RV"][it,0,:-1,:-1]
ssh *= 1.e4
time = nc.variables["time"][it]
cs = m.contourf(x, y, ssh, levels=levels, cmap=cmap, extend='both')
# csa = m.contour(x, y, ssh, levels=levels, linewidths=(0.5,))
# cs = plt.contourf(clon, clat, ssh, levels=levels, cmap=cmap, extend='both')
plt.title('Surface RV')
# csa = plt.contour(clon, clat, ssh, levels=levels, linewidths=(0.5,))
cbaxes = fig.add_axes([0.1, 0.05, 0.8, 0.02])
plt.colorbar(orientation='horizontal', cax=cbaxes)
print('printing frame:', it)
fig.savefig('movie/vort_%(number)04d.png'%{'number': it})
plt.close()
nc.close()
| gpl-3.0 |
mrcslws/htmresearch | projects/thing_classification/thing_convergence.py | 3 | 13625 | # Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file is used to run Thing experiments using simulated sensations.
"""
import random
import os
from math import ceil
import numpy as np
import pprint
import matplotlib.pyplot as plt
from sklearn import manifold, random_projection
from htmresearch.frameworks.layers.l2_l4_inference import (
L4L2Experiment, rerunExperimentFromLogfile)
from htmresearch.frameworks.layers.object_machine_factory import (
createObjectMachine
)
def getL4Params():
"""
Returns a good default set of parameters to use in the L4 region.
"""
return {
"columnCount": 256,
"cellsPerColumn": 16,
"learn": True,
"learnOnOneCell": False,
"initialPermanence": 0.51,
"connectedPermanence": 0.6,
"permanenceIncrement": 0.1,
"permanenceDecrement": 0.01,
"minThreshold": 19,
"predictedSegmentDecrement": 0.0,
"activationThreshold": 19,
"sampleSize": 20,
"implementation": "etm",
}
def getL2Params():
"""
  Returns a good default set of parameters to use in the L2 region.
"""
return {
"inputWidth": 256 * 16,
"cellCount": 4096,
"sdrSize": 40,
"synPermProximalInc": 0.5,
"synPermProximalDec": 0.0,
"initialProximalPermanence": 0.6,
"minThresholdProximal": 9,
"sampleSizeProximal": 10,
"connectedPermanenceProximal": 0.5,
"synPermDistalInc": 0.1,
"synPermDistalDec": 0.001,
"initialDistalPermanence": 0.41,
"activationThresholdDistal": 13,
"sampleSizeDistal": 30,
"connectedPermanenceDistal": 0.5,
"distalSegmentInhibitionFactor": 1.001,
"learningMode": True,
}
def locateConvergencePoint(stats, minOverlap, maxOverlap):
"""
Walk backwards through stats until you locate the first point that diverges
from target overlap values. We need this to handle cases where it might get
to target values, diverge, and then get back again. We want the last
convergence point.
"""
for i,v in enumerate(stats[::-1]):
if not (v >= minOverlap and v <= maxOverlap):
return len(stats)-i + 1
# Never differs - converged in one iteration
return 1
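# Illustrative example: with minOverlap=38, maxOverlap=42 and
# stats == [0, 10, 40, 41, 40], the reversed walk leaves the target band at the
# value 10 (i == 3), so the function returns len(stats) - 3 + 1 == 3, i.e. the
# trace is considered converged from the third data point onward.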
def averageConvergencePoint(inferenceStats, prefix, minOverlap, maxOverlap,
settlingTime):
"""
inferenceStats contains activity traces while the system visits each object.
Given the i'th object, inferenceStats[i] contains activity statistics for
each column for each region for the entire sequence of sensations.
For each object, compute the convergence time - the first point when all
L2 columns have converged.
Return the average convergence time across all objects.
Given inference statistics for a bunch of runs, locate all traces with the
given prefix. For each trace locate the iteration where it finally settles
on targetValue. Return the average settling iteration across all runs.
"""
convergenceSum = 0.0
# For each object
for stats in inferenceStats:
# For each L2 column locate convergence time
convergencePoint = 0.0
for key in stats.iterkeys():
if prefix in key:
columnConvergence = locateConvergencePoint(
stats[key], minOverlap, maxOverlap)
# Ensure this column has converged by the last iteration
# assert(columnConvergence <= len(stats[key]))
convergencePoint = max(convergencePoint, columnConvergence)
convergenceSum += ceil(float(convergencePoint)/settlingTime)
return convergenceSum/len(inferenceStats)
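# Illustrative example: if the slowest L2 column of an object converges after
# 7 presentations and settlingTime == 3, that object contributes
# ceil(7 / 3.0) == 3 settled sensations to the average returned above.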
def loadThingObjects(numCorticalColumns=1, objDataPath='./data/'):
"""
Load simulated sensation data on a number of different objects
There is one file per object, each row contains one feature, location pairs
The format is as follows
[(-33.6705, 75.5003, 2.4207)/10] => [[list of active bits of location],
[list of active bits of feature]]
The content before "=>" is the true 3D location / sensation
The number of active bits in the location and feature is listed after "=>".
@return A simple object machine
"""
# create empty simple object machine
objects = createObjectMachine(
machineType="simple",
numInputBits=20,
sensorInputSize=1024,
externalInputSize=1024,
numCorticalColumns=numCorticalColumns,
numFeatures=0,
numLocations=0,
)
for _ in range(numCorticalColumns):
objects.locations.append([])
objects.features.append([])
objFiles = []
for f in os.listdir(objDataPath):
if os.path.isfile(os.path.join(objDataPath, f)):
if '.log' in f:
objFiles.append(f)
idx = 0
OnBitsList = []
for f in objFiles:
objName = f.split('.')[0]
objName = objName[4:]
objFile = open('{}/{}'.format(objDataPath, f))
sensationList = []
for line in objFile.readlines():
# parse thing data file and extract feature/location vectors
sense = line.split('=>')[1].strip(' ').strip('\n')
OnBitsList.append(float(line.split('] =>')[0].split('/')[1]))
location = sense.split('],[')[0].strip('[')
feature = sense.split('],[')[1].strip(']')
location = np.fromstring(location, sep=',', dtype=np.uint8)
feature = np.fromstring(feature, sep=',', dtype=np.uint8)
# add the current sensation to object Machine
sensationList.append((idx, idx))
for c in range(numCorticalColumns):
objects.locations[c].append(set(location.tolist()))
objects.features[c].append(set(feature.tolist()))
idx += 1
objects.addObject(sensationList, objName)
print "load object file: {} object name: {} sensation # {}".format(
f, objName, len(sensationList))
OnBitsList = np.array(OnBitsList)
plt.figure()
plt.hist(OnBitsList)
return objects, OnBitsList
def trainNetwork(objects, numColumns, l4Params, l2Params, verbose=False):
print " Training sensorimotor network ..."
objectNames = objects.objects.keys()
numObjects = len(objectNames)
exp = L4L2Experiment("shared_features",
L2Overrides=l2Params,
L4Overrides=l4Params,
numCorticalColumns=numColumns)
exp.learnObjects(objects.provideObjectsToLearn())
settlingTime = 1
L2Representations = exp.objectL2Representations
# if verbose:
# print "Learned object representations:"
# pprint.pprint(L2Representations, width=400)
# print "=========================="
# For inference, we will check and plot convergence for each object. For each
# object, we create a sequence of random sensations for each column. We will
# present each sensation for settlingTime time steps to let it settle and
# ensure it converges.
maxSensationNumber = 30
overlapMat = np.zeros((numObjects, numObjects, maxSensationNumber))
numL2ActiveCells = np.zeros((numObjects, maxSensationNumber))
for objectIdx in range(numObjects):
objectId = objectNames[objectIdx]
obj = objects[objectId]
# Create sequence of sensations for this object for one column. The total
# number of sensations is equal to the number of points on the object. No
# point should be visited more than once.
objectCopy = [pair for pair in obj]
random.shuffle(objectCopy)
exp.sendReset()
for sensationNumber in range(maxSensationNumber):
objectSensations = {}
for c in range(numColumns):
objectSensations[c] = []
if sensationNumber >= len(objectCopy):
pair = objectCopy[-1]
else:
pair = objectCopy[sensationNumber]
if numColumns > 1:
raise NotImplementedError
else:
# stay multiple steps on each sensation
for _ in xrange(settlingTime):
objectSensations[0].append(pair)
inferConfig = {
"object": objectId,
"numSteps": len(objectSensations[0]),
"pairs": objectSensations,
"includeRandomLocation": False,
}
inferenceSDRs = objects.provideObjectToInfer(inferConfig)
exp.infer(inferenceSDRs, objectName=objectId, reset=False)
for i in range(numObjects):
overlapMat[objectIdx, i, sensationNumber] = len(
exp.getL2Representations()[0] &
L2Representations[objects.objects.keys()[i]][0])
# if verbose:
# print "Intersection with {}:{}".format(
# objectNames[i], overlapMat[objectIdx, i])
for c in range(numColumns):
numL2ActiveCells[objectIdx, sensationNumber] += len(
exp.getL2Representations()[c])
print "{} # L2 active cells {}: ".format(sensationNumber,
numL2ActiveCells[
objectIdx, sensationNumber])
if verbose:
print "Output for {}: {}".format(objectId, exp.getL2Representations())
print "Final L2 active cells {}: ".format(
numL2ActiveCells[objectIdx, sensationNumber])
print
exp.sendReset()
expResult = {'overlapMat': overlapMat,
'numL2ActiveCells': numL2ActiveCells}
return expResult
def computeAccuracy(expResult, objects):
objectNames = objects.objects.keys()
overlapMat = expResult['overlapMat'][:, :, -1]
numL2ActiveCells = expResult['numL2ActiveCells'][:, -1]
numCorrect = 0
numObjects = overlapMat.shape[0]
numFound = 0
percentOverlap = np.zeros(overlapMat.shape)
for i in range(numObjects):
for j in range(i, numObjects):
percentOverlap[i, j] = overlapMat[i, j] # / np.min([numL2ActiveCells[i], numL2ActiveCells[j]])
objectNames = np.array(objectNames)
for i in range(numObjects):
# idx = np.where(overlapMat[i, :]>confuseThresh)[0]
idx = np.where(percentOverlap[i, :] == np.max(percentOverlap[i, :]))[0]
print " {}, # sensations {}, best match is {}".format(
objectNames[i], len(objects[objectNames[i]]), objectNames[idx])
found = len(np.where(idx == i)[0]) > 0
numFound += found
if not found:
print "<=========== {} was not detected ! ===========>".format(objectNames[i])
if len(idx) > 1:
continue
if idx[0] == i:
numCorrect += 1
accuracy = float(numCorrect)/numObjects
numPerfect = len(np.where(numL2ActiveCells<=40)[0])
print "accuracy: {} ({}/{}) ".format(accuracy, numCorrect, numObjects)
print "perfect retrival ratio: {} ({}/{}) ".format(
float(numPerfect)/numObjects, numPerfect, numObjects)
print "Object detection ratio {}/{} ".format(numFound, numObjects)
return accuracy
def runExperimentAccuracyVsL4Thresh():
accuracyVsThresh = []
threshList = np.arange(13, 20)
for thresh in threshList:
numColumns = 1
l2Params = getL2Params()
l4Params = getL4Params()
l4Params['minThreshold'] = thresh
l4Params['activationThreshold'] = thresh
objects = loadThingObjects(1, './data')
expResult = trainNetwork(objects, numColumns, l4Params, l2Params, True)
accuracy = computeAccuracy(expResult, objects)
accuracyVsThresh.append(accuracy)
plt.figure()
plt.plot(threshList, accuracyVsThresh, '-o')
plt.xlabel('L4 distal Threshold')
plt.ylabel('Classification Accuracy')
plt.savefig('accuracyVsL4Thresh.pdf')
return threshList, accuracyVsThresh
if __name__ == "__main__":
# uncomment to plot accuracy as a function of L4 threshold
# threshList, accuracyVsThresh = runExperimentAccuracyVsL4Thresh()
numColumns = 1
l2Params = getL2Params()
l4Params = getL4Params()
verbose = 1
objects, OnBitsList = loadThingObjects(numColumns, './data')
expResult = trainNetwork(objects, numColumns, l4Params, l2Params, True)
accuracy = computeAccuracy(expResult, objects)
objectNames = objects.objects.keys()
numObjects = len(objectNames)
overlapMat = expResult['overlapMat']
numL2ActiveCells = expResult['numL2ActiveCells']
objectNames = objects.objects.keys()
numObjects = len(objectNames)
plt.figure()
for sensationNumber in range(10):
plt.imshow(overlapMat[:, :, sensationNumber])
plt.xticks(range(numObjects), objectNames, rotation='vertical', fontsize=4)
plt.yticks(range(numObjects), objectNames, fontsize=4)
plt.title('pairwise overlap at step {}'.format(sensationNumber))
plt.xlabel('target representation')
plt.ylabel('inferred representation')
plt.tight_layout()
plt.savefig('plots/overlap_matrix_step_{}.png'.format(sensationNumber))
# plot number of active cells for each object
plt.figure()
objectNamesSort = []
idx = np.argsort(expResult['numL2ActiveCells'][:, -1])
for i in idx:
objectNamesSort.append(objectNames[i])
plt.plot(numL2ActiveCells[idx, -1])
plt.xticks(range(numObjects), objectNamesSort, rotation='vertical', fontsize=5)
plt.tight_layout()
plt.ylabel('Number of active L2 cells')
plt.savefig('plots/number_of_active_l2_cells.pdf')
#
| agpl-3.0 |
GuessWhoSamFoo/pandas | pandas/tests/frame/conftest.py | 1 | 5594 | import numpy as np
import pytest
from pandas import DataFrame, NaT, compat, date_range
import pandas.util.testing as tm
@pytest.fixture
def float_frame():
"""
Fixture for DataFrame of floats with index of unique strings
Columns are ['A', 'B', 'C', 'D'].
"""
return DataFrame(tm.getSeriesData())
@pytest.fixture
def float_frame_with_na():
"""
Fixture for DataFrame of floats with index of unique strings
Columns are ['A', 'B', 'C', 'D']; some entries are missing
"""
df = DataFrame(tm.getSeriesData())
# set some NAs
df.loc[5:10] = np.nan
df.loc[15:20, -2:] = np.nan
return df
@pytest.fixture
def float_frame2():
"""
Fixture for DataFrame of floats with index of unique strings
Columns are ['D', 'C', 'B', 'A']
"""
return DataFrame(tm.getSeriesData(), columns=['D', 'C', 'B', 'A'])
@pytest.fixture
def bool_frame_with_na():
"""
Fixture for DataFrame of booleans with index of unique strings
Columns are ['A', 'B', 'C', 'D']; some entries are missing
"""
df = DataFrame(tm.getSeriesData()) > 0
df = df.astype(object)
# set some NAs
df.loc[5:10] = np.nan
df.loc[15:20, -2:] = np.nan
return df
@pytest.fixture
def int_frame():
"""
Fixture for DataFrame of ints with index of unique strings
Columns are ['A', 'B', 'C', 'D']
"""
df = DataFrame({k: v.astype(int)
for k, v in compat.iteritems(tm.getSeriesData())})
# force these all to int64 to avoid platform testing issues
return DataFrame({c: s for c, s in compat.iteritems(df)}, dtype=np.int64)
@pytest.fixture
def datetime_frame():
"""
Fixture for DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']
"""
return DataFrame(tm.getTimeSeriesData())
@pytest.fixture
def float_string_frame():
"""
Fixture for DataFrame of floats and strings with index of unique strings
Columns are ['A', 'B', 'C', 'D', 'foo'].
"""
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
return df
@pytest.fixture
def mixed_float_frame():
"""
Fixture for DataFrame of different float types with index of unique strings
Columns are ['A', 'B', 'C', 'D'].
"""
df = DataFrame(tm.getSeriesData())
df.A = df.A.astype('float32')
df.B = df.B.astype('float32')
df.C = df.C.astype('float16')
df.D = df.D.astype('float64')
return df
@pytest.fixture
def mixed_float_frame2():
"""
Fixture for DataFrame of different float types with index of unique strings
Columns are ['A', 'B', 'C', 'D'].
"""
df = DataFrame(tm.getSeriesData())
df.D = df.D.astype('float32')
df.C = df.C.astype('float32')
df.B = df.B.astype('float16')
df.D = df.D.astype('float64')
return df
@pytest.fixture
def mixed_int_frame():
"""
Fixture for DataFrame of different int types with index of unique strings
Columns are ['A', 'B', 'C', 'D'].
"""
df = DataFrame({k: v.astype(int)
for k, v in compat.iteritems(tm.getSeriesData())})
df.A = df.A.astype('int32')
df.B = np.ones(len(df.B), dtype='uint64')
df.C = df.C.astype('uint8')
df.D = df.C.astype('int64')
return df
@pytest.fixture
def mixed_type_frame():
"""
Fixture for DataFrame of float/int/string columns with RangeIndex
Columns are ['a', 'b', 'c', 'float32', 'int32'].
"""
return DataFrame({'a': 1., 'b': 2, 'c': 'foo',
'float32': np.array([1.] * 10, dtype='float32'),
'int32': np.array([1] * 10, dtype='int32')},
index=np.arange(10))
@pytest.fixture
def timezone_frame():
"""
Fixture for DataFrame of date_range Series with different time zones
Columns are ['A', 'B', 'C']; some entries are missing
"""
df = DataFrame({'A': date_range('20130101', periods=3),
'B': date_range('20130101', periods=3,
tz='US/Eastern'),
'C': date_range('20130101', periods=3,
tz='CET')})
df.iloc[1, 1] = NaT
df.iloc[1, 2] = NaT
return df
@pytest.fixture
def empty_frame():
"""
Fixture for empty DataFrame
"""
return DataFrame({})
@pytest.fixture
def datetime_series():
"""
Fixture for Series of floats with DatetimeIndex
"""
return tm.makeTimeSeries(nper=30)
@pytest.fixture
def datetime_series_short():
"""
Fixture for Series of floats with DatetimeIndex
"""
return tm.makeTimeSeries(nper=30)[5:]
@pytest.fixture
def simple_frame():
"""
Fixture for simple 3x3 DataFrame
Columns are ['one', 'two', 'three'], index is ['a', 'b', 'c'].
"""
arr = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]])
return DataFrame(arr, columns=['one', 'two', 'three'],
index=['a', 'b', 'c'])
@pytest.fixture
def frame_of_index_cols():
"""
Fixture for DataFrame of columns that can be used for indexing
Columns are ['A', 'B', 'C', 'D', 'E', ('tuple', 'as', 'label')];
'A' & 'B' contain duplicates (but are jointly unique), the rest are unique.
"""
df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'],
'B': ['one', 'two', 'three', 'one', 'two'],
'C': ['a', 'b', 'c', 'd', 'e'],
'D': np.random.randn(5),
'E': np.random.randn(5),
('tuple', 'as', 'label'): np.random.randn(5)})
return df
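# Illustrative only (not part of the original conftest): pytest injects these
# fixtures into tests by argument name, e.g.
#
#   def test_columns(float_frame):
#       assert list(float_frame.columns) == ['A', 'B', 'C', 'D']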
| bsd-3-clause |
r-mart/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 162 | 9771 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold._utils import _binary_search_perplexity
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[0]
grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
for init in ('random', 'pca'):
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
init=init, random_state=0)
X_embedded = tsne.fit_transform(X)
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0)
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
metric="precomputed", random_state=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca' or 'random'.
assert_raises_regexp(ValueError, "'init' must be either 'pca' or 'random'",
TSNE, init="not available")
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_verbose():
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
| bsd-3-clause |
anderson1008/NOCulator | hring/src/Script/my_print.py | 1 | 8437 | #!/usr/bin/python
import sys
import os
import re
import fnmatch
import string
import matplotlib.pyplot as plt
def print_period(stat):
# use to profile the application running solo.
# stat is an iterator or array
i = 0
for item in stat:
plt.plot(item, label=str(i))
plt.legend()
i = i + 1
def print_double_array (x):
for x_i in x:
sys.stdout.write(str("%.2f" % x_i) + ' ')
print "\n"
sys.stdout.flush()
def print_int_array (x):
for x_i in x:
sys.stdout.write(str(x_i) + ' ')
print "\n"
sys.stdout.flush()
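# Illustrative example: print_int_array([1, 2, 3]) writes "1 2 3 " followed by
# a blank line, while print_double_array formats each value with two decimal
# places ("1.00 2.00 3.00 ").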
def print_stat_dict (my_stat):
for key, value in iter(sorted(my_stat.iteritems())):
if type(value) is not list:
print key.ljust(20), value.ljust(20)
# else:
# for element in value:
# print element
def print_power (stat):
output_str = '\n\n############# Power Distribution ################\n\n'
output_str = output_str + ''.ljust(15) + 'Static'.ljust(20) + 'Dynamic'.ljust(20) + 'Overall'.ljust(20) + '\n'
## print BLESS
static_percent = "{:.2f}".format(stat[0]/stat[2]*100)
dynamic_percent = "{:.2f}".format(stat[1]/stat[2]*100)
output_str = output_str + 'BLESS'.ljust(15) + ('%s (%s%%)'%("{:.2f}".format(stat[0]),static_percent)).ljust(20) + ('%s (%s%%)'%("{:.2f}".format(stat[1]),dynamic_percent)).ljust(20) + str(stat[2]).ljust(20) + '\n'
# print MBNoC
static_percent = "{:.2f}".format(stat[3]/stat[5]*100)
dynamic_percent = "{:.2f}".format(stat[4]/stat[5]*100)
output_str = output_str + 'MBNoC'.ljust(15) + ('%s (%s%%)'%("{:.2f}".format(stat[3]),static_percent)).ljust(20) + ('%s (%s%%)'%("{:.2f}".format(stat[4]),dynamic_percent)).ljust(20) + str(stat[5]).ljust(20)
output_str = output_str + '\n'
print output_str
def print_power_breakdown (stat):
output_str = '\n\n############# Power Breakdown ################\n\n'
output_str = output_str + ''.ljust(15) + 'Static'.ljust(20) + 'Dynamic'.ljust(20) + 'Overall'.ljust(20) + '\n'
output_str = output_str + 'Component'.ljust(15) + 'BLESS'.ljust(10) + 'MBNoC'.ljust(10) + 'BLESS'.ljust(10) + 'MBNoC'.ljust(10) + 'BLESS'.ljust(10) + 'MBNoC'.ljust(10) + '\n'
print_order = ['DFF', 'portAlloc', 'RC', 'Xbar', 'Local', 'permNet', 'link']
for component in range (0, 7):
output_str = output_str + print_order[component].ljust(15)
for metric in stat:
output_str = output_str + str(metric[component+1]).ljust(10)
output_str = output_str + '\n'
print output_str
def print_final_stat (stat):
output_str = '\n\n############# Overall ################\n\n'
    output_str = output_str + ''.ljust(20) + 'weighted_speedup'.ljust(20) + 'Energy'.ljust(20) + 'Throughput'.ljust(20) + 'Deflection Rate'.ljust(20) + '\n'
output_str = output_str + 'Load'.ljust(10) + 'Count'.ljust(10)
for i in range (0, 4):
output_str = output_str + 'BLESS'.ljust(10) + 'MBNoC'.ljust(10)
output_str = output_str + '\n' + 'Low'.ljust(10)
for metric in stat[0]:
output_str = output_str + str(metric).ljust(10)
output_str = output_str + '\n'
output_str = output_str + 'Medium'.ljust(10)
for metric in stat[1]:
output_str = output_str + str(metric).ljust(10)
output_str = output_str + '\n'
output_str = output_str + 'High'.ljust(10)
for metric in stat[2]:
output_str = output_str + str(metric).ljust(10)
output_str = output_str + '\n'
output_str = output_str + 'Average'.ljust(10)
for metric in stat[3]:
output_str = output_str + str(metric).ljust(10)
output_str = output_str + '\n'
print output_str
return output_str
def print_for_plot (stat):
output_str = '\n\n############# Print for plot ################\n\n'
    output_str = output_str + 'Baseline of each metric of interest is 1.\nEach metric is normalized to BLESS with the same network size.\n\n'
    output_str = output_str + 'Load'.ljust(8) + 'Count'.ljust(8) + 'ws'.ljust(8) + '4x4'.ljust(8) + '8x8'.ljust(8) + '16x16'.ljust(8) + 'engy'.ljust(8) + '4x4'.ljust(8) + '8x8'.ljust(8) + '16x16'.ljust(8) + 'th'.ljust(8) + '4x4'.ljust(8) + '8x8'.ljust(8) + '16x16'.ljust(8) + 'defl'.ljust(8) + '4x4'.ljust(8) + '8x8'.ljust(8) + '16x16'.ljust(8) + '\n'
groups = ['Low','Medium','High','Average']
i = 0
for element in stat:
output_str = output_str + groups[i].ljust(8)
for metric in element:
output_str = output_str + str(metric).ljust(8)
i = i + 1
output_str = output_str + '\n'
print output_str
return output_str
def print_synth (stat, design):
traffic = str(stat.pop(0))
network = str(stat.pop(0))
#output_str = '\n\n############# ' + "Traffic = " + traffic.ljust(20) + "Network = " + network.ljust(20) + ' ################\n\n'
#output_str = output_str + 'Inject_rate'.ljust(20) + 'Energy'.ljust(20) + 'Latency'.ljust(20) + 'Deflect_rate'.ljust(20) + 'Throughput'.ljust(20) + '\n\n'
#output_str = output_str + 'BLESS'.ljust(10) + 'MBNoC'.ljust(10) + 'BLESS'.ljust(10) + 'MBNoC'.ljust(10) + 'BLESS'.ljust(10) + 'MBNoC'.ljust(10) + 'BLESS'.ljust(10) + 'MBNoC'.ljust(10) + 'BLESS'.ljust(10) + 'MBNoC'.ljust(10) + '\n'
output_str = '\n\n############# ' + 'Traffic = ' + traffic.ljust(20) + 'Network = ' + network.ljust(20) + ' ################\n\n'
type_stat = len(stat) / len(design)
#for i in range (0, type_stat):
space = (len(design)+1)*10
output_str = output_str + 'Energy'.ljust(space) + 'Latency'.ljust(space) + 'Throughput'.ljust(space) + 'Deflect_rate'.ljust(space) + '\n\n'
for i in range (1, 80, 1):
load = "{:.2f}".format(float(i)/100)
for j in range (0, len(stat)):
            if j % len(design) == 0:
output_str = output_str + load.ljust(10)
if load in stat[j]:
output_str = output_str + str(stat[j][load]).ljust(10)
else:
output_str = output_str + '-'.ljust(10)
output_str = output_str + '\n'
#for i in range (0, len(stat[0])):
# for j in range (0, len(stat)):
# output_str = output_str + str(stat[j][i]).ljust(10)
# output_str = output_str + '\n'
output_str = output_str + '********* Based on %u data points ************' % len(stat[0])
print output_str
def print_synth_wrt_load (stat, design):
traffic = str(stat.pop(0))
network = str(stat.pop(0))
output_str = '\n\n############# ' + 'Traffic = ' + traffic.ljust(20) + 'Network = ' + network.ljust(20) + ' ################\n\n'
type_stat = len(stat) / len(design)
#for i in range (0, type_stat):
space = (len(design)+1)*10
output_str = output_str + 'Latency'.ljust(space) + 'Throughput'.ljust(space) + 'Deflect_rate'.ljust(space) + '\n\n'
for i in range (0, type_stat):
output_str = output_str + 'InjRate'.ljust(10)
for element in design:
output_str = output_str + element.ljust(10)
output_str = output_str + '\n'
for i in range (1, 80, 1):
load = "{:.2f}".format(float(i)/100)
for j in range (0, len(stat)):
            if j % len(design) == 0:
output_str = output_str + load.ljust(10)
if load in stat[j]:
output_str = output_str + str(stat[j][load]).ljust(10)
else:
output_str = output_str + '-'.ljust(10)
output_str = output_str + '\n'
output_str = output_str + '********* Based on %u data points ************' % len(stat[0])
print output_str
def print_synth_avg_reduction (stat, design):
output_str = ''
for element in design:
output_str = output_str + element.ljust(10)
baseline = stat[0]
output_str = output_str + '\n' + '1'.ljust(10)
stat.pop(0)
for element in stat:
reduction = ''
if baseline > 0: reduction = "{:.2f}".format((baseline - element) / baseline)
output_str = output_str + reduction.ljust(10)
output_str = output_str + '\n'
print output_str
def print_synth_avg_gain (stat, design):
output_str = ''
for element in design:
output_str = output_str + element.ljust(10)
baseline = stat[0]
output_str = output_str + '\n' + '1'.ljust(10)
stat.pop(0)
for element in stat:
reduction = ''
if baseline > 0: reduction = "{:.2f}".format((element - baseline) / baseline)
output_str = output_str + reduction.ljust(10)
output_str = output_str + '\n'
print output_str
def print_final (stat, design):
output_str = ''
for element in design:
output_str = output_str + element.ljust(10)
output_str = output_str + '\n'
for element in stat:
output_str = output_str + "{:.2f}".format(float(element)).ljust(10)
output_str = output_str + '\n'
print output_str
| mit |
gfyoung/pandas | pandas/tests/arrays/integer/test_function.py | 5 | 6401 | import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import FloatingArray
@pytest.mark.parametrize("ufunc", [np.abs, np.sign])
# np.sign emits a warning with nans, <https://github.com/numpy/numpy/issues/15127>
@pytest.mark.filterwarnings("ignore:invalid value encountered in sign")
def test_ufuncs_single_int(ufunc):
a = pd.array([1, 2, -3, np.nan])
result = ufunc(a)
expected = pd.array(ufunc(a.astype(float)), dtype="Int64")
tm.assert_extension_array_equal(result, expected)
s = pd.Series(a)
result = ufunc(s)
expected = pd.Series(pd.array(ufunc(a.astype(float)), dtype="Int64"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.log, np.exp, np.sin, np.cos, np.sqrt])
def test_ufuncs_single_float(ufunc):
a = pd.array([1, 2, -3, np.nan])
with np.errstate(invalid="ignore"):
result = ufunc(a)
expected = FloatingArray(ufunc(a.astype(float)), mask=a._mask)
tm.assert_extension_array_equal(result, expected)
s = pd.Series(a)
with np.errstate(invalid="ignore"):
result = ufunc(s)
expected = pd.Series(expected)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.add, np.subtract])
def test_ufuncs_binary_int(ufunc):
# two IntegerArrays
a = pd.array([1, 2, -3, np.nan])
result = ufunc(a, a)
expected = pd.array(ufunc(a.astype(float), a.astype(float)), dtype="Int64")
tm.assert_extension_array_equal(result, expected)
# IntegerArray with numpy array
arr = np.array([1, 2, 3, 4])
result = ufunc(a, arr)
expected = pd.array(ufunc(a.astype(float), arr), dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = ufunc(arr, a)
expected = pd.array(ufunc(arr, a.astype(float)), dtype="Int64")
tm.assert_extension_array_equal(result, expected)
# IntegerArray with scalar
result = ufunc(a, 1)
expected = pd.array(ufunc(a.astype(float), 1), dtype="Int64")
tm.assert_extension_array_equal(result, expected)
result = ufunc(1, a)
expected = pd.array(ufunc(1, a.astype(float)), dtype="Int64")
tm.assert_extension_array_equal(result, expected)
def test_ufunc_binary_output():
a = pd.array([1, 2, np.nan])
result = np.modf(a)
expected = np.modf(a.to_numpy(na_value=np.nan, dtype="float"))
expected = (pd.array(expected[0]), pd.array(expected[1]))
assert isinstance(result, tuple)
assert len(result) == 2
for x, y in zip(result, expected):
tm.assert_extension_array_equal(x, y)
@pytest.mark.parametrize("values", [[0, 1], [0, None]])
def test_ufunc_reduce_raises(values):
a = pd.array(values)
msg = r"The 'reduce' method is not supported."
with pytest.raises(NotImplementedError, match=msg):
np.add.reduce(a)
@pytest.mark.parametrize(
"pandasmethname, kwargs",
[
("var", {"ddof": 0}),
("var", {"ddof": 1}),
("kurtosis", {}),
("skew", {}),
("sem", {}),
],
)
def test_stat_method(pandasmethname, kwargs):
s = pd.Series(data=[1, 2, 3, 4, 5, 6, np.nan, np.nan], dtype="Int64")
pandasmeth = getattr(s, pandasmethname)
result = pandasmeth(**kwargs)
s2 = pd.Series(data=[1, 2, 3, 4, 5, 6], dtype="Int64")
pandasmeth = getattr(s2, pandasmethname)
expected = pandasmeth(**kwargs)
assert expected == result
def test_value_counts_na():
arr = pd.array([1, 2, 1, pd.NA], dtype="Int64")
result = arr.value_counts(dropna=False)
expected = pd.Series([2, 1, 1], index=[1, 2, pd.NA], dtype="Int64")
tm.assert_series_equal(result, expected)
result = arr.value_counts(dropna=True)
expected = pd.Series([2, 1], index=[1, 2], dtype="Int64")
tm.assert_series_equal(result, expected)
def test_value_counts_empty():
# https://github.com/pandas-dev/pandas/issues/33317
s = pd.Series([], dtype="Int64")
result = s.value_counts()
# TODO: The dtype of the index seems wrong (it's int64 for non-empty)
idx = pd.Index([], dtype="object")
expected = pd.Series([], index=idx, dtype="Int64")
tm.assert_series_equal(result, expected)
def test_value_counts_with_normalize():
# GH 33172
s = pd.Series([1, 2, 1, pd.NA], dtype="Int64")
result = s.value_counts(normalize=True)
expected = pd.Series([2, 1], index=[1, 2], dtype="Float64") / 3
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("min_count", [0, 4])
def test_integer_array_sum(skipna, min_count, any_nullable_int_dtype):
dtype = any_nullable_int_dtype
arr = pd.array([1, 2, 3, None], dtype=dtype)
result = arr.sum(skipna=skipna, min_count=min_count)
if skipna and min_count == 0:
assert result == 6
else:
assert result is pd.NA
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("method", ["min", "max"])
def test_integer_array_min_max(skipna, method, any_nullable_int_dtype):
dtype = any_nullable_int_dtype
arr = pd.array([0, 1, None], dtype=dtype)
func = getattr(arr, method)
result = func(skipna=skipna)
if skipna:
assert result == (0 if method == "min" else 1)
else:
assert result is pd.NA
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("min_count", [0, 9])
def test_integer_array_prod(skipna, min_count, any_nullable_int_dtype):
dtype = any_nullable_int_dtype
arr = pd.array([1, 2, None], dtype=dtype)
result = arr.prod(skipna=skipna, min_count=min_count)
if skipna and min_count == 0:
assert result == 2
else:
assert result is pd.NA
@pytest.mark.parametrize(
"values, expected", [([1, 2, 3], 6), ([1, 2, 3, None], 6), ([None], 0)]
)
def test_integer_array_numpy_sum(values, expected):
arr = pd.array(values, dtype="Int64")
result = np.sum(arr)
assert result == expected
@pytest.mark.parametrize("op", ["sum", "prod", "min", "max"])
def test_dataframe_reductions(op):
# https://github.com/pandas-dev/pandas/pull/32867
# ensure the integers are not cast to float during reductions
df = pd.DataFrame({"a": pd.array([1, 2], dtype="Int64")})
    result = getattr(df, op)()
assert isinstance(result["a"], np.int64)
# TODO(jreback) - these need testing / are broken
# shift
# set_index (destroys type)
| bsd-3-clause |
nolanliou/tensorflow | tensorflow/contrib/learn/python/learn/estimators/kmeans.py | 15 | 10904 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of k-means clustering on top of `Estimator` API.
This module is deprecated. Please use
@{tf.contrib.factorization.KMeansClustering} instead of
@{tf.contrib.learn.KMeansClustering}. It has a similar interface, but uses the
@{tf.estimator.Estimator} API instead of @{tf.contrib.learn.Estimator}.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib.factorization.python.ops import clustering_ops
from tensorflow.python.training import training_util
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import session_run_hook
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.util.deprecation import deprecated
_USE_TF_CONTRIB_FACTORIZATION = (
'Please use tf.contrib.factorization.KMeansClustering instead of'
' tf.contrib.learn.KMeansClustering. It has a similar interface, but uses'
' the tf.estimator.Estimator API instead of tf.contrib.learn.Estimator.')
class _LossRelativeChangeHook(session_run_hook.SessionRunHook):
"""Stops when the change in loss goes below a tolerance."""
def __init__(self, tolerance):
"""Initializes _LossRelativeChangeHook.
Args:
tolerance: A relative tolerance of change between iterations.
"""
self._tolerance = tolerance
self._prev_loss = None
def begin(self):
self._loss_tensor = ops.get_default_graph().get_tensor_by_name(
KMeansClustering.LOSS_OP_NAME + ':0')
assert self._loss_tensor is not None
def before_run(self, run_context):
del run_context
return SessionRunArgs(
fetches={KMeansClustering.LOSS_OP_NAME: self._loss_tensor})
def after_run(self, run_context, run_values):
loss = run_values.results[KMeansClustering.LOSS_OP_NAME]
assert loss is not None
if self._prev_loss is not None:
relative_change = (abs(loss - self._prev_loss) /
(1 + abs(self._prev_loss)))
if relative_change < self._tolerance:
run_context.request_stop()
self._prev_loss = loss
class _InitializeClustersHook(session_run_hook.SessionRunHook):
"""Initializes clusters or waits for cluster initialization."""
def __init__(self, init_op, is_initialized_op, is_chief):
self._init_op = init_op
self._is_chief = is_chief
self._is_initialized_op = is_initialized_op
def after_create_session(self, session, _):
assert self._init_op.graph == ops.get_default_graph()
assert self._is_initialized_op.graph == self._init_op.graph
while True:
try:
if session.run(self._is_initialized_op):
break
elif self._is_chief:
session.run(self._init_op)
else:
time.sleep(1)
except RuntimeError as e:
logging.info(e)
def _parse_tensor_or_dict(features):
"""Helper function to parse features."""
if isinstance(features, dict):
keys = sorted(features.keys())
with ops.colocate_with(features[keys[0]]):
features = array_ops.concat([features[k] for k in keys], 1)
return features
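# Illustrative example: for features == {'x2': t2, 'x1': t1} with t1 of shape
# [batch, 2] and t2 of shape [batch, 3], the keys are sorted to ['x1', 'x2']
# and the tensors are concatenated along axis 1 into a single [batch, 5]
# matrix, colocated with features['x1'].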
def _kmeans_clustering_model_fn(features, labels, mode, params, config):
"""Model function for KMeansClustering estimator."""
assert labels is None, labels
(all_scores, model_predictions, losses,
is_initialized, init_op, training_op) = clustering_ops.KMeans(
_parse_tensor_or_dict(features),
params.get('num_clusters'),
initial_clusters=params.get('training_initial_clusters'),
distance_metric=params.get('distance_metric'),
use_mini_batch=params.get('use_mini_batch'),
mini_batch_steps_per_iteration=params.get(
'mini_batch_steps_per_iteration'),
random_seed=params.get('random_seed'),
kmeans_plus_plus_num_retries=params.get(
'kmeans_plus_plus_num_retries')).training_graph()
incr_step = state_ops.assign_add(training_util.get_global_step(), 1)
loss = math_ops.reduce_sum(losses, name=KMeansClustering.LOSS_OP_NAME)
summary.scalar('loss/raw', loss)
training_op = with_dependencies([training_op, incr_step], loss)
predictions = {
KMeansClustering.ALL_SCORES: all_scores[0],
KMeansClustering.CLUSTER_IDX: model_predictions[0],
}
eval_metric_ops = {KMeansClustering.SCORES: loss}
training_hooks = [_InitializeClustersHook(
init_op, is_initialized, config.is_chief)]
relative_tolerance = params.get('relative_tolerance')
if relative_tolerance is not None:
training_hooks.append(_LossRelativeChangeHook(relative_tolerance))
return ModelFnOps(
mode=mode,
predictions=predictions,
eval_metric_ops=eval_metric_ops,
loss=loss,
train_op=training_op,
training_hooks=training_hooks)
# TODO(agarwal,ands): support sharded input.
class KMeansClustering(estimator.Estimator):
"""An Estimator for K-Means clustering."""
SQUARED_EUCLIDEAN_DISTANCE = clustering_ops.SQUARED_EUCLIDEAN_DISTANCE
COSINE_DISTANCE = clustering_ops.COSINE_DISTANCE
RANDOM_INIT = clustering_ops.RANDOM_INIT
KMEANS_PLUS_PLUS_INIT = clustering_ops.KMEANS_PLUS_PLUS_INIT
SCORES = 'scores'
CLUSTER_IDX = 'cluster_idx'
CLUSTERS = 'clusters'
ALL_SCORES = 'all_scores'
LOSS_OP_NAME = 'kmeans_loss'
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def __init__(self,
num_clusters,
model_dir=None,
initial_clusters=RANDOM_INIT,
distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
random_seed=0,
use_mini_batch=True,
mini_batch_steps_per_iteration=1,
kmeans_plus_plus_num_retries=2,
relative_tolerance=None,
config=None):
"""Creates a model for running KMeans training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
initial_clusters: specifies how to initialize the clusters for training.
See clustering_ops.kmeans for the possible values.
distance_metric: the distance metric used for clustering.
See clustering_ops.kmeans for the possible values.
random_seed: Python integer. Seed for PRNG used to initialize centers.
use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume
full batch.
mini_batch_steps_per_iteration: number of steps after which the updated
cluster centers are synced back to a master copy. See clustering_ops.py
for more details.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample O(log(num_to_sample)) additional points.
relative_tolerance: A relative tolerance of change in the loss between
iterations. Stops learning if the loss changes less than this amount.
Note that this may not work correctly if use_mini_batch=True.
config: See Estimator
"""
params = {}
params['num_clusters'] = num_clusters
params['training_initial_clusters'] = initial_clusters
params['distance_metric'] = distance_metric
params['random_seed'] = random_seed
params['use_mini_batch'] = use_mini_batch
params['mini_batch_steps_per_iteration'] = mini_batch_steps_per_iteration
params['kmeans_plus_plus_num_retries'] = kmeans_plus_plus_num_retries
params['relative_tolerance'] = relative_tolerance
super(KMeansClustering, self).__init__(
model_fn=_kmeans_clustering_model_fn,
params=params,
model_dir=model_dir,
config=config)
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def predict_cluster_idx(self, input_fn=None):
"""Yields predicted cluster indices."""
key = KMeansClustering.CLUSTER_IDX
results = super(KMeansClustering, self).predict(
input_fn=input_fn, outputs=[key])
for result in results:
yield result[key]
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def score(self, input_fn=None, steps=None):
"""Predict total sum of distances to nearest clusters.
Note that this function is different from the corresponding one in sklearn
which returns the negative of the sum of distances.
Args:
input_fn: see predict.
steps: see predict.
Returns:
Total sum of distances to nearest clusters.
"""
return np.sum(
self.evaluate(
input_fn=input_fn, steps=steps)[KMeansClustering.SCORES])
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def transform(self, input_fn=None, as_iterable=False):
"""Transforms each element to distances to cluster centers.
Note that this function is different from the corresponding one in sklearn.
For SQUARED_EUCLIDEAN distance metric, sklearn transform returns the
EUCLIDEAN distance, while this function returns the SQUARED_EUCLIDEAN
distance.
Args:
input_fn: see predict.
as_iterable: see predict
Returns:
Array with same number of rows as x, and num_clusters columns, containing
distances to the cluster centers.
"""
key = KMeansClustering.ALL_SCORES
results = super(KMeansClustering, self).predict(
input_fn=input_fn,
outputs=[key],
as_iterable=as_iterable)
if not as_iterable:
return results[key]
else:
return results
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def clusters(self):
"""Returns cluster centers."""
return super(KMeansClustering, self).get_variable_value(self.CLUSTERS)
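# Illustrative usage sketch (not part of the original module). Assumes `tf` is
# the imported tensorflow package and `points` is a float32 numpy array of
# shape [num_points, num_dims]:
#
#   kmeans = KMeansClustering(num_clusters=3, use_mini_batch=False,
#                             relative_tolerance=1e-4)
#   kmeans.fit(input_fn=lambda: (tf.constant(points), None), steps=100)
#   centers = kmeans.clusters()
#   assignments = list(kmeans.predict_cluster_idx(
#       input_fn=lambda: (tf.constant(points), None)))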
| apache-2.0 |
abretaud/tools-iuc | tools/heinz/heinz_scoring.py | 21 | 3661 | #!/usr/bin/env python
"""Calculate scores for Heinz.
This script transform a p-value into a score:
1. Use alpha and lambda to calculate a threshold P-value.
2. Calculate a score based on each P-value by alpha and the threshold.
For more details, please refer to the paper doi:10.1093/bioinformatics/btn161
Input:
P-values from DESeq2 result: first column: names, second column P-values
Output:
Scores, which will be used as the input of Heinz.
First column: names, second column: scores.
Python 3 is required.
"""
# Implemented by: Chao (Cico) Zhang
# Homepage: https://Hi-IT.org
# Date: 14 Mar 2017
# Last modified: 23 May 2018
import argparse
import sys
import numpy as np
import pandas as pd
parser = argparse.ArgumentParser(description='Transform a P-value into a '
'score which can be used as the input of '
'Heinz')
parser.add_argument('-n', '--node', required=True, dest='nodes',
metavar='nodes_pvalue.txt', type=str,
help='Input file of nodes with P-values')
parser.add_argument('-f', '--fdr', required=True, dest='fdr',
metavar='0.007', type=float, help='Choose a value of FDR')
parser.add_argument('-m', '--model', required=False, dest='param_file',
metavar='param.txt', type=str,
help='A txt file contains model params as input')
parser.add_argument('-a', '--alpha', required=False, dest='alpha',
metavar='0.234', type=float, default=0.5,
help='Single parameter alpha as input if txt input is '
'not provided')
parser.add_argument('-l', '--lambda', required=False, dest='lam',
metavar='0.345', type=float, default=0.5,
help='Single parameter lambda as input if txt input is '
'not provided')
parser.add_argument('-o', '--output', required=True, dest='output',
metavar='scores.txt', type=str,
help='The output file to store the calculated scores')
args = parser.parse_args()
# Check if the parameters are complete
if args.output is None:
sys.exit('Output file is not designated.')
if args.nodes is None:
sys.exit('Nodes with p-values must be provided.')
if args.fdr is None:
sys.exit('FDR must be provided')
if args.fdr >= 1 or args.fdr <= 0:
sys.exit('FDR must greater than 0 and smaller than 1')
# run heinz-print according to the input type
if args.param_file is not None: # if BUM output is provided
with open(args.param_file) as p:
params = p.readlines()
lam = float(params[0]) # Maybe this is a bug
alpha = float(params[1]) # Maybe this is a bug
# if BUM output is not provided
elif args.alpha is not None and args.lam is not None:
lam = args.lam
alpha = args.alpha
else: # The input is not complete
sys.exit('The parameters of the model are incomplete.')
# Calculate the threshold P-value
pie = lam + (1 - lam) * alpha
p_threshold = np.power((pie - lam * args.fdr) / (args.fdr - lam * args.fdr),
1 / (alpha - 1))
print(p_threshold)
# Calculate the scores
input_pvalues = pd.read_csv(args.nodes, sep='\t', names=['node', 'pvalue'])
input_pvalues.loc[:, 'score'] = input_pvalues.pvalue.apply(lambda x:
(alpha - 1) * (np.log(x) - np.log(p_threshold)))
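# A quick sanity check of the transformation above (added note, illustrative only):
# writing tau for p_threshold, each score equals (alpha - 1) * (ln(p) - ln(tau)),
# so for the usual BUM fit with 0 < alpha < 1 a node gets a positive score exactly
# when its P-value falls below tau. For example, with alpha = 0.5 and tau = 0.01:
#   score(0.001) = -0.5 * ln(0.1) ~ +1.15
#   score(0.1)   = -0.5 * ln(10)  ~ -1.15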
# print(input_pvalues.loc[:, ['node', 'score']])
input_pvalues.loc[:, ['node', 'score']].to_csv(args.output, sep='\t',
index=False, header=False)
| mit |
MechCoder/scikit-learn | examples/neighbors/plot_kde_1d.py | 60 | 5120 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
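#----------------------------------------------------------------------
# Optional extra (not part of the original example): choosing the bandwidth by
# cross-validated likelihood instead of fixing it at 0.5. This sketch assumes a
# scikit-learn version that provides sklearn.model_selection (>= 0.18);
# KernelDensity.score returns a total log-likelihood, which GridSearchCV can use
# directly as its selection criterion.
from sklearn.model_selection import GridSearchCV
grid = GridSearchCV(KernelDensity(kernel='gaussian'),
                    {'bandwidth': np.linspace(0.1, 1.0, 10)},
                    cv=5)
grid.fit(X)
print("best bandwidth: {0:.2f}".format(grid.best_params_['bandwidth']))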
plt.show()
| bsd-3-clause |
mjgrav2001/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
gingi99/research_dr | python/MLEM2/discrimination.py | 1 | 12551 | # coding: utf-8
# python 3.5
import sys
import os
sys.path.append('/Users/ooki/git/research_dr/python/MLEM2')
sys.path.append(os.path.dirname(os.path.abspath("__file__"))+'/../MLEM2')
from sklearn.metrics import accuracy_score
import copy
import importlib
import mlem2
import LERS
importlib.reload(mlem2)
importlib.reload(LERS)
from rules_stat import getNumRulesClass
from rules_stat import getRulesValueCount
# =====================================
# Remove the sensitive attributes list_s (those requiring fairness consideration) from the decision_table
# =====================================
def delDiscriminativeAttributes(decision_table, list_s):
return(decision_table.drop(list_s, axis=1))
# =====================================
# Return the number of rules that contain attribute attr / elementary condition e (attribute attr with value v)
# =====================================
def getNumRulesIncludeAttr(list_rules, attr) :
rules = [r for r in list_rules if attr in r.getKey()]
return(len(rules))
def getNumRulesIncludeE(list_rules, attr, v) :
rules = [r for r in list_rules if r.getValue(attr) == v]
return(len(rules))
def getNumRulesClassIncludeAttr(list_rules, attr, cls) :
rules = [r for r in list_rules if (attr in r.getKey()) and r.getConsequent() == cls]
return(len(rules))
def getNumRulesClassIncludeE(list_rules, attr, v, cls) :
rules = [r for r in list_rules if r.getValue(attr) == v and r.getConsequent() == cls]
return(len(rules))
def getNumRulesIncludeMultipleE(list_rules, dict_attribute_value):
tmp_rules = list_rules
for attr in dict_attribute_value.keys():
for v in dict_attribute_value[attr] :
tmp_rules = [r for r in tmp_rules if r.getValue(attr) == v]
return(len(tmp_rules))
def getNumRulesClassIncludeMultipleE(list_rules, dict_attribute_value, cls):
tmp_rules = list_rules
for attr in dict_attribute_value.keys():
for v in dict_attribute_value[attr] :
tmp_rules = [r for r in tmp_rules if r.getValue(attr) == v and r.getConsequent() == cls]
return(len(tmp_rules))
# ======================================
# Return the contingency-table entries a, b, c, d
# ======================================
def getContingencyTable(list_rules, dict_attribute_value, CLASSES):
N = len(list_rules)
n1 = getNumRulesClass(list_rules, CLASSES["bad"])
n2 = getNumRulesClass(list_rules, CLASSES["good"])
a = getNumRulesClassIncludeMultipleE(list_rules, dict_attribute_value, CLASSES["bad"])
b = n1 - a
c = getNumRulesClassIncludeMultipleE(list_rules, dict_attribute_value, CLASSES["good"])
d = n2 - c
return(a,b,c,d)
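# Illustration (added note): the four counts returned above form the 2x2 contingency table
#
#                        condition holds    condition absent
#   consequent == bad           a                  b            (n1 = a + b)
#   consequent == good          c                  d            (n2 = c + d)
#
# so a + b + c + d counts every rule whose consequent is one of the two classes.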
# =====================================
# Return the rules that contain attribute attr / elementary condition e (attribute attr with value v)
# =====================================
def getRulesIncludeAttr(list_rules, attr) :
rules = [r for r in list_rules if attr in r.getKey()]
return(rules)
def getRulesIncludeE(list_rules, attr, v) :
rules = [r for r in list_rules if r.getValue(attr) == v]
return(rules)
# =====================================
# Return the rules that do not contain attribute attr / elementary condition e
# =====================================
def getRulesExcludeAttr(list_rules, attr) :
rules = [r for r in list_rules if not attr in r.getKey()]
return(rules)
def getRulesExcludeE(list_rules, attr, v) :
rules = [r for r in list_rules if r.getValue(attr) != v]
return(rules)
# =====================================
# Return the rule set with attribute attr / elementary condition e removed from every rule
# Return a copy of a single rule with attribute attr / elementary condition e removed
# =====================================
def getRulesDelAttr(list_rules, attr) :
rules = [delAttrFromRule(r, attr) for r in list_rules]
return(rules)
def getRulesDelE(list_rules, attr, v) :
rules = [delEFromRule(r, attr, v) for r in list_rules]
return(rules)
def delAttrFromRule(rule, attr) :
rule_new = copy.deepcopy(rule)
rule_new.delKey(attr)
return(rule_new)
def delEFromRule(rule, attr, v) :
if rule.getValue(attr) == v : return(delAttrFromRule(rule, attr))
else : return(rule)
# =====================================
# Return the rule set excluding alpha-discriminatory rules
# Return the rules with the elementary condition e removed from alpha-discriminatory rules
# =====================================
def getAlphaRulesExcludeE(list_rules, attr, v, decision_table, list_judgeNominal, alpha = 0) :
rules = [r for r in list_rules if getElift(r, attr, v, decision_table, list_judgeNominal) <= alpha ]
return(rules)
def getAlphaRulesDelE(list_rules, attr, v, decision_table, list_judgeNominal, alpha = 0) :
rules = [delEFromAlphaRule(r, attr, v, decision_table, list_judgeNominal, alpha = 0) for r in list_rules]
return(rules)
def delEFromAlphaRule(rule, attr, v, decision_table, list_judgeNominal, alpha = 0):
if rule.getValue(attr) == v :
elift = getElift(rule, attr, v, decision_table, list_judgeNominal)
if elift > alpha : return(delAttrFromRule(rule, attr))
else : return(rule)
else :
return(rule)
# =====================================
# Return the rule set excluding M-discriminatory rules / with the elementary condition e removed from them
# =====================================
def getMRulesFUN(list_rules, attr, v, target_cls, DELFUN, m = 0) :
num_target_cls, num_other_cls, list_num_other_cls = 0, 0, []
classes = mlem2.getEstimatedClass(list_rules)
for cls in classes :
if cls == target_cls :
num_target_cls = getNumRulesClassIncludeE(list_rules, attr, v, cls)
else :
list_num_other_cls.append(getNumRulesClassIncludeE(list_rules, attr, v, cls))
    num_other_cls = sum(list_num_other_cls) / len(list_num_other_cls) # average over the other classes (handles the multi-class case)
    if (num_target_cls / (num_target_cls + num_other_cls)) > m : # if the condition is m-protected
return(list_rules)
else :
return(DELFUN(list_rules, attr, v))
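# Note (added for clarity): in getMRulesFUN the rules are returned unchanged when the
# fraction of rules concluding target_cls, among the rules containing (attr == v),
# exceeds m; otherwise the whole rule list is handed to DELFUN (e.g. getRulesExcludeE
# or getRulesDelE) to drop or strip that condition.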
# =====================================
# Build a decision table containing only the objects covered by rules with the sensitive condition
# =====================================
def createDTSuppoterdbyRule(list_rules, attr, v, cls, decision_table):
target_indice = []
target_rules = [r for r in list_rules if r.getValue(attr) == v and r.getConsequent() == cls]
for rule in target_rules:
target_indice.extend(rule.getSupport())
target_indice = list(set(target_indice))
target_indice = sorted(target_indice)
    new_decision_table = decision_table.ix[target_indice]
new_decision_class = new_decision_table[new_decision_table.columns[-1]].values.tolist()
return(new_decision_table, new_decision_class)
# Function to reduce the rules of the favourable decision class using the sensitive attribute s
# =====================================
# elift of a rule for the sensitive attribute s on the decision_table
# =====================================
def getElift(rule, attr, v, decision_table, list_judgeNominal):
supp, conf = LERS.getSupportConfidence(rule, decision_table, list_judgeNominal)
rule_s = delEFromRule(rule, attr, v)
supp_s, conf_s = LERS.getSupportConfidence(rule_s, decision_table, list_judgeNominal)
if conf_s == 0: elift = 999
else : elift = conf / conf_s
return(elift)
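# Note (added for clarity): elift compares the confidence of the full rule with the
# confidence of the same rule once the sensitive condition (attr == v) is dropped,
# i.e. elift = conf(A and s -> c) / conf(A -> c). Values well above 1 indicate that
# the sensitive condition itself drives the decision; 999 is a sentinel for the
# degenerate case conf_s == 0.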
# =====================================
# slift of a rule for the sensitive attribute s on the decision_table
# =====================================
def getSlift(rule, s, decision_table, operator):
conditions = mlem2.getConditionValues(decision_table, s)
clifts = [getClift(rule, s, c, decision_table) for c in conditions]
slift = operator(clifts)
return(slift)
# =====================================
# clift of a rule for the sensitive attribute s and an alternative value c on the decision_table
# =====================================
def getClift(rule, s, c, decision_table, list_judgeNominal):
supp, conf = LERS.getSupportConfidence(rule, decision_table,list_judgeNominal)
rule_c = mlem2.delEfromRule(rule,s)
rule_c = rule_c.setValue(s,c)
supp_c, conf_c = LERS.getSupportConfidence(rule_c, decision_table, list_judgeNominal)
clift = conf / conf_c
return(clift)
# ====================================
# Return the attribute-value dict as a single string
# ====================================
def strAttributeValue(ATTRIBUTE_VALUE) :
list_string = []
for i in ATTRIBUTE_VALUE :
list_string.append(i+"-".join(ATTRIBUTE_VALUE[i]))
return("+".join(list_string))
# ====================================
# Build an item set from a rule's attribute-value dict (for Jaccard similarity)
# ====================================
def getItemSet(rule_value) :
itemset = set()
for attr in rule_value :
itemset.add(attr+"-".join(rule_value[attr]))
return(itemset)
def jaccard(set1, set2):
set_and = set1 & set2
set_or = set1 | set2
if len(set_or) == 0 :
return(0)
else :
return(len(set_and)/len(set_or))
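# Example (added for illustration): jaccard({"a", "b"}, {"b", "c"}) == 1/3, and
# getItemSet turns a rule's {attribute: values} dict into such a set, so two rules
# can be compared by the overlap of their conditions.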
# ========================================
# main
# ========================================
if __name__ == "__main__":
    # Settings
DIR_UCI = '/mnt/data/uci/'
FILENAME = 'german_credit_categorical'
iter1 = 1
iter2 = 1
# rule induction
rules = mlem2.getRulesByMLEM2(FILENAME, iter1, iter2)
# test data
filepath = DIR_UCI+FILENAME+'/'+FILENAME+'-test'+str(iter1)+'-'+str(iter2)+'.tsv'
decision_table_test = mlem2.getDecisionTable(filepath)
decision_table_test = decision_table_test.dropna()
decision_class = decision_table_test[decision_table_test.columns[-1]].values.tolist()
# nominal data
filepath = DIR_UCI+FILENAME+'/'+FILENAME+'.nominal'
list_nominal = mlem2.getNominalList(filepath)
list_judgeNominal = mlem2.getJudgeNominal(decision_table_test, list_nominal)
# predict by LERS
predictions = LERS.predictByLERS(rules, decision_table_test, list_judgeNominal)
    # Compute the accuracy
accuracy_score(decision_class, predictions)
    # Count the number of rules
num = len(rules)
    # Compute the mean rule length
mean_length = mlem2.getMeanLength(rules)
# train data setup
decision_table_train, decision_class = getData(FILENAME, iter1, iter2, T = "train")
list_judgeNominal = getJudgeNominal(decision_table_train, FILENAME)
    # Compute the mean support and mean confidence
mean_support, mean_conf = LERS.getSupportConfidenceRules(rules, decision_table_train, list_judgeNominal)
    # Compute accuracy and recall
acc_recall = LERS.getAccurayRecall(rules, decision_table_train, list_judgeNominal)
for i,c in enumerate(mlem2.getEstimatedClass(rules)):
print(str(acc_recall[i][0])+","+str(acc_recall[i][1]))
    ###### Tests of the fairness-aware operations
    # Rule sets that contain a given elementary condition
rules_sex_2 = mlem2.getRulesIncludeE(rules, "Sex_Marital_Status", "2.0")
rules_sex_4 = mlem2.getRulesIncludeE(rules, "Sex_Marital_Status", "4.0")
    # Rule set that does not contain the attribute
rules_exclude_sex = mlem2.getRulesExcludeAttr(rules, "Sex_Marital_Status")
    # Rule set that does not contain the elementary condition
rules_exclude_sex_1 = mlem2.getRulesExcludeE(rules, "Sex_Marital_Status", "1.0")
    # Rule set with the attribute removed
rules_del_value = mlem2.getRulesDelAttr(rules, "Value_Savings_Stocks")
    # Rule set with the elementary condition removed
rules_del_value_1 = mlem2.getRulesDelE(rules, "Value_Savings_Stocks", "1.0")
    # Example of removing a single condition from a rule
rule = mlem2.delAttrFromRule(rules[12],'No_of_dependents')
rule = mlem2.delAttrFromRule(rules[12],'Concurrent_Credits')
# ====
# read data
filepath = '/mnt/data/uci/'+FILENAME+'/'+FILENAME+'-train'+str(iter1)+'-'+str(iter2)+'.tsv'
decision_table = mlem2.getDecisionTable(filepath)
decision_table = decision_table.dropna()
decision_table.index = range(decision_table.shape[0])
# read nominal
filepath = '/mnt/data/uci/'+'/'+FILENAME+'/'+FILENAME+'.nominal'
list_nominal = mlem2.getNominalList(filepath)
    # Objects that satisfy the rule -- there are hardly any, actually
match_objects = decision_table.apply(lambda obj: isExplainRule(obj, rules[12], list_judgeNominal), axis=1)
# confidence
getConfidence(rule, decision_table, list_judgeNominal)
rules_sex_2 = mlem2.getRulesIncludeE(rules, "Sex_Marital_Status","2.0")
| mit |
istellartech/OpenGoddard | examples/11_Polar_TSTO_Taiki.py | 1 | 19117 | # -*- coding: utf-8 -*-
# Copyright 2017 Interstellar Technologies Inc. All Rights Reserved.
from __future__ import print_function
import numpy as np
from scipy import interpolate
import matplotlib.pyplot as plt
from OpenGoddard.optimize import Problem, Guess, Condition, Dynamics
class Rocket:
# Atmosphere Parameter
# Use US Standard Atmosphere 1976
stdAtmo = np.loadtxt("./11_Polar_TSTO_Taiki/US_standard_atmosphere.csv",delimiter=",",skiprows=2)
stdAltitude = stdAtmo[:,0] * 1000.0 #converted to km -> m
stdPressure= stdAtmo[:,2] # [Pa]
stdDensity= stdAtmo[:,3] # [kg/m3]
stdSoundSpeed = stdAtmo[:,4] # [m/s]
    # Linear interpolators for the atmosphere tables
    # Outside the altitude range (<0 km, >86 km), fill_value is used for extrapolation
airPressure = interpolate.interp1d(stdAltitude, stdPressure, bounds_error = False, fill_value = (stdPressure[0], 0.0))
airDensity = interpolate.interp1d(stdAltitude, stdDensity, bounds_error = False, fill_value = (stdDensity[0], 0.0))
airSound = interpolate.interp1d(stdAltitude, stdSoundSpeed, bounds_error = False, fill_value = (stdSoundSpeed[0], stdSoundSpeed[-1]))
# Drag Coefficient
CdLog = np.loadtxt("./11_Polar_TSTO_Taiki/Cd.csv", delimiter=",", skiprows=1)
Cd = interpolate.interp1d(CdLog[:,0], CdLog[:,1],fill_value="extrapolate")
def __init__(self):
# Earth Parameter
self.GMe = 3.986004418 * 10**14 # Earth gravitational constant [m^3/s^2]
self.Re = 6371.0 * 1000 # Earth Radius [m]
self.g0 = 9.80665 # Gravitational acceleration on Earth surface [m/s^2]
# Target Parameter
self.Htarget = 561.0 * 1000 # Altitude [m]
self.Rtarget = self.Re + self.Htarget # Orbit Radius [m]
self.Vtarget = np.sqrt(self.GMe / self.Rtarget) # [m/s]
# Launch Site Parameter
self.lat_taiki = 42.506167 # [deg]
self.Vt_equator = 1674.36 # [km/h]
self.Vt_taiki = self.Vt_equator * np.cos(self.lat_taiki * np.pi / 180.0) * 1000.0 / 3600.0 # Radial Velocity of Earth Surface [m/s]
self.inclination = 96.7 # [deg]
self.V0 = self.Vt_taiki * np.cos(-self.inclination * np.pi / 180.0) # [m/s]
self.H0 = 10.0 # Initial Altitude [m]
# Structure Parameter
        # Mdry is the design parameter here
self.Mdry = [1300.0, 220.0] # Dry Mass [kg], [1st stage, 2nd stage]
self.beta = [10.0, 15.0] # Structure Efficienty [%], [1st stage, 2nd stage]
self.Mpayload = 100.0 # Payload Mass [kg]
self.M0 = [self.Mdry[0] / self.beta[0] * 100.0, self.Mdry[1] / self.beta[1] * 100.0] # Initial Stage Mass [kg], [1st stage, 2nd stage]
self.Mp = [self.M0[0] - self.Mdry[0], self.M0[1] - self.Mdry[1]] # Propellant Mass [kg], [1st stage, 2nd stage]
self.M0[1] = self.M0[1] + self.Mpayload
self.Minit = self.M0[0] + self.M0[1] # Initial Total Mass [kg]
self.d = [1.8, 1.8] # Diameter [m], [1st stage, 2nd stage]
self.A = [0.25 * self.d[0] ** 2 * np.pi, 0.25 * self.d[1] ** 2 * np.pi] # Projected Area [m^2], [1st stage, 2nd stage]
# Engine Parameter
self.Cluster = 9
self.Isp = [261.0 + 0.0, 322.0 + 0.0] # Specific Impulse [s], [1st stage at SL, 2nd stage at vac]
self.dth = [53.9, 53.9] # Throat Diameter [mm], [1st stage, 2nd stage]
self.Ath = [0.25 * (self.dth[0] / 1000.0) ** 2 * np.pi, 0.25 * (self.dth[1] / 1000.0) ** 2 * np.pi] # Throat Area [m^2], [1st stage, 2nd stage]
self.AR = [20.0, 140.0] # Area Ratio, [1st stage, 2nd stage]
self.Ae = [self.Ath[0] * self.AR[0] * self.Cluster, self.Ath[1] * self.AR[1]] # Exit Area [m^2], [1st stage, 2nd stage]
# =======
self.ThrustMax = [33.3, 4.2] # Maximum Thrust [ton], [1st stage at SL, 2nd stage at vac]
self.ThrustMax = [self.ThrustMax[0] * self.g0 * 1000.0, self.ThrustMax[1] * self.g0 * 1000.0] # [N]
# self.ThrustLevel = 1.8 # [G] M0[0] * n G
# self.ThrustMax = [self.M0[0] * self.ThrustLevel * self.g0, self.M0[0] * self.ThrustLevel / self.Cluster * self.g0 + self.airPressure(self.Htarget) * self.Ae[1]] # Maximum Thrust [N], [1st stage at SL, 2nd stage at vac]
# =======
        self.refMdot = [self.ThrustMax[0] / (self.Isp[0] * self.g0), self.ThrustMax[1] / (self.Isp[1] * self.g0)] # reference mass flow rates for the Isp correction
self.MaxQ = 500000.0 # Pa
self.MaxG = 20.0 # G
def dynamics(prob, obj, section):
R = prob.states(0, section) # Orbit Radius [m]
theta = prob.states(1, section) #
Vr = prob.states(2, section)
Vt = prob.states(3, section)
m = prob.states(4, section)
Tr = prob.controls(0, section)
Tt = prob.controls(1, section)
g0 = obj.g0
g = obj.g0 * (obj.Re / R)**2 # [m/s2]
rho = obj.airDensity(R - obj.Re)
Mach = np.sqrt(Vr**2 + Vt**2) / obj.airSound(R - obj.Re)
Cd = obj.Cd(Mach)
dThrust = [(obj.airPressure(obj.H0) - obj.airPressure(R - obj.Re)) * obj.Ae[0], obj.airPressure(R - obj.Re) * obj.Ae[1]]
Isp = obj.Isp[section] + dThrust[section] / (obj.refMdot[section] * g0)
    # With the US Standard Atmosphere, rho = 0 above 86 km, so Drag = 0 there
Dr = 0.5 * rho * Vr * np.sqrt(Vr**2 + Vt**2) * Cd * obj.A[section] # [N]
Dt = 0.5 * rho * Vt * np.sqrt(Vr**2 + Vt**2) * Cd * obj.A[section] # [N]
dx = Dynamics(prob, section)
dx[0] = Vr
dx[1] = Vt / R
dx[2] = Tr / m - Dr / m - g + Vt**2 / R
dx[3] = Tt / m - Dt / m - (Vr * Vt) / R
dx[4] = - np.sqrt(Tr**2 + Tt**2) / (Isp * g0)
return dx()
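# Added note: the state vector is (R, theta, Vr, Vt, m) in polar coordinates, so the
# dynamics above implement
#   dR/dt     = Vr
#   dtheta/dt = Vt / R
#   dVr/dt    = (Tr - Dr) / m - g + Vt**2 / R
#   dVt/dt    = (Tt - Dt) / m - Vr * Vt / R
#   dm/dt     = -sqrt(Tr**2 + Tt**2) / (Isp * g0)
# with drag split into radial/tangential components and Isp corrected for ambient pressure.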
def equality(prob, obj):
R = prob.states_all_section(0)
theta = prob.states_all_section(1)
Vr = prob.states_all_section(2)
Vt = prob.states_all_section(3)
m = prob.states_all_section(4)
Tr = prob.controls_all_section(0)
Tt = prob.controls_all_section(1)
tf = prob.time_final(-1)
R0 = prob.states(0, 0)
R1 = prob.states(0, 1)
theta0 = prob.states(1, 0)
theta1 = prob.states(1, 1)
Vr0 = prob.states(2, 0)
Vr1 = prob.states(2, 1)
Vt0 = prob.states(3, 0)
Vt1 = prob.states(3, 1)
m0 = prob.states(4, 0)
m1 = prob.states(4, 1)
Tr0 = prob.controls(0, 0)
Tr1 = prob.controls(0, 1)
Tt0 = prob.controls(1, 0)
Tt1 = prob.controls(1, 1)
unit_R = prob.unit_states[0][0]
unit_V = prob.unit_states[0][2]
unit_m = prob.unit_states[0][4]
result = Condition()
# event condition
    result.equal(R0[0], obj.Re + obj.H0, unit=unit_R) # initial position on the ground
result.equal(theta0[0], 0.0)
result.equal(Vr0[0], 0.0, unit=unit_V)
result.equal(Vt0[0], obj.V0 , unit=unit_V)
result.equal(m0[0], obj.Minit, unit=unit_m) # (1st stage and 2nd stage and Payload) initial
# knotting condition
result.equal(m1[0], obj.M0[1], unit=unit_m) # (2nd stage + Payload) initial
result.equal(R1[0], R0[-1], unit=unit_R)
result.equal(theta1[0], theta0[-1])
result.equal(Vr1[0], Vr0[-1], unit=unit_V)
result.equal(Vt1[0], Vt0[-1], unit=unit_V)
# Target Condition
result.equal(R1[-1], obj.Rtarget, unit=unit_R) # Radius
result.equal(Vr[-1], 0.0, unit=unit_V) # Radius Velocity
result.equal(Vt[-1], obj.Vtarget, unit=unit_V)
return result()
def inequality(prob, obj):
R = prob.states_all_section(0)
theta = prob.states_all_section(1)
Vr = prob.states_all_section(2)
Vt = prob.states_all_section(3)
m = prob.states_all_section(4)
Tr = prob.controls_all_section(0)
Tt = prob.controls_all_section(1)
tf = prob.time_final(-1)
R0 = prob.states(0, 0)
R1 = prob.states(0, 1)
theta0 = prob.states(1, 0)
theta1 = prob.states(1, 1)
Vr0 = prob.states(2, 0)
Vr1 = prob.states(2, 1)
Vt0 = prob.states(3, 0)
Vt1 = prob.states(3, 1)
m0 = prob.states(4, 0)
m1 = prob.states(4, 1)
Tr0 = prob.controls(0, 0)
Tr1 = prob.controls(0, 1)
Tt0 = prob.controls(1, 0)
Tt1 = prob.controls(1, 1)
rho = obj.airDensity(R - obj.Re)
Mach = np.sqrt(Vr**2 + Vt**2) / obj.airSound(R - obj.Re)
Cd = obj.Cd(Mach)
Dr0 = 0.5 * rho * Vr * np.sqrt(Vr**2 + Vt**2) * Cd * obj.A[0] # [N]
Dt0 = 0.5 * rho * Vt * np.sqrt(Vr**2 + Vt**2) * Cd * obj.A[0] # [N]
Dr1 = 0.5 * rho * Vr * np.sqrt(Vr**2 + Vt**2) * Cd * obj.A[1] # [N]
Dt1 = 0.5 * rho * Vt * np.sqrt(Vr**2 + Vt**2) * Cd * obj.A[1] # [N]
g = obj.g0 * (obj.Re / R)**2 # [m/s2]
# dynamic pressure
q = 0.5 * rho * (Vr**2 + Vt**2) # [Pa]
    # acceleration
a_r0 = (Tr - Dr0) / m
a_t0 = (Tt - Dt0) / m
a_mag0 = np.sqrt(a_r0**2 + a_t0**2) # [m/s2]
a_r1 = (Tr - Dr1) / m
a_t1 = (Tt - Dt1) / m
a_mag1 = np.sqrt(a_r1**2 + a_t1**2) # [m/s2]
# Thrust
T0 = np.sqrt(Tr0**2 + Tt0**2)
T1 = np.sqrt(Tr1**2 + Tt1**2)
dThrust0 = (obj.airPressure(obj.H0) - obj.airPressure(R0 - obj.Re)) * obj.Ae[0]
dThrust1 = obj.airPressure(R1 - obj.Re) * obj.Ae[1]
result = Condition()
# lower bounds
    result.lower_bound(R, obj.Re, unit=prob.unit_states[0][0]) # not below the ground surface
    result.lower_bound(m0, obj.Mdry[0] + obj.M0[1], unit=prob.unit_states[0][4]) # not below the dry mass
result.lower_bound(m1, obj.Mdry[1], unit=prob.unit_states[0][4])
result.lower_bound(Tr, -obj.ThrustMax[1], unit=prob.unit_controls[0][0])
result.lower_bound(Tt, -obj.ThrustMax[1], unit=prob.unit_controls[0][0])
# upper bounds
    result.upper_bound(m0, obj.Minit, unit=prob.unit_states[0][4]) # not above the initial mass
result.upper_bound(m1, obj.M0[1], unit=prob.unit_states[0][4])
result.upper_bound(Tr0, obj.ThrustMax[0] + dThrust0, unit=prob.unit_controls[0][0])
result.upper_bound(Tt0, obj.ThrustMax[0] + dThrust0, unit=prob.unit_controls[0][0])
result.upper_bound(T0, obj.ThrustMax[0] + dThrust0, unit=prob.unit_controls[0][0])
result.upper_bound(Tr1, obj.ThrustMax[1] + dThrust1, unit=prob.unit_controls[0][0])
result.upper_bound(Tt1, obj.ThrustMax[1] + dThrust1, unit=prob.unit_controls[0][0])
result.upper_bound(T1, obj.ThrustMax[1] + dThrust1, unit=prob.unit_controls[0][0])
result.upper_bound(q, obj.MaxQ, unit = prob.unit_states[0][0])
result.upper_bound(a_mag0, obj.MaxG * obj.g0)
result.upper_bound(a_mag1, obj.MaxG * obj.g0)
return result()
def cost(prob, obj):
m1 = prob.states(4, 1)
return -m1[-1] / prob.unit_states[1][4]
# ========================
# Program Starting Point
time_init = [0.0, 200, 800]
n = [20, 30]
num_states = [5, 5]
num_controls = [2, 2]
max_iteration = 90
flag_savefig = True
savefig_file = "./11_Polar_TSTO_Taiki/TSTO_"
# ------------------------
# set OpenGoddard class for algorithm determination
prob = Problem(time_init, n, num_states, num_controls, max_iteration)
# ------------------------
# create instance of operating object
obj = Rocket()
unit_R = obj.Re
unit_theta = 1
unit_V = np.sqrt(obj.GMe / obj.Re)
unit_m = obj.M0[0]
unit_t = unit_R / unit_V
unit_T = unit_m * unit_R / unit_t ** 2
prob.set_unit_states_all_section(0, unit_R)
prob.set_unit_states_all_section(1, unit_theta)
prob.set_unit_states_all_section(2, unit_V)
prob.set_unit_states_all_section(3, unit_V)
prob.set_unit_states_all_section(4, unit_m)
prob.set_unit_controls_all_section(0, unit_T)
prob.set_unit_controls_all_section(1, unit_T)
prob.set_unit_time(unit_t)
# ========================
# Initial parameter guess
# altitude profile
R_init = Guess.cubic(prob.time_all_section, obj.Re, 0.0, obj.Rtarget, 0.0)
# Guess.plot(prob.time_all_section, R_init, "Altitude", "time", "Altitude")
# if(flag_savefig):plt.savefig(savefig_file + "guess_alt" + ".png")
# theta
theta_init = Guess.cubic(prob.time_all_section, 0.0, 0.0, np.deg2rad(25.0), 0.0)
# velocity
Vr_init = Guess.linear(prob.time_all_section, 0.0, 0.0)
Vt_init = Guess.linear(prob.time_all_section, obj.V0, obj.Vtarget)
# Guess.plot(prob.time_all_section, V_init, "Velocity", "time", "Velocity")
# mass profile -0.6
M_init0 = Guess.cubic(prob.time_all_section, obj.Minit, 0.0, obj.Mdry[0] + obj.M0[1], 0.0)
M_init1 = Guess.cubic(prob.time_all_section, obj.M0[1], 0.0, obj.Mdry[1], 0.0)
M_init = np.hstack((M_init0, M_init1))
# Guess.plot(prob.time_all_section, M_init, "Mass", "time", "Mass")
# if(flag_savefig):plt.savefig(savefig_file + "guess_mass" + ".png")
# thrust profile
# T_init = Guess.zeros(prob.time_all_section)
Tr_init0 = Guess.cubic(prob.time[0], obj.ThrustMax[0]*9/10, 0.0, 0.0, 0.0)
Tr_init1 = Guess.cubic(prob.time[1], obj.ThrustMax[1]*9/10, 0.0, 0.0, 0.0)
Tr_init = np.hstack((Tr_init0, Tr_init1))
# Tt_init = Guess.cubic(prob.time_all_section, 0.0, 0.0, 0.0, 0.0)
Tt_init0 = Guess.cubic(prob.time[0], obj.ThrustMax[0]/10, 0.0, 0.0, 0.0)
Tt_init1 = Guess.cubic(prob.time[1], obj.ThrustMax[1]/10, 0.0, 0.0, 0.0)
Tt_init = np.hstack((Tt_init0, Tt_init1))
# Guess.plot(prob.time_all_section, T_init, "Thrust Guess", "time", "Thrust")
# if(flag_savefig):plt.savefig(savefig_file + "guess_thrust" + ".png")
# plt.show()
# ========================
# Substitution initial value to parameter vector to be optimized
# non dimensional values (Divide by scale factor)
prob.set_states_all_section(0, R_init)
prob.set_states_all_section(1, theta_init)
prob.set_states_all_section(2, Vr_init)
prob.set_states_all_section(3, Vt_init)
prob.set_states_all_section(4, M_init)
prob.set_controls_all_section(0, Tr_init)
prob.set_controls_all_section(1, Tt_init)
# ========================
# Main Process
# Assign problem to SQP solver
prob.dynamics = [dynamics, dynamics]
prob.knot_states_smooth = [False]
prob.cost = cost
# prob.cost_derivative = cost_derivative
prob.equality = equality
prob.inequality = inequality
def display_func():
R = prob.states_all_section(0)
theta = prob.states_all_section(1)
m = prob.states_all_section(4)
ts = prob.time_knots()
tf = prob.time_final(-1)
print("m0 : {0:.5f}".format(m[0]))
print("mf : {0:.5f}".format(m[-1]))
print("mdry : {0:.5f}".format(obj.Mdry[0]))
print("payload : {0:.5f}".format(m[-1] - obj.Mdry[1]))
print("max altitude: {0:.5f}".format(R[-1] - obj.Re))
print("MECO time : {0:.3f}".format(ts[1]))
print("final time : {0:.3f}".format(tf))
prob.solve(obj, display_func, ftol=1e-8)
# ========================
# Post Process
# ------------------------
# Convert parameter vector to variable
R = prob.states_all_section(0)
theta = prob.states_all_section(1)
Vr = prob.states_all_section(2)
Vt = prob.states_all_section(3)
m = prob.states_all_section(4)
Tr = prob.controls_all_section(0)
Tt = prob.controls_all_section(1)
time = prob.time_update()
R0 = prob.states(0, 0)
R1 = prob.states(0, 1)
Tr0 = prob.controls(0, 0)
Tr1 = prob.controls(0, 1)
Tt0 = prob.controls(1, 0)
Tt1 = prob.controls(1, 1)
# ------------------------
# Calculate necessary variables
rho = obj.airDensity(R - obj.Re)
Mach = np.sqrt(Vr**2 + Vt**2) / obj.airSound(R - obj.Re)
Cd = obj.Cd(Mach)
Dr = 0.5 * rho * Vr * np.sqrt(Vr**2 + Vt**2) * Cd * obj.A[0] # [N]
Dt = 0.5 * rho * Vt * np.sqrt(Vr**2 + Vt**2) * Cd * obj.A[0] # [N]
g = obj.g0 * (obj.Re / R)**2 # [m/s2]
# dynamic pressure
q = 0.5 * rho * (Vr**2 + Vt**2) # [Pa]
# acceleration
a_r = (Tr - Dr) / m / obj.g0
a_t = (Tt - Dt) / m / obj.g0
a_mag = np.sqrt(a_r**2 + a_t**2) # [G]
# Thrust
T = np.sqrt(Tr**2 + Tt**2)
dThrust0 = (obj.airPressure(obj.H0) - obj.airPressure(R0 - obj.Re)) * obj.Ae[0]
dThrust1 = obj.airPressure(R1 - obj.Re) * obj.Ae[1]
Isp0 = obj.Isp[0] + dThrust0 / (obj.refMdot[0] * obj.g0)
Isp1 = obj.Isp[1] + dThrust1 / (obj.refMdot[1] * obj.g0)
Thrust_SL = T - np.append(dThrust0, dThrust1)
np.savetxt(savefig_file + "Thrust_Log" + ".csv", np.hstack((time, Thrust_SL, T, Tr, Tt)), delimiter=',')
# ------------------------
# Visualizetion
plt.close("all")
plt.figure()
plt.title("Altitude profile")
plt.plot(time, (R - obj.Re) / 1000, marker="o", label="Altitude")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Altitude [km]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "altitude" + ".png")
np.savetxt(savefig_file + "Altitude_Log" + ".csv", np.hstack((time, (R - obj.Re))), delimiter=',')
plt.figure()
plt.title("Velocity")
plt.plot(time, Vr, marker="o", label="Vr")
plt.plot(time, Vt, marker="o", label="Vt")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Velocity [m/s]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "velocity" + ".png")
np.savetxt(savefig_file + "Velocity_Log" + ".csv", np.hstack((time, Vr, Vt)), delimiter=',')
plt.figure()
plt.title("Mass")
plt.plot(time, m, marker="o", label="Mass")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Mass [kg]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "mass" + ".png")
np.savetxt(savefig_file + "Mass_Log" + ".csv", np.hstack((time, m)), delimiter=',')
plt.figure()
plt.title("Acceleration")
plt.plot(time, a_r, marker="o", label="Acc r")
plt.plot(time, a_t, marker="o", label="Acc t")
plt.plot(time, a_mag, marker="o", label="Acc")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Acceleration [G]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "acceleration" + ".png")
plt.figure()
plt.title("Thrust profile")
plt.plot(time, Tr / 1000, marker="o", label="Tr")
plt.plot(time, Tt / 1000, marker="o", label="Tt")
plt.plot(time, T / 1000, marker="o", label="Thrust")
plt.plot(time, Dr / 1000, marker="o", label="Dr")
plt.plot(time, Dt / 1000, marker="o", label="Dt")
plt.plot(time, m * g / 1000, marker="o", label="Gravity")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Thrust [kN]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "force" + ".png")
plt.figure()
plt.title("Flight trajectory")
plt.plot(theta * obj.Re / 1000, (R - obj.Re) / 1000, marker="o", label="trajectory")
plt.grid()
plt.xlabel("Downrange [km]")
plt.ylabel("Altitude [km]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "trajectory" + ".png")
plt.figure()
plt.title("DeltaThrust profile")
plt.plot(time, np.append(dThrust0, dThrust1), marker="o", label="dThrust")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("dThrust [N]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "dforce" + ".png")
plt.figure()
plt.title("Isp profile")
plt.plot(time, np.append(Isp0, Isp1), marker="o", label="Isp")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Isp [s]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "Isp" + ".png") | mit |
algorithmic-music-exploration/amen | amen/timing.py | 1 | 3596 | #!/usr/bin/env python
'''Timing interface'''
from bisect import bisect_left, bisect_right
import numpy as np
import pandas as pd
import librosa
class TimeSlice(object):
"""
A slice of time: has a start time, a duration, and a reference to an Audio object.
"""
def __init__(self, time, duration, audio, unit='s'):
self.time = pd.to_timedelta(time, unit=unit)
self.duration = pd.to_timedelta(duration, unit=unit)
self.audio = audio
def __repr__(self):
args = self.time.delta * 1e-9, self.duration.delta * 1e-9
return '<TimeSlice, start: {0:.2f}, duration: {1:.2f}>'.format(*args)
def get_samples(self):
"""
Gets the samples corresponding to this TimeSlice from the parent audio object.
"""
start = self.time.delta * 1e-9
duration = self.duration.delta * 1e-9
starting_sample, ending_sample = librosa.time_to_samples(
[start, start + duration], self.audio.sample_rate
)
left_offsets, right_offsets = self._get_offsets(
starting_sample, ending_sample, self.audio.num_channels
)
samples = self._offset_samples(
starting_sample,
ending_sample,
left_offsets,
right_offsets,
self.audio.num_channels,
)
return samples, left_offsets[0], right_offsets[0]
def _get_offsets(self, starting_sample, ending_sample, num_channels):
"""
Find the offset to the next zero-crossing, for each channel.
"""
offsets = []
for zero_index in self.audio.zero_indexes:
index = bisect_left(zero_index, starting_sample) - 1
if index < 0:
starting_offset = 0
else:
starting_crossing = zero_index[index]
starting_offset = starting_crossing - starting_sample
index = bisect_left(zero_index, ending_sample)
if index >= len(zero_index):
ending_offset = 0
else:
zci = min(bisect_right(zero_index, ending_sample), len(zero_index) - 1)
ending_crossing = zero_index[zci]
ending_offset = ending_crossing - ending_sample
offsets.append((starting_offset, ending_offset))
if num_channels == 1:
results = (offsets[0], offsets[0])
elif num_channels == 2:
results = (offsets[0], offsets[1])
return results
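    # Note (added comment): snapping the slice boundaries to the nearest zero
    # crossings is what lets the extracted samples be re-assembled without audible
    # clicks; get_samples() therefore returns the chosen offsets along with the samples.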
def _offset_samples(
self, starting_sample, ending_sample, left_offsets, right_offsets, num_channels
):
"""
Does the offset itself.
"""
left_slice = (
0,
slice(starting_sample + left_offsets[0], ending_sample + left_offsets[1]),
)
right_slice = left_slice
if num_channels == 2:
right_slice = (
1,
slice(
starting_sample + right_offsets[0], ending_sample + right_offsets[1]
),
)
left_channel = self.audio.raw_samples[left_slice]
right_channel = self.audio.raw_samples[right_slice]
return np.array([left_channel, right_channel])
class TimingList(list):
"""
A list of TimeSlices.
"""
def __init__(self, name, timings, audio, unit='s'):
super(self.__class__, self).__init__()
self.name = name
for (start, duration) in timings:
time_slice = TimeSlice(start, duration, audio, unit=unit)
self.append(time_slice)
| bsd-2-clause |
akionakamura/scikit-learn | sklearn/tests/test_isotonic.py | 230 | 11087 | import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
    # check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
    # Set y and x for increasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
| bsd-3-clause |
ndingwall/scikit-learn | sklearn/datasets/_california_housing.py | 11 | 6174 | """California housing dataset.
The original database is available from StatLib
http://lib.stat.cmu.edu/datasets/
The data contains 20,640 observations on 9 variables.
This dataset contains the average house value as target variable
and the following input variables (features): average income,
housing average age, average rooms, average bedrooms, population,
average occupation, latitude, and longitude in that order.
References
----------
Pace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions,
Statistics and Probability Letters, 33 (1997) 291-297.
"""
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from os.path import dirname, exists, join
from os import makedirs, remove
import tarfile
import numpy as np
import logging
import joblib
from . import get_data_home
from ._base import _convert_data_dataframe
from ._base import _fetch_remote
from ._base import _pkl_filepath
from ._base import RemoteFileMetadata
from ..utils import Bunch
from ..utils.validation import _deprecate_positional_args
# The original data can be found at:
# https://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.tgz
ARCHIVE = RemoteFileMetadata(
filename='cal_housing.tgz',
url='https://ndownloader.figshare.com/files/5976036',
checksum=('aaa5c9a6afe2225cc2aed2723682ae40'
'3280c4a3695a2ddda4ffb5d8215ea681'))
logger = logging.getLogger(__name__)
@_deprecate_positional_args
def fetch_california_housing(*, data_home=None, download_if_missing=True,
return_X_y=False, as_frame=False):
"""Load the California housing dataset (regression).
============== ==============
Samples total 20640
Dimensionality 8
Features real
Target real 0.15 - 5.
============== ==============
Read more in the :ref:`User Guide <california_housing_dataset>`.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
return_X_y : bool, default=False.
If True, returns ``(data.data, data.target)`` instead of a Bunch
object.
.. versionadded:: 0.20
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric, string or categorical). The target is
a pandas DataFrame or Series depending on the number of target_columns.
.. versionadded:: 0.23
Returns
-------
dataset : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : ndarray, shape (20640, 8)
Each row corresponding to the 8 feature values in order.
If ``as_frame`` is True, ``data`` is a pandas object.
target : numpy array of shape (20640,)
Each value corresponds to the average
house value in units of 100,000.
If ``as_frame`` is True, ``target`` is a pandas object.
feature_names : list of length 8
Array of ordered feature names used in the dataset.
DESCR : string
Description of the California housing dataset.
frame : pandas DataFrame
Only present when `as_frame=True`. DataFrame with ``data`` and
``target``.
.. versionadded:: 0.23
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.20
Notes
-----
This dataset consists of 20,640 samples and 9 features.
"""
data_home = get_data_home(data_home=data_home)
if not exists(data_home):
makedirs(data_home)
filepath = _pkl_filepath(data_home, 'cal_housing.pkz')
if not exists(filepath):
if not download_if_missing:
raise IOError("Data not found and `download_if_missing` is False")
logger.info('Downloading Cal. housing from {} to {}'.format(
ARCHIVE.url, data_home))
archive_path = _fetch_remote(ARCHIVE, dirname=data_home)
with tarfile.open(mode="r:gz", name=archive_path) as f:
cal_housing = np.loadtxt(
f.extractfile('CaliforniaHousing/cal_housing.data'),
delimiter=',')
# Columns are not in the same order compared to the previous
# URL resource on lib.stat.cmu.edu
columns_index = [8, 7, 2, 3, 4, 5, 6, 1, 0]
cal_housing = cal_housing[:, columns_index]
joblib.dump(cal_housing, filepath, compress=6)
remove(archive_path)
else:
cal_housing = joblib.load(filepath)
feature_names = ["MedInc", "HouseAge", "AveRooms", "AveBedrms",
"Population", "AveOccup", "Latitude", "Longitude"]
target, data = cal_housing[:, 0], cal_housing[:, 1:]
# avg rooms = total rooms / households
data[:, 2] /= data[:, 5]
# avg bed rooms = total bed rooms / households
data[:, 3] /= data[:, 5]
# avg occupancy = population / households
data[:, 5] = data[:, 4] / data[:, 5]
# target in units of 100,000
target = target / 100000.0
module_path = dirname(__file__)
with open(join(module_path, 'descr', 'california_housing.rst')) as dfile:
descr = dfile.read()
X = data
y = target
frame = None
target_names = ["MedHouseVal", ]
if as_frame:
frame, X, y = _convert_data_dataframe("fetch_california_housing",
data,
target,
feature_names,
target_names)
if return_X_y:
return X, y
return Bunch(data=X,
target=y,
frame=frame,
target_names=target_names,
feature_names=feature_names,
DESCR=descr)
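# Minimal usage sketch (added note, illustrative only -- not executed as part of
# this module, and it downloads the data on first use):
#
#     from sklearn.datasets import fetch_california_housing
#     housing = fetch_california_housing()
#     housing.data.shape        # (20640, 8)
#     housing.feature_names     # ['MedInc', 'HouseAge', ...]
#     X, y = fetch_california_housing(return_X_y=True)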
| bsd-3-clause |
jorik041/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.hold('on')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
nelango/ViralityAnalysis | model/lib/pandas/tests/test_msgpack/test_extension.py | 9 | 2254 | from __future__ import print_function
import array
import pandas.msgpack as msgpack
from pandas.msgpack import ExtType
def test_pack_ext_type():
def p(s):
packer = msgpack.Packer()
packer.pack_ext_type(0x42, s)
return packer.bytes()
assert p(b'A') == b'\xd4\x42A' # fixext 1
assert p(b'AB') == b'\xd5\x42AB' # fixext 2
assert p(b'ABCD') == b'\xd6\x42ABCD' # fixext 4
assert p(b'ABCDEFGH') == b'\xd7\x42ABCDEFGH' # fixext 8
assert p(b'A'*16) == b'\xd8\x42' + b'A'*16 # fixext 16
assert p(b'ABC') == b'\xc7\x03\x42ABC' # ext 8
assert p(b'A'*0x0123) == b'\xc8\x01\x23\x42' + b'A'*0x0123 # ext 16
assert p(b'A'*0x00012345) == b'\xc9\x00\x01\x23\x45\x42' + b'A'*0x00012345 # ext 32
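# Added note: the expected byte strings above follow the msgpack ext-format layout --
# fixext 1/2/4/8/16 use a single marker byte (0xd4..0xd8) followed by the type code
# and the payload, while ext 8/16/32 (0xc7/0xc8/0xc9) insert a 1/2/4-byte big-endian
# length between the marker and the type code.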
def test_unpack_ext_type():
def check(b, expected):
assert msgpack.unpackb(b) == expected
check(b'\xd4\x42A', ExtType(0x42, b'A')) # fixext 1
check(b'\xd5\x42AB', ExtType(0x42, b'AB')) # fixext 2
check(b'\xd6\x42ABCD', ExtType(0x42, b'ABCD')) # fixext 4
check(b'\xd7\x42ABCDEFGH', ExtType(0x42, b'ABCDEFGH')) # fixext 8
check(b'\xd8\x42' + b'A'*16, ExtType(0x42, b'A'*16)) # fixext 16
check(b'\xc7\x03\x42ABC', ExtType(0x42, b'ABC')) # ext 8
check(b'\xc8\x01\x23\x42' + b'A'*0x0123,
ExtType(0x42, b'A'*0x0123)) # ext 16
check(b'\xc9\x00\x01\x23\x45\x42' + b'A'*0x00012345,
ExtType(0x42, b'A'*0x00012345)) # ext 32
def test_extension_type():
def default(obj):
print('default called', obj)
if isinstance(obj, array.array):
typecode = 123 # application specific typecode
data = obj.tostring()
return ExtType(typecode, data)
raise TypeError("Unknwon type object %r" % (obj,))
def ext_hook(code, data):
print('ext_hook called', code, data)
assert code == 123
obj = array.array('d')
obj.fromstring(data)
return obj
obj = [42, b'hello', array.array('d', [1.1, 2.2, 3.3])]
s = msgpack.packb(obj, default=default)
obj2 = msgpack.unpackb(s, ext_hook=ext_hook)
assert obj == obj2
| mit |
bchappet/dnfpy | src/dnfpyUtils/stats/clusterMap.py | 1 | 7473 | from dnfpy.core.map2D import Map2D
import numpy as np
from sklearn.cluster import DBSCAN
import scipy.spatial.distance as dist
from dnfpyUtils.stats.statistic import Statistic
class ClusterMap(Statistic):
"""
    Params:
        "continuity" : float; if different from 0.0, we assume that the clusters are continuous.
            A continuous cluster allows a loss of activity during continuity seconds.
            Otherwise, the cluster is deleted.
            We add the last clusters to the current coords,
            then deduce which label corresponds to each new cluster.
            The first iteration determines the labels for the next ones.
        "threshold" : threshold for an activity value to be considered
        "min_sample" : how many activations are enough to be considered as a cluster
        "clustSize" : in [0,1], max size of the expected cluster
        "expectedNumberOfCluster" : as its name suggests. We will not compute anything if
            the number of activations is higher than
            (np.pi*(clustSize_/2.0)**2)*expectedNumberOfCluster
            where clustSize_ is the actual clustSize = clustSize*size
    Results:
        _data = np array (nb_clust*2) with cluster barycenter coords X,Y:
            1,2
            3,5
            3.8,3.4
        "nbOutliners" : int, number of outliers found at the last compute
        if continuity > 0:
            "nbNewCluster": int, number of newly built clusters
"""
def __init__(self,name,size=0,dt=0.1,threshold=0.4,min_samples=3,
clustSize=0.15,sizeNpArr=1,continuity=1.0,expectedNumberOfCluster=1,
**kwargs):
super(ClusterMap,self).__init__(name=name,size=size,dt=dt,threshold=threshold,
clustSize=clustSize,min_samples=min_samples,sizeNpArr=sizeNpArr,
continuity=continuity,expectedNumberOfCluster=expectedNumberOfCluster,
**kwargs)
self.trace = []
def getTrace(self):
return self.trace
def reset(self):
super().reset()
self.clusters = []#cluster coords saved
        #if continuity > 0, the cluster should not be off for more than continuity seconds
        self.clusterOff = [] #we save the number of iterations that the cluster is off in this list
self.setArg(nbOutliners=0)
self.setArg(nbNewCluster=0)
self.setArg(nbComputationEmpty=0)
self.sumNbClusterSave = []
self.setArg(nbClusterSum=0)
self.setArg(maxNbAct=0)
self.nbActList = []
self.nbActivation = 0
self.diffNbClusterSum = 0 #add |nbClust - expectedNumberOfCluster| at every step
self._data = []
self.trace = []
self.dictOutCluster = {} #dict cluster -> output cluster
self.outputCluster = []
def _compute(self,size,np_arr,threshold,min_samples,clustSize_,continuity,expectedNumberOfCluster,dt,dim):
self.toProf(size,np_arr,threshold,min_samples,clustSize_,continuity,expectedNumberOfCluster,dt,dim)
def getMaxNbAct(self):
if len(self.nbActList) > 0:
return np.max(self.nbActList)
else:
return np.nan
def getMeanNbAct(self):
if len(self.nbActList) > 0:
return np.mean(self.nbActList)
else:
return np.nan
def toProf(self,size,np_arr,threshold,min_samples,clustSize_,continuity,
expectedNumberOfCluster,dt,dim):
maxArr = np.max(np_arr)
coords = np.where(np_arr > maxArr/1.2)
self.nbActivation = len(coords[0])
#print(self.nbActivation)
#if nbActivation > 0 and nbActivation < np_arr.shape[0]*1.6:
#print(expectedNumberOfCluster,clustSize_)
nbActMax = (np.pi*(clustSize_/2.0)**2)*expectedNumberOfCluster
if self.nbActivation < nbActMax and (self.nbActivation > 0 or (continuity > 0) and (len(self.clusters)>0)):
#print("nbActivation : %s"%self.nbActivation)
self.nbActList.append(self.nbActivation)
coordsArray = list(zip(coords[1],coords[0]))
if continuity > 0:
clusters = []
                #we add the previous valid clusters to the coordArray: hence we'll have the same labels
for i in range(len(self._data)):
if not(np.any(np.isnan(self._data[i]))):
clust = {'id':i,'coord':self._data[i]}
coordsArray.insert(0,clust['coord'])
clusters.append(clust)
nbClust = len(clusters)
coordsArrayNp = np.array(coordsArray)
distanceMat = dist.cdist(coordsArrayNp,coordsArrayNp)
db = DBSCAN(min_samples=min_samples,eps=clustSize_).fit(distanceMat)
#set of labels (minus outliners)
unique_labels = set(db.labels_) - set([-1])
#print(db.labels_)
set_label = list(set(db.labels_))
#print(set_label)
clusterLabelList = []
for i in range(len(clusters)):
#lab = db.labels_[-1-i]
#print("index %s nb %s"%(-1-i,len(set_label)))
if i < len(set_label):
                    lab = db.labels_[i]  # the labels are in the same order as the coordArray we added
                else:
                    print("too few labels!")
break
print("i : %s. label %s , sum %s"%(i,lab,np.sum(db.labels_==lab)))
clusters[i]['label'] = lab
clusterLabelList.append( lab)
            #outliers are the labels which are -1 and not in the cluster list
nb_outliners = len(np.where(db.labels_ == -1)[0])
#update cluster positions
for cluster in clusters:
lab = cluster['label']
if lab != -1 :
cluster['coord'] = self.__getBarycenter(coordsArrayNp,db.labels_,lab)
else:
cluster['coord'] = np.array((np.nan,)*dim)
#check for new cluster
nbNewCluster = 0
for lab in set(set_label) -set(clusterLabelList):
barycenter = self.__getBarycenter(coordsArrayNp,db.labels_,lab)
#print("add cluster")
self._data.append(barycenter)
nbNewCluster += 1
#print("nb cluster ",len(self.clusters))
#print("nb clusterOff ",len(self.clusterOff))
self.setArg(nbNewCluster=nbNewCluster)
self.setArg(nbOutliners=nb_outliners)
for cluster in clusters:
self._data[cluster['id']] = cluster['coord']
print(self._data)
elif self.nbActivation == 0:
self.setArg(nbComputationEmpty=self.getArg("nbComputationEmpty")+1)
else:
            #too many activations: we don't compute clusters
self._data = np.array([np.array([-1,-1])])
self.trace.append(np.copy(self._data))
def __getBarycenter(self,coordsArrayNp,labels,lab):
coorBary = coordsArrayNp[np.where(labels == lab)]
barycenter = np.mean(coorBary,axis=0)
return barycenter
def _onParamsUpdate(self,clustSize,sizeNpArr):
clustSize_ = clustSize * sizeNpArr
return dict(clustSize_=clustSize_)
| gpl-2.0 |
kai5263499/networkx | examples/graph/unix_email.py | 62 | 2683 | #!/usr/bin/env python
"""
Create a directed graph, allowing multiple edges and self loops, from
a unix mailbox. The nodes are email addresses with links
that point from the sender to the receivers. The edge data
is a Python email.Message object which contains all of
the email message data.
This example shows the power of MultiDiGraph to hold edge data
of arbitrary Python objects (in this case email.Message objects).
By default, load the sample unix email mailbox called "unix_email.mbox".
You can load your own mailbox by naming it on the command line, e.g.
python unix_email.py /var/spool/mail/username
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2005 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import email
from email.utils import getaddresses,parseaddr
import mailbox
import sys
# unix mailbox recipe
# see http://www.python.org/doc/current/lib/module-mailbox.html
def msgfactory(fp):
try:
return email.message_from_file(fp)
    except email.errors.MessageParseError:  # email.Errors was the Python 2 name
# Don't return None since that will stop the mailbox iterator
return ''
if __name__ == '__main__':
import networkx as nx
try:
import matplotlib.pyplot as plt
except:
pass
if len(sys.argv)==1:
filePath = "unix_email.mbox"
else:
filePath = sys.argv[1]
mbox = mailbox.mbox(filePath, msgfactory) # parse unix mailbox
G=nx.MultiDiGraph() # create empty graph
# parse each messages and build graph
for msg in mbox: # msg is python email.Message.Message object
(source_name,source_addr) = parseaddr(msg['From']) # sender
# get all recipients
# see http://www.python.org/doc/current/lib/module-email.Utils.html
tos = msg.get_all('to', [])
ccs = msg.get_all('cc', [])
resent_tos = msg.get_all('resent-to', [])
resent_ccs = msg.get_all('resent-cc', [])
all_recipients = getaddresses(tos + ccs + resent_tos + resent_ccs)
# now add the edges for this mail message
for (target_name,target_addr) in all_recipients:
G.add_edge(source_addr,target_addr,message=msg)
# print edges with message subject
    for (u,v,d) in G.edges(data=True):  # edges_iter() was removed in NetworkX 2.x
print("From: %s To: %s Subject: %s"%(u,v,d['message']["Subject"]))
try: # draw
pos=nx.spring_layout(G,iterations=10)
nx.draw(G,pos,node_size=0,alpha=0.4,edge_color='r',font_size=16)
plt.savefig("unix_email.png")
plt.show()
except: # matplotlib not available
pass
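# Resulting graph, in short: every distinct email address becomes a node, and each
# (sender, recipient) pair of every message adds one directed edge, so the same pair
# can be linked by parallel edges (hence MultiDiGraph), each edge carrying the
# originating email.Message object in its 'message' attribute.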
| bsd-3-clause |
amolkahat/pandas | pandas/util/_validators.py | 4 | 13052 | """
Module that contains many useful utilities
for validating data or function arguments
"""
import warnings
from pandas.core.dtypes.common import is_bool
def _check_arg_length(fname, args, max_fname_arg_count, compat_args):
"""
Checks whether 'args' has length of at most 'compat_args'. Raises
a TypeError if that is not the case, similar to in Python when a
function is called with too many arguments.
"""
if max_fname_arg_count < 0:
raise ValueError("'max_fname_arg_count' must be non-negative")
if len(args) > len(compat_args):
max_arg_count = len(compat_args) + max_fname_arg_count
actual_arg_count = len(args) + max_fname_arg_count
argument = 'argument' if max_arg_count == 1 else 'arguments'
raise TypeError(
"{fname}() takes at most {max_arg} {argument} "
"({given_arg} given)".format(
fname=fname, max_arg=max_arg_count,
argument=argument, given_arg=actual_arg_count))
def _check_for_default_values(fname, arg_val_dict, compat_args):
"""
Check that the keys in `arg_val_dict` are mapped to their
default values as specified in `compat_args`.
Note that this function is to be called only when it has been
checked that arg_val_dict.keys() is a subset of compat_args
"""
for key in arg_val_dict:
        # try checking equality directly with the '==' operator,
        # as comparison may have been overridden for the left-hand
        # object
try:
v1 = arg_val_dict[key]
v2 = compat_args[key]
# check for None-ness otherwise we could end up
# comparing a numpy array vs None
if (v1 is not None and v2 is None) or \
(v1 is None and v2 is not None):
match = False
else:
match = (v1 == v2)
if not is_bool(match):
raise ValueError("'match' is not a boolean")
# could not compare them directly, so try comparison
# using the 'is' operator
except ValueError:
match = (arg_val_dict[key] is compat_args[key])
if not match:
raise ValueError(("the '{arg}' parameter is not "
"supported in the pandas "
"implementation of {fname}()".
format(fname=fname, arg=key)))
def validate_args(fname, args, max_fname_arg_count, compat_args):
"""
    Checks whether the `*args` argument passed into a function contains
    at most `len(compat_args)` arguments and whether or not all of these
    elements in `args` are set to their default values.
    Parameters
    ----------
fname: str
The name of the function being passed the `*args` parameter
args: tuple
The `*args` parameter passed into a function
max_fname_arg_count: int
The maximum number of arguments that the function `fname`
can accept, excluding those in `args`. Used for displaying
appropriate error messages. Must be non-negative.
compat_args: OrderedDict
        An ordered dictionary of keys and their associated default values.
In order to accommodate buggy behaviour in some versions of `numpy`,
where a signature displayed keyword arguments but then passed those
arguments **positionally** internally when calling downstream
implementations, an ordered dictionary ensures that the original
order of the keyword arguments is enforced. Note that if there is
only one key, a generic dict can be passed in as well.
Raises
------
TypeError if `args` contains more values than there are `compat_args`
ValueError if `args` contains values that do not correspond to those
of the default values specified in `compat_args`
"""
_check_arg_length(fname, args, max_fname_arg_count, compat_args)
# We do this so that we can provide a more informative
# error message about the parameters that we are not
# supporting in the pandas implementation of 'fname'
kwargs = dict(zip(compat_args, args))
_check_for_default_values(fname, kwargs, compat_args)
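# A minimal usage sketch (illustrative, not part of pandas): the numpy-compat
# pattern is to accept an argument in the signature but only with its default value.
#   from collections import OrderedDict
#   compat = OrderedDict([('out', None), ('where', True)])
#   validate_args('round', (None, True), 1, compat)   # passes: defaults only
#   validate_args('round', (5,), 1, compat)           # raises ValueError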
def _check_for_invalid_keys(fname, kwargs, compat_args):
"""
Checks whether 'kwargs' contains any keys that are not
in 'compat_args' and raises a TypeError if there is one.
"""
# set(dict) --> set of the dictionary's keys
diff = set(kwargs) - set(compat_args)
if diff:
bad_arg = list(diff)[0]
raise TypeError(("{fname}() got an unexpected "
"keyword argument '{arg}'".
format(fname=fname, arg=bad_arg)))
def validate_kwargs(fname, kwargs, compat_args):
"""
Checks whether parameters passed to the **kwargs argument in a
function `fname` are valid parameters as specified in `*compat_args`
and whether or not they are set to their default values.
Parameters
----------
fname: str
The name of the function being passed the `**kwargs` parameter
kwargs: dict
The `**kwargs` parameter passed into `fname`
compat_args: dict
A dictionary of keys that `kwargs` is allowed to have and their
associated default values
Raises
------
TypeError if `kwargs` contains keys not in `compat_args`
ValueError if `kwargs` contains keys in `compat_args` that do not
map to the default values specified in `compat_args`
"""
kwds = kwargs.copy()
_check_for_invalid_keys(fname, kwargs, compat_args)
_check_for_default_values(fname, kwds, compat_args)
def validate_args_and_kwargs(fname, args, kwargs,
max_fname_arg_count,
compat_args):
"""
Checks whether parameters passed to the *args and **kwargs argument in a
function `fname` are valid parameters as specified in `*compat_args`
and whether or not they are set to their default values.
Parameters
----------
fname: str
The name of the function being passed the `**kwargs` parameter
args: tuple
The `*args` parameter passed into a function
kwargs: dict
The `**kwargs` parameter passed into `fname`
max_fname_arg_count: int
        The maximum number of arguments that the function `fname`
        can accept, excluding those in `args`. Used for displaying
appropriate error messages. Must be non-negative.
compat_args: OrderedDict
        An ordered dictionary of keys that `kwargs` is allowed to
have and their associated default values. Note that if there
is only one key, a generic dict can be passed in as well.
Raises
------
TypeError if `args` contains more values than there are
`compat_args` OR `kwargs` contains keys not in `compat_args`
ValueError if `args` contains values not at the default value (`None`)
`kwargs` contains keys in `compat_args` that do not map to the default
value as specified in `compat_args`
See Also
--------
validate_args : purely args validation
validate_kwargs : purely kwargs validation
"""
# Check that the total number of arguments passed in (i.e.
# args and kwargs) does not exceed the length of compat_args
_check_arg_length(fname, args + tuple(kwargs.values()),
max_fname_arg_count, compat_args)
# Check there is no overlap with the positional and keyword
# arguments, similar to what is done in actual Python functions
args_dict = dict(zip(compat_args, args))
for key in args_dict:
if key in kwargs:
raise TypeError("{fname}() got multiple values for keyword "
"argument '{arg}'".format(fname=fname, arg=key))
kwargs.update(args_dict)
validate_kwargs(fname, kwargs, compat_args)
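# Mixed positional/keyword sketch (illustrative, not part of pandas): with
#   compat = OrderedDict([('axis', None), ('out', None)])
# the call validate_args_and_kwargs('cumsum', (None,), {'out': None}, 1, compat)
# passes, whereas validate_args_and_kwargs('cumsum', (0,), {'axis': 0}, 1, compat)
# raises TypeError because 'axis' is given both positionally and by keyword.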
def validate_bool_kwarg(value, arg_name):
""" Ensures that argument passed in arg_name is of type bool. """
if not (is_bool(value) or value is None):
raise ValueError('For argument "{arg}" expected type bool, received '
'type {typ}.'.format(arg=arg_name,
typ=type(value).__name__))
return value
def validate_axis_style_args(data, args, kwargs, arg_name, method_name):
"""Argument handler for mixed index, columns / axis functions
In an attempt to handle both `.method(index, columns)`, and
`.method(arg, axis=.)`, we have to do some bad things to argument
parsing. This translates all arguments to `{index=., columns=.}` style.
Parameters
----------
data : DataFrame or Panel
    args : tuple
All positional arguments from the user
kwargs : dict
All keyword arguments from the user
arg_name, method_name : str
Used for better error messages
Returns
-------
kwargs : dict
A dictionary of keyword arguments. Doesn't modify ``kwargs``
inplace, so update them with the return value here.
Examples
--------
>>> df._validate_axis_style_args((str.upper,), {'columns': id},
... 'mapper', 'rename')
{'columns': <function id>, 'index': <method 'upper' of 'str' objects>}
This emits a warning
>>> df._validate_axis_style_args((str.upper, id), {},
... 'mapper', 'rename')
{'columns': <function id>, 'index': <method 'upper' of 'str' objects>}
"""
# TODO(PY3): Change to keyword-only args and remove all this
out = {}
# Goal: fill 'out' with index/columns-style arguments
# like out = {'index': foo, 'columns': bar}
# Start by validating for consistency
if 'axis' in kwargs and any(x in kwargs for x in data._AXIS_NUMBERS):
msg = "Cannot specify both 'axis' and any of 'index' or 'columns'."
raise TypeError(msg)
# First fill with explicit values provided by the user...
if arg_name in kwargs:
if args:
msg = ("{} got multiple values for argument "
"'{}'".format(method_name, arg_name))
raise TypeError(msg)
axis = data._get_axis_name(kwargs.get('axis', 0))
out[axis] = kwargs[arg_name]
# More user-provided arguments, now from kwargs
for k, v in kwargs.items():
try:
ax = data._get_axis_name(k)
except ValueError:
pass
else:
out[ax] = v
# All user-provided kwargs have been handled now.
# Now we supplement with positional arguments, emitting warnings
# when there's ambiguity and raising when there's conflicts
if len(args) == 0:
pass # It's up to the function to decide if this is valid
elif len(args) == 1:
axis = data._get_axis_name(kwargs.get('axis', 0))
out[axis] = args[0]
elif len(args) == 2:
if 'axis' in kwargs:
# Unambiguously wrong
msg = ("Cannot specify both 'axis' and any of 'index' "
"or 'columns'")
raise TypeError(msg)
msg = ("Interpreting call\n\t'.{method_name}(a, b)' as "
"\n\t'.{method_name}(index=a, columns=b)'.\nUse named "
"arguments to remove any ambiguity. In the future, using "
"positional arguments for 'index' or 'columns' will raise "
" a 'TypeError'.")
warnings.warn(msg.format(method_name=method_name,), FutureWarning,
stacklevel=4)
out[data._AXIS_NAMES[0]] = args[0]
out[data._AXIS_NAMES[1]] = args[1]
else:
msg = "Cannot specify all of '{}', 'index', 'columns'."
raise TypeError(msg.format(arg_name))
return out
def validate_fillna_kwargs(value, method, validate_scalar_dict_value=True):
"""Validate the keyword arguments to 'fillna'.
This checks that exactly one of 'value' and 'method' is specified.
If 'method' is specified, this validates that it's a valid method.
Parameters
----------
value, method : object
The 'value' and 'method' keyword arguments for 'fillna'.
validate_scalar_dict_value : bool, default True
Whether to validate that 'value' is a scalar or dict. Specifically,
validate that it is not a list or tuple.
Returns
-------
value, method : object
"""
from pandas.core.missing import clean_fill_method
if value is None and method is None:
raise ValueError("Must specify a fill 'value' or 'method'.")
elif value is None and method is not None:
method = clean_fill_method(method)
elif value is not None and method is None:
if validate_scalar_dict_value and isinstance(value, (list, tuple)):
raise TypeError('"value" parameter must be a scalar or dict, but '
'you passed a "{0}"'.format(type(value).__name__))
elif value is not None and method is not None:
raise ValueError("Cannot specify both 'value' and 'method'.")
return value, method
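# Contract enforced above, as a sketch (illustrative only):
#   validate_fillna_kwargs(None, None)    -> ValueError (neither specified)
#   validate_fillna_kwargs(0, 'ffill')    -> ValueError (both specified)
#   validate_fillna_kwargs(0, None)       -> (0, None)
#   validate_fillna_kwargs([1, 2], None)  -> TypeError (list/tuple not allowed)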
| bsd-3-clause |
CallaJun/hackprince | indico/matplotlib/gridspec.py | 10 | 15668 | """
:mod:`~matplotlib.gridspec` is a module which specifies the location
of the subplot in the figure.
``GridSpec``
    specifies the geometry of the grid in which a subplot will be
placed. The number of rows and number of columns of the grid
need to be set. Optionally, the subplot layout parameters
(e.g., left, right, etc.) can be tuned.
``SubplotSpec``
specifies the location of the subplot in the given *GridSpec*.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.transforms as mtransforms
import numpy as np
import warnings
class GridSpecBase(object):
"""
A base class of GridSpec that specifies the geometry of the grid
    in which a subplot will be placed.
"""
def __init__(self, nrows, ncols,
height_ratios=None, width_ratios=None):
"""
The number of rows and number of columns of the grid need to
be set. Optionally, the ratio of heights and widths of rows and
columns can be specified.
"""
#self.figure = figure
self._nrows , self._ncols = nrows, ncols
self.set_height_ratios(height_ratios)
self.set_width_ratios(width_ratios)
def get_geometry(self):
'get the geometry of the grid, e.g., 2,3'
return self._nrows, self._ncols
def get_subplot_params(self, fig=None):
pass
def new_subplotspec(self, loc, rowspan=1, colspan=1):
"""
        create and return a SubplotSpec instance.
"""
loc1, loc2 = loc
subplotspec = self[loc1:loc1+rowspan, loc2:loc2+colspan]
return subplotspec
def set_width_ratios(self, width_ratios):
self._col_width_ratios = width_ratios
def get_width_ratios(self):
return self._col_width_ratios
def set_height_ratios(self, height_ratios):
self._row_height_ratios = height_ratios
def get_height_ratios(self):
return self._row_height_ratios
def get_grid_positions(self, fig):
"""
return lists of bottom and top position of rows, left and
right positions of columns.
"""
nrows, ncols = self.get_geometry()
subplot_params = self.get_subplot_params(fig)
left = subplot_params.left
right = subplot_params.right
bottom = subplot_params.bottom
top = subplot_params.top
wspace = subplot_params.wspace
hspace = subplot_params.hspace
totWidth = right-left
totHeight = top-bottom
# calculate accumulated heights of columns
cellH = totHeight/(nrows + hspace*(nrows-1))
sepH = hspace*cellH
if self._row_height_ratios is not None:
netHeight = cellH * nrows
tr = float(sum(self._row_height_ratios))
cellHeights = [netHeight*r/tr for r in self._row_height_ratios]
else:
cellHeights = [cellH] * nrows
sepHeights = [0] + ([sepH] * (nrows-1))
cellHs = np.add.accumulate(np.ravel(list(zip(sepHeights, cellHeights))))
        # calculate accumulated widths of columns
cellW = totWidth/(ncols + wspace*(ncols-1))
sepW = wspace*cellW
if self._col_width_ratios is not None:
netWidth = cellW * ncols
tr = float(sum(self._col_width_ratios))
cellWidths = [netWidth*r/tr for r in self._col_width_ratios]
else:
cellWidths = [cellW] * ncols
sepWidths = [0] + ([sepW] * (ncols-1))
cellWs = np.add.accumulate(np.ravel(list(zip(sepWidths, cellWidths))))
figTops = [top - cellHs[2*rowNum] for rowNum in range(nrows)]
figBottoms = [top - cellHs[2*rowNum+1] for rowNum in range(nrows)]
figLefts = [left + cellWs[2*colNum] for colNum in range(ncols)]
figRights = [left + cellWs[2*colNum+1] for colNum in range(ncols)]
return figBottoms, figTops, figLefts, figRights
def __getitem__(self, key):
"""
        create and return a SubplotSpec instance.
"""
nrows, ncols = self.get_geometry()
total = nrows*ncols
if isinstance(key, tuple):
try:
k1, k2 = key
except ValueError:
raise ValueError("unrecognized subplot spec")
if isinstance(k1, slice):
row1, row2, _ = k1.indices(nrows)
else:
if k1 < 0:
k1 += nrows
if k1 >= nrows or k1 < 0 :
raise IndexError("index out of range")
row1, row2 = k1, k1+1
if isinstance(k2, slice):
col1, col2, _ = k2.indices(ncols)
else:
if k2 < 0:
k2 += ncols
if k2 >= ncols or k2 < 0 :
raise IndexError("index out of range")
col1, col2 = k2, k2+1
num1 = row1*ncols + col1
num2 = (row2-1)*ncols + (col2-1)
# single key
else:
if isinstance(key, slice):
num1, num2, _ = key.indices(total)
num2 -= 1
else:
if key < 0:
key += total
if key >= total or key < 0 :
raise IndexError("index out of range")
num1, num2 = key, None
return SubplotSpec(self, num1, num2)
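# Typical indexing usage enabled by __getitem__ above (sketch, assuming a pyplot
# figure `fig` is available):
#   gs = GridSpec(2, 2)
#   ax1 = fig.add_subplot(gs[0, 0])   # a single cell
#   ax2 = fig.add_subplot(gs[1, :])   # a SubplotSpec spanning the bottom row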
class GridSpec(GridSpecBase):
"""
    A class that specifies the geometry of the grid in which a subplot
    will be placed. The location of the grid is determined in a similar
    way to SubplotParams.
"""
def __init__(self, nrows, ncols,
left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None,
width_ratios=None, height_ratios=None):
"""
The number of rows and number of columns of the
grid need to be set. Optionally, the subplot layout parameters
(e.g., left, right, etc.) can be tuned.
"""
#self.figure = figure
self.left=left
self.bottom=bottom
self.right=right
self.top=top
self.wspace=wspace
self.hspace=hspace
GridSpecBase.__init__(self, nrows, ncols,
width_ratios=width_ratios,
height_ratios=height_ratios)
#self.set_width_ratios(width_ratios)
#self.set_height_ratios(height_ratios)
_AllowedKeys = ["left", "bottom", "right", "top", "wspace", "hspace"]
def update(self, **kwargs):
"""
Update the current values. If any kwarg is None, default to
the current value, if set, otherwise to rc.
"""
for k, v in six.iteritems(kwargs):
if k in self._AllowedKeys:
setattr(self, k, v)
else:
raise AttributeError("%s is unknown keyword" % (k,))
from matplotlib import _pylab_helpers
from matplotlib.axes import SubplotBase
for figmanager in six.itervalues(_pylab_helpers.Gcf.figs):
for ax in figmanager.canvas.figure.axes:
# copied from Figure.subplots_adjust
if not isinstance(ax, SubplotBase):
# Check if sharing a subplots axis
if ax._sharex is not None and isinstance(ax._sharex, SubplotBase):
if ax._sharex.get_subplotspec().get_gridspec() == self:
ax._sharex.update_params()
ax.set_position(ax._sharex.figbox)
elif ax._sharey is not None and isinstance(ax._sharey,SubplotBase):
if ax._sharey.get_subplotspec().get_gridspec() == self:
ax._sharey.update_params()
ax.set_position(ax._sharey.figbox)
else:
ss = ax.get_subplotspec().get_topmost_subplotspec()
if ss.get_gridspec() == self:
ax.update_params()
ax.set_position(ax.figbox)
def get_subplot_params(self, fig=None):
"""
return a dictionary of subplot layout parameters. The default
parameters are from rcParams unless a figure attribute is set.
"""
from matplotlib.figure import SubplotParams
import copy
if fig is None:
kw = dict([(k, rcParams["figure.subplot."+k]) \
for k in self._AllowedKeys])
subplotpars = SubplotParams(**kw)
else:
subplotpars = copy.copy(fig.subplotpars)
update_kw = dict([(k, getattr(self, k)) for k in self._AllowedKeys])
subplotpars.update(**update_kw)
return subplotpars
def locally_modified_subplot_params(self):
return [k for k in self._AllowedKeys if getattr(self, k)]
def tight_layout(self, fig, renderer=None, pad=1.08, h_pad=None, w_pad=None, rect=None):
"""
Adjust subplot parameters to give specified padding.
Parameters:
pad : float
padding between the figure edge and the edges of subplots, as a fraction of the font-size.
h_pad, w_pad : float
padding (height/width) between edges of adjacent subplots.
Defaults to `pad_inches`.
rect : if rect is given, it is interpreted as a rectangle
(left, bottom, right, top) in the normalized figure
coordinate that the whole subplots area (including
labels) will fit into. Default is (0, 0, 1, 1).
"""
from .tight_layout import (get_subplotspec_list,
get_tight_layout_figure,
get_renderer)
subplotspec_list = get_subplotspec_list(fig.axes, grid_spec=self)
if None in subplotspec_list:
warnings.warn("This figure includes Axes that are not "
"compatible with tight_layout, so its "
"results might be incorrect.")
if renderer is None:
renderer = get_renderer(fig)
kwargs = get_tight_layout_figure(fig, fig.axes, subplotspec_list,
renderer,
pad=pad, h_pad=h_pad, w_pad=w_pad,
rect=rect,
)
self.update(**kwargs)
class GridSpecFromSubplotSpec(GridSpecBase):
"""
GridSpec whose subplot layout parameters are inherited from the
location specified by a given SubplotSpec.
"""
def __init__(self, nrows, ncols,
subplot_spec,
wspace=None, hspace=None,
height_ratios=None, width_ratios=None):
"""
The number of rows and number of columns of the grid need to
be set. An instance of SubplotSpec is also needed to be set
from which the layout parameters will be inherited. The wspace
and hspace of the layout can be optionally specified or the
default values (from the figure or rcParams) will be used.
"""
self._wspace=wspace
self._hspace=hspace
self._subplot_spec = subplot_spec
GridSpecBase.__init__(self, nrows, ncols,
width_ratios=width_ratios,
height_ratios=height_ratios)
def get_subplot_params(self, fig=None):
"""
return a dictionary of subplot layout parameters.
"""
if fig is None:
hspace = rcParams["figure.subplot.hspace"]
wspace = rcParams["figure.subplot.wspace"]
else:
hspace = fig.subplotpars.hspace
wspace = fig.subplotpars.wspace
if self._hspace is not None:
hspace = self._hspace
if self._wspace is not None:
wspace = self._wspace
figbox = self._subplot_spec.get_position(fig, return_all=False)
left, bottom, right, top = figbox.extents
from matplotlib.figure import SubplotParams
sp = SubplotParams(left=left,
right=right,
bottom=bottom,
top=top,
wspace=wspace,
hspace=hspace)
return sp
def get_topmost_subplotspec(self):
'get the topmost SubplotSpec instance associated with the subplot'
return self._subplot_spec.get_topmost_subplotspec()
class SubplotSpec(object):
"""
specifies the location of the subplot in the given *GridSpec*.
"""
def __init__(self, gridspec, num1, num2=None):
"""
The subplot will occupy the num1-th cell of the given
gridspec. If num2 is provided, the subplot will span between
num1-th cell and num2-th cell.
        The index starts from 0.
"""
rows, cols = gridspec.get_geometry()
total = rows*cols
self._gridspec = gridspec
self.num1 = num1
self.num2 = num2
def get_gridspec(self):
return self._gridspec
def get_geometry(self):
"""
        get the subplot geometry, e.g., 2,2,3. Unlike SubplotParams,
index is 0-based
"""
rows, cols = self.get_gridspec().get_geometry()
return rows, cols, self.num1, self.num2
def get_position(self, fig, return_all=False):
"""
update the subplot position from fig.subplotpars
"""
gridspec = self.get_gridspec()
nrows, ncols = gridspec.get_geometry()
figBottoms, figTops, figLefts, figRights = \
gridspec.get_grid_positions(fig)
rowNum, colNum = divmod(self.num1, ncols)
figBottom = figBottoms[rowNum]
figTop = figTops[rowNum]
figLeft = figLefts[colNum]
figRight = figRights[colNum]
if self.num2 is not None:
rowNum2, colNum2 = divmod(self.num2, ncols)
figBottom2 = figBottoms[rowNum2]
figTop2 = figTops[rowNum2]
figLeft2 = figLefts[colNum2]
figRight2 = figRights[colNum2]
figBottom = min(figBottom, figBottom2)
figLeft = min(figLeft, figLeft2)
figTop = max(figTop, figTop2)
figRight = max(figRight, figRight2)
figbox = mtransforms.Bbox.from_extents(figLeft, figBottom,
figRight, figTop)
if return_all:
return figbox, rowNum, colNum, nrows, ncols
else:
return figbox
def get_topmost_subplotspec(self):
'get the topmost SubplotSpec instance associated with the subplot'
gridspec = self.get_gridspec()
if hasattr(gridspec, "get_topmost_subplotspec"):
return gridspec.get_topmost_subplotspec()
else:
return self
def __eq__(self, other):
# check to make sure other has the attributes
# we need to do the comparison
if not (hasattr(other, '_gridspec') and
hasattr(other, 'num1') and
hasattr(other, 'num2')):
return False
return all((self._gridspec == other._gridspec,
self.num1 == other.num1,
self.num2 == other.num2))
def __hash__(self):
return (hash(self._gridspec) ^
hash(self.num1) ^
hash(self.num2))
| lgpl-3.0 |
jayantk/pnp | experiments/dipart/scripts/visualize/generate_heatmap.py | 1 | 1411 | #!/usr/bin/python
# Generate heatmap of points
import numpy as np
import seaborn as sns
sns.set()
import matplotlib.pyplot as plt
from heatmap_data import *
# image_name=
# im = plt.imread(image_name);
# implot = plt.imshow(im);
# Load the example flights dataset and convert to long-form
# flights_long = sns.load_dataset("flights")
# flights = flights_long.pivot("month", "year", "passengers")
def sample_kde_data(data):
u = np.exp(data)
z = np.sum(u)
p = (u / z) * 1000
xs = []
ys = []
for yind in xrange(len(p)):
for xind in xrange(len(p[yind])):
c = int(p[yind][xind])
xs += [xind] * c
ys += [NUM_POINTS - yind] * c
return (np.array(xs), np.array(ys))
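# What sample_kde_data does, in short: it treats `data` as log-weights, applies a
# softmax-style normalization p = exp(data) / sum(exp(data)) scaled to ~1000 samples,
# and emits each grid cell (x, NUM_POINTS - y) repeated in proportion to its weight,
# so that sns.kdeplot can re-estimate a smooth density from those samples.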
NUM_POINTS=25
def plot_kde(data, cmap):
(xs, ys) = sample_kde_data(data)
print len(xs)
sns.kdeplot(xs, ys, cmap=cmap, shade=True, shade_lowest=False, clip=[[0,NUM_POINTS], [0, NUM_POINTS]], alpha=0.5)
# img = plt.imread("data/dqa_parts_v1/fighter-jet/fighter-jet_0000.png")
img = plt.imread("data/dqa_parts_v1/antelope/antelope_0000.png")
fig, ax = plt.subplots()
ax.imshow(img, extent=[0, NUM_POINTS, 0, NUM_POINTS])
plot_kde(neck_data3, "Blues")
# plot_kde(leg_data2, "Reds")
# plot_kde(tail_data2, "Greens")
plt.axis('off')
plt.show()
# Draw a heatmap with the numeric values in each cell
# sns.heatmap(data, cbar=False, cmap="coolwarm")
# plt.show()
| apache-2.0 |
treycausey/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
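# Note: `inlier_mask_` marks the samples RANSAC kept for its final fit, so
# `outlier_mask` is simply its logical complement.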
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
vorwerkc/pymatgen | pymatgen/analysis/chemenv/coordination_environments/voronoi.py | 5 | 44209 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module contains the object used to describe the possible bonded atoms based on a Voronoi analysis.
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Geoffroy Hautier"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
import logging
import time
import numpy as np
from monty.json import MSONable
from scipy.spatial import Voronoi
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import (
get_lower_and_upper_f,
my_solid_angle,
rectangle_surface_intersection,
)
from pymatgen.analysis.chemenv.utils.defs_utils import AdditionalConditions
from pymatgen.analysis.chemenv.utils.math_utils import normal_cdf_step
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
def from_bson_voronoi_list2(bson_nb_voro_list2, structure):
"""
Returns the voronoi_list needed for the VoronoiContainer object from a bson-encoded voronoi_list.
Args:
bson_nb_voro_list2: List of periodic sites involved in the Voronoi.
structure: Structure object.
Returns:
The voronoi_list needed for the VoronoiContainer (with PeriodicSites as keys of the dictionary - not
allowed in the BSON format).
"""
voronoi_list = [None] * len(bson_nb_voro_list2)
for isite, voro in enumerate(bson_nb_voro_list2):
if voro is None or voro == "None":
continue
voronoi_list[isite] = []
for psd, dd in voro:
struct_site = structure[dd["index"]]
periodic_site = PeriodicSite(
struct_site._species,
struct_site.frac_coords + psd[1],
struct_site._lattice,
properties=struct_site.properties,
)
dd["site"] = periodic_site
voronoi_list[isite].append(dd)
return voronoi_list
class DetailedVoronoiContainer(MSONable):
"""
Class used to store the full Voronoi of a given structure.
"""
AC = AdditionalConditions()
default_voronoi_cutoff = 10.0
default_normalized_distance_tolerance = 1e-5
default_normalized_angle_tolerance = 1e-3
def __init__(
self,
structure=None,
voronoi_list2=None,
voronoi_cutoff=default_voronoi_cutoff,
isites=None,
normalized_distance_tolerance=default_normalized_distance_tolerance,
normalized_angle_tolerance=default_normalized_angle_tolerance,
additional_conditions=None,
valences=None,
maximum_distance_factor=None,
minimum_angle_factor=None,
):
"""
Constructor for the VoronoiContainer object. Either a structure is given, in which case the Voronoi is
computed, or the different components of the VoronoiContainer are given (used in the from_dict method).
Args:
structure: Structure for which the Voronoi is computed.
            voronoi_list2: List of Voronoi polyhedra for each site.
            voronoi_cutoff: cutoff used for the voronoi.
            isites: indices of sites for which the Voronoi has to be computed.
            normalized_distance_tolerance: Tolerance for two normalized distances to be considered equal.
            normalized_angle_tolerance: Tolerance for two normalized angles to be considered equal.
additional_conditions: Additional conditions to be used.
valences: Valences of all the sites in the structure (used when additional conditions require it).
maximum_distance_factor: The maximum distance factor to be considered.
minimum_angle_factor: The minimum angle factor to be considered.
Raises:
RuntimeError if the Voronoi cannot be constructed.
"""
self.normalized_distance_tolerance = normalized_distance_tolerance
self.normalized_angle_tolerance = normalized_angle_tolerance
if additional_conditions is None:
self.additional_conditions = [self.AC.NONE, self.AC.ONLY_ACB]
else:
self.additional_conditions = additional_conditions
self.valences = valences
self.maximum_distance_factor = maximum_distance_factor
self.minimum_angle_factor = minimum_angle_factor
if isites is None:
indices = list(range(len(structure)))
else:
indices = isites
self.structure = structure
logging.debug("Setting Voronoi list")
if voronoi_list2 is not None:
self.voronoi_list2 = voronoi_list2
else:
self.setup_voronoi_list(indices=indices, voronoi_cutoff=voronoi_cutoff)
logging.debug("Setting neighbors distances and angles")
t1 = time.process_time()
self.setup_neighbors_distances_and_angles(indices=indices)
t2 = time.process_time()
logging.debug("Neighbors distances and angles set up in {:.2f} seconds".format(t2 - t1))
def setup_voronoi_list(self, indices, voronoi_cutoff):
"""
Set up of the voronoi list of neighbours by calling qhull.
Args:
indices: indices of the sites for which the Voronoi is needed.
voronoi_cutoff: Voronoi cutoff for the search of neighbours.
Raises:
RuntimeError: If an infinite vertex is found in the voronoi construction.
"""
self.voronoi_list2 = [None] * len(self.structure)
self.voronoi_list_coords = [None] * len(self.structure)
logging.debug("Getting all neighbors in structure")
struct_neighbors = self.structure.get_all_neighbors(voronoi_cutoff, include_index=True)
t1 = time.process_time()
logging.debug("Setting up Voronoi list :")
for jj, isite in enumerate(indices):
logging.debug(" - Voronoi analysis for site #{:d} ({:d}/{:d})".format(isite, jj + 1, len(indices)))
site = self.structure[isite]
neighbors1 = [(site, 0.0, isite)]
neighbors1.extend(struct_neighbors[isite])
distances = [i[1] for i in sorted(neighbors1, key=lambda s: s[1])]
neighbors = [i[0] for i in sorted(neighbors1, key=lambda s: s[1])]
qvoronoi_input = [s.coords for s in neighbors]
voro = Voronoi(points=qvoronoi_input, qhull_options="o Fv")
all_vertices = voro.vertices
results2 = []
maxangle = 0.0
mindist = 10000.0
for iridge, ridge_points in enumerate(voro.ridge_points):
if 0 in ridge_points:
ridge_vertices_indices = voro.ridge_vertices[iridge]
if -1 in ridge_vertices_indices:
raise RuntimeError(
"This structure is pathological," " infinite vertex in the voronoi " "construction"
)
ridge_point2 = max(ridge_points)
facets = [all_vertices[i] for i in ridge_vertices_indices]
sa = my_solid_angle(site.coords, facets)
maxangle = max([sa, maxangle])
mindist = min([mindist, distances[ridge_point2]])
for iii, sss in enumerate(self.structure):
if neighbors[ridge_point2].is_periodic_image(sss, tolerance=1.0e-6):
myindex = iii
break
results2.append(
{
"site": neighbors[ridge_point2],
"angle": sa,
"distance": distances[ridge_point2],
"index": myindex,
}
)
for dd in results2:
dd["normalized_angle"] = dd["angle"] / maxangle
dd["normalized_distance"] = dd["distance"] / mindist
self.voronoi_list2[isite] = results2
self.voronoi_list_coords[isite] = np.array([dd["site"].coords for dd in results2])
t2 = time.process_time()
logging.debug("Voronoi list set up in {:.2f} seconds".format(t2 - t1))
def setup_neighbors_distances_and_angles(self, indices):
"""
Initializes the angle and distance separations.
Args:
indices: Indices of the sites for which the Voronoi is needed.
"""
self.neighbors_distances = [None] * len(self.structure)
self.neighbors_normalized_distances = [None] * len(self.structure)
self.neighbors_angles = [None] * len(self.structure)
self.neighbors_normalized_angles = [None] * len(self.structure)
for isite in indices:
results = self.voronoi_list2[isite]
if results is None:
continue
# Initializes neighbors distances and normalized distances groups
self.neighbors_distances[isite] = []
self.neighbors_normalized_distances[isite] = []
normalized_distances = [nb_dict["normalized_distance"] for nb_dict in results]
isorted_distances = np.argsort(normalized_distances)
self.neighbors_normalized_distances[isite].append(
{
"min": normalized_distances[isorted_distances[0]],
"max": normalized_distances[isorted_distances[0]],
}
)
self.neighbors_distances[isite].append(
{
"min": results[isorted_distances[0]]["distance"],
"max": results[isorted_distances[0]]["distance"],
}
)
icurrent = 0
nb_indices = {int(isorted_distances[0])}
dnb_indices = {int(isorted_distances[0])}
for idist in iter(isorted_distances):
wd = normalized_distances[idist]
if self.maximum_distance_factor is not None:
if wd > self.maximum_distance_factor:
self.neighbors_normalized_distances[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_distances[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_normalized_distances[isite][icurrent]["dnb_indices"] = list(dnb_indices)
self.neighbors_distances[isite][icurrent]["dnb_indices"] = list(dnb_indices)
break
if np.isclose(
wd,
self.neighbors_normalized_distances[isite][icurrent]["max"],
rtol=0.0,
atol=self.normalized_distance_tolerance,
):
self.neighbors_normalized_distances[isite][icurrent]["max"] = wd
self.neighbors_distances[isite][icurrent]["max"] = results[idist]["distance"]
dnb_indices.add(int(idist))
else:
self.neighbors_normalized_distances[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_distances[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_normalized_distances[isite][icurrent]["dnb_indices"] = list(dnb_indices)
self.neighbors_distances[isite][icurrent]["dnb_indices"] = list(dnb_indices)
dnb_indices = {int(idist)}
self.neighbors_normalized_distances[isite].append({"min": wd, "max": wd})
self.neighbors_distances[isite].append(
{
"min": results[idist]["distance"],
"max": results[idist]["distance"],
}
)
icurrent += 1
nb_indices.add(int(idist))
else:
self.neighbors_normalized_distances[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_distances[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_normalized_distances[isite][icurrent]["dnb_indices"] = list(dnb_indices)
self.neighbors_distances[isite][icurrent]["dnb_indices"] = list(dnb_indices)
for idist in range(len(self.neighbors_distances[isite]) - 1):
dist_dict = self.neighbors_distances[isite][idist]
dist_dict_next = self.neighbors_distances[isite][idist + 1]
dist_dict["next"] = dist_dict_next["min"]
ndist_dict = self.neighbors_normalized_distances[isite][idist]
ndist_dict_next = self.neighbors_normalized_distances[isite][idist + 1]
ndist_dict["next"] = ndist_dict_next["min"]
if self.maximum_distance_factor is not None:
dfact = self.maximum_distance_factor
else:
dfact = self.default_voronoi_cutoff / self.neighbors_distances[isite][0]["min"]
self.neighbors_normalized_distances[isite][-1]["next"] = dfact
self.neighbors_distances[isite][-1]["next"] = dfact * self.neighbors_distances[isite][0]["min"]
# Initializes neighbors angles and normalized angles groups
self.neighbors_angles[isite] = []
self.neighbors_normalized_angles[isite] = []
normalized_angles = [nb_dict["normalized_angle"] for nb_dict in results]
isorted_angles = np.argsort(normalized_angles)[::-1]
self.neighbors_normalized_angles[isite].append(
{
"max": normalized_angles[isorted_angles[0]],
"min": normalized_angles[isorted_angles[0]],
}
)
self.neighbors_angles[isite].append(
{
"max": results[isorted_angles[0]]["angle"],
"min": results[isorted_angles[0]]["angle"],
}
)
icurrent = 0
nb_indices = {int(isorted_angles[0])}
dnb_indices = {int(isorted_angles[0])}
for iang in iter(isorted_angles):
wa = normalized_angles[iang]
if self.minimum_angle_factor is not None:
if wa < self.minimum_angle_factor:
self.neighbors_normalized_angles[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_angles[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_normalized_angles[isite][icurrent]["dnb_indices"] = list(dnb_indices)
self.neighbors_angles[isite][icurrent]["dnb_indices"] = list(dnb_indices)
break
if np.isclose(
wa,
self.neighbors_normalized_angles[isite][icurrent]["min"],
rtol=0.0,
atol=self.normalized_angle_tolerance,
):
self.neighbors_normalized_angles[isite][icurrent]["min"] = wa
self.neighbors_angles[isite][icurrent]["min"] = results[iang]["angle"]
dnb_indices.add(int(iang))
else:
self.neighbors_normalized_angles[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_angles[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_normalized_angles[isite][icurrent]["dnb_indices"] = list(dnb_indices)
self.neighbors_angles[isite][icurrent]["dnb_indices"] = list(dnb_indices)
dnb_indices = {int(iang)}
self.neighbors_normalized_angles[isite].append({"max": wa, "min": wa})
self.neighbors_angles[isite].append({"max": results[iang]["angle"], "min": results[iang]["angle"]})
icurrent += 1
nb_indices.add(int(iang))
else:
self.neighbors_normalized_angles[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_angles[isite][icurrent]["nb_indices"] = list(nb_indices)
self.neighbors_normalized_angles[isite][icurrent]["dnb_indices"] = list(dnb_indices)
self.neighbors_angles[isite][icurrent]["dnb_indices"] = list(dnb_indices)
for iang in range(len(self.neighbors_angles[isite]) - 1):
ang_dict = self.neighbors_angles[isite][iang]
ang_dict_next = self.neighbors_angles[isite][iang + 1]
ang_dict["next"] = ang_dict_next["max"]
nang_dict = self.neighbors_normalized_angles[isite][iang]
nang_dict_next = self.neighbors_normalized_angles[isite][iang + 1]
nang_dict["next"] = nang_dict_next["max"]
if self.minimum_angle_factor is not None:
afact = self.minimum_angle_factor
else:
afact = 0.0
self.neighbors_normalized_angles[isite][-1]["next"] = afact
self.neighbors_angles[isite][-1]["next"] = afact * self.neighbors_angles[isite][0]["max"]
def _precompute_additional_conditions(self, ivoronoi, voronoi, valences):
additional_conditions = {ac: [] for ac in self.additional_conditions}
for ips, (ps, vals) in enumerate(voronoi):
for ac in self.additional_conditions:
additional_conditions[ac].append(
self.AC.check_condition(
condition=ac,
structure=self.structure,
parameters={
"valences": valences,
"neighbor_index": vals["index"],
"site_index": ivoronoi,
},
)
)
return additional_conditions
def _precompute_distance_conditions(self, ivoronoi, voronoi):
distance_conditions = []
for idp, dp_dict in enumerate(self.neighbors_normalized_distances[ivoronoi]):
distance_conditions.append([])
dp = dp_dict["max"]
for ips, (ps, vals) in enumerate(voronoi):
distance_conditions[idp].append(
vals["normalized_distance"] <= dp
or np.isclose(
vals["normalized_distance"],
dp,
rtol=0.0,
atol=self.normalized_distance_tolerance / 2.0,
)
)
return distance_conditions
def _precompute_angle_conditions(self, ivoronoi, voronoi):
angle_conditions = []
for iap, ap_dict in enumerate(self.neighbors_normalized_angles[ivoronoi]):
angle_conditions.append([])
ap = ap_dict["max"]
for ips, (ps, vals) in enumerate(voronoi):
angle_conditions[iap].append(
vals["normalized_angle"] >= ap
or np.isclose(
vals["normalized_angle"],
ap,
rtol=0.0,
atol=self.normalized_angle_tolerance / 2.0,
)
)
return angle_conditions
# def neighbors_map(self, isite, distfactor, angfactor, additional_condition):
# if self.neighbors_normalized_distances[isite] is None:
# return None
# dist_where = np.argwhere(
# np.array([wd['min'] for wd in self.neighbors_normalized_distances[isite]]) <= distfactor)
# if len(dist_where) == 0:
# return None
# idist = dist_where[-1][0]
# ang_where = np.argwhere(np.array([wa['max'] for wa in self.neighbors_normalized_angles[isite]]) >= angfactor)
# if len(ang_where) == 0:
# return None
# iang = ang_where[0][0]
# if self.additional_conditions.count(additional_condition) != 1:
# return None
# i_additional_condition = self.additional_conditions.index(additional_condition)
# return {'i_distfactor': idist, 'i_angfactor': iang, 'i_additional_condition': i_additional_condition}
def neighbors_surfaces(self, isite, surface_calculation_type=None, max_dist=2.0):
"""
Get the different surfaces corresponding to the different distance-angle cutoffs for a given site.
Args:
isite: Index of the site
surface_calculation_type: How to compute the surface.
max_dist: The maximum distance factor to be considered.
Returns:
Surfaces for each distance-angle cutoff.
"""
if self.voronoi_list2[isite] is None:
return None
bounds_and_limits = self.voronoi_parameters_bounds_and_limits(isite, surface_calculation_type, max_dist)
distance_bounds = bounds_and_limits["distance_bounds"]
angle_bounds = bounds_and_limits["angle_bounds"]
surfaces = np.zeros((len(distance_bounds), len(angle_bounds)), np.float_)
for idp in range(len(distance_bounds) - 1):
this_dist_plateau = distance_bounds[idp + 1] - distance_bounds[idp]
for iap in range(len(angle_bounds) - 1):
this_ang_plateau = angle_bounds[iap + 1] - angle_bounds[iap]
surfaces[idp][iap] = np.absolute(this_dist_plateau * this_ang_plateau)
return surfaces
def neighbors_surfaces_bounded(self, isite, surface_calculation_options=None):
"""
Get the different surfaces (using boundaries) corresponding to the different distance-angle cutoffs
for a given site.
Args:
isite: Index of the site.
surface_calculation_options: Options for the boundaries.
Returns:
Surfaces for each distance-angle cutoff.
"""
if self.voronoi_list2[isite] is None:
return None
if surface_calculation_options is None:
surface_calculation_options = {
"type": "standard_elliptic",
"distance_bounds": {"lower": 1.2, "upper": 1.8},
"angle_bounds": {"lower": 0.1, "upper": 0.8},
}
if surface_calculation_options["type"] in [
"standard_elliptic",
"standard_diamond",
"standard_spline",
]:
plot_type = {
"distance_parameter": ("initial_normalized", None),
"angle_parameter": ("initial_normalized", None),
}
else:
raise ValueError(
'Type "{}" for the surface calculation in DetailedVoronoiContainer '
"is invalid".format(surface_calculation_options["type"])
)
max_dist = surface_calculation_options["distance_bounds"]["upper"] + 0.1
bounds_and_limits = self.voronoi_parameters_bounds_and_limits(
isite=isite, plot_type=plot_type, max_dist=max_dist
)
distance_bounds = bounds_and_limits["distance_bounds"]
angle_bounds = bounds_and_limits["angle_bounds"]
lower_and_upper_functions = get_lower_and_upper_f(surface_calculation_options=surface_calculation_options)
mindist = surface_calculation_options["distance_bounds"]["lower"]
maxdist = surface_calculation_options["distance_bounds"]["upper"]
minang = surface_calculation_options["angle_bounds"]["lower"]
maxang = surface_calculation_options["angle_bounds"]["upper"]
f_lower = lower_and_upper_functions["lower"]
f_upper = lower_and_upper_functions["upper"]
surfaces = np.zeros((len(distance_bounds), len(angle_bounds)), np.float_)
for idp in range(len(distance_bounds) - 1):
dp1 = distance_bounds[idp]
dp2 = distance_bounds[idp + 1]
if dp2 < mindist or dp1 > maxdist:
continue
if dp1 < mindist:
d1 = mindist
else:
d1 = dp1
if dp2 > maxdist:
d2 = maxdist
else:
d2 = dp2
for iap in range(len(angle_bounds) - 1):
ap1 = angle_bounds[iap]
ap2 = angle_bounds[iap + 1]
if ap1 > ap2:
ap1 = angle_bounds[iap + 1]
ap2 = angle_bounds[iap]
if ap2 < minang or ap1 > maxang:
continue
intersection, interror = rectangle_surface_intersection(
rectangle=((d1, d2), (ap1, ap2)),
f_lower=f_lower,
f_upper=f_upper,
bounds_lower=[mindist, maxdist],
bounds_upper=[mindist, maxdist],
check=False,
)
surfaces[idp][iap] = intersection
return surfaces
@staticmethod
def _get_vertices_dist_ang_indices(parameter_indices_list):
pp0 = [pp[0] for pp in parameter_indices_list]
pp1 = [pp[1] for pp in parameter_indices_list]
min_idist = min(pp0)
min_iang = min(pp1)
max_idist = max(pp0)
max_iang = max(pp1)
i_min_angs = np.argwhere(np.array(pp1) == min_iang)
i_max_dists = np.argwhere(np.array(pp0) == max_idist)
pp0_at_min_iang = [pp0[ii[0]] for ii in i_min_angs]
pp1_at_max_idist = [pp1[ii[0]] for ii in i_max_dists]
max_idist_at_min_iang = max(pp0_at_min_iang)
min_iang_at_max_idist = min(pp1_at_max_idist)
p1 = (min_idist, min_iang)
p2 = (max_idist_at_min_iang, min_iang)
p3 = (max_idist_at_min_iang, min_iang_at_max_idist)
p4 = (max_idist, min_iang_at_max_idist)
p5 = (max_idist, max_iang)
p6 = (min_idist, max_iang)
return [p1, p2, p3, p4, p5, p6]
def maps_and_surfaces(
self,
isite,
surface_calculation_type=None,
max_dist=2.0,
additional_conditions=None,
):
"""
Get the different surfaces and their cn_map corresponding to the different distance-angle cutoffs
for a given site.
Args:
isite: Index of the site
surface_calculation_type: How to compute the surface.
max_dist: The maximum distance factor to be considered.
additional_conditions: If additional conditions have to be considered.
Returns:
Surfaces and cn_map's for each distance-angle cutoff.
"""
if self.voronoi_list2[isite] is None:
return None
if additional_conditions is None:
additional_conditions = [self.AC.ONLY_ACB]
surfaces = self.neighbors_surfaces(
isite=isite,
surface_calculation_type=surface_calculation_type,
max_dist=max_dist,
)
maps_and_surfaces = []
for cn, value in self._unique_coordinated_neighbors_parameters_indices[isite].items(): # pylint: disable=E1101
for imap, list_parameters_indices in enumerate(value):
thissurf = 0.0
for (idp, iap, iacb) in list_parameters_indices:
if iacb in additional_conditions:
thissurf += surfaces[idp, iap]
maps_and_surfaces.append(
{
"map": (cn, imap),
"surface": thissurf,
"parameters_indices": list_parameters_indices,
}
)
return maps_and_surfaces
def maps_and_surfaces_bounded(self, isite, surface_calculation_options=None, additional_conditions=None):
"""
Get the different surfaces (using boundaries) and their cn_map corresponding to the different
distance-angle cutoffs for a given site.
Args:
isite: Index of the site
surface_calculation_options: Options for the boundaries.
additional_conditions: If additional conditions have to be considered.
Returns:
Surfaces and cn_map's for each distance-angle cutoff.
"""
if self.voronoi_list2[isite] is None:
return None
if additional_conditions is None:
additional_conditions = [self.AC.ONLY_ACB]
surfaces = self.neighbors_surfaces_bounded(isite=isite, surface_calculation_options=surface_calculation_options)
maps_and_surfaces = []
for cn, value in self._unique_coordinated_neighbors_parameters_indices[isite].items(): # pylint: disable=E1101
for imap, list_parameters_indices in enumerate(value):
thissurf = 0.0
for (idp, iap, iacb) in list_parameters_indices:
if iacb in additional_conditions:
thissurf += surfaces[idp, iap]
maps_and_surfaces.append(
{
"map": (cn, imap),
"surface": thissurf,
"parameters_indices": list_parameters_indices,
}
)
return maps_and_surfaces
def neighbors(self, isite, distfactor, angfactor, additional_condition=None):
"""
Get the neighbors of a given site corresponding to a given distance and angle factor.
Args:
isite: Index of the site.
distfactor: Distance factor.
angfactor: Angle factor.
additional_condition: Additional condition to be used (currently not implemented).
Returns:
List of neighbors of the given site for the given distance and angle factors.
"""
idist = None
dfact = None
for iwd, wd in enumerate(self.neighbors_normalized_distances[isite]):
if distfactor >= wd["min"]:
idist = iwd
dfact = wd["max"]
else:
break
iang = None
afact = None
for iwa, wa in enumerate(self.neighbors_normalized_angles[isite]):
if angfactor <= wa["max"]:
iang = iwa
afact = wa["min"]
else:
break
if idist is None or iang is None:
raise ValueError("Distance or angle parameter not found ...")
return [
nb
for nb in self.voronoi_list2[isite]
if nb["normalized_distance"] <= dfact and nb["normalized_angle"] >= afact
]
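    # Usage sketch (illustrative, assuming a pymatgen Structure `struct`):
    #   dvc = DetailedVoronoiContainer(structure=struct)
    #   nbs = dvc.neighbors(isite=0, distfactor=1.4, angfactor=0.3)
    # returns the Voronoi neighbors of site 0 within those normalized cutoffs.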
def voronoi_parameters_bounds_and_limits(self, isite, plot_type, max_dist):
"""
Get the different boundaries and limits of the distance and angle factors for the given site.
Args:
isite: Index of the site.
plot_type: Types of distance/angle parameters to get.
max_dist: Maximum distance factor.
Returns:
Distance and angle bounds and limits.
"""
# Initializes the distance and angle parameters
if self.voronoi_list2[isite] is None:
return None
if plot_type is None:
plot_type = {
"distance_parameter": ("initial_inverse_opposite", None),
"angle_parameter": ("initial_opposite", None),
}
dd = [dist["min"] for dist in self.neighbors_normalized_distances[isite]]
dd[0] = 1.0
if plot_type["distance_parameter"][0] == "initial_normalized":
dd.append(max_dist)
distance_bounds = np.array(dd)
dist_limits = [1.0, max_dist]
elif plot_type["distance_parameter"][0] == "initial_inverse_opposite":
ddinv = [1.0 / dist for dist in dd]
ddinv.append(0.0)
distance_bounds = np.array([1.0 - invdist for invdist in ddinv])
dist_limits = [0.0, 1.0]
elif plot_type["distance_parameter"][0] == "initial_inverse3_opposite":
ddinv = [1.0 / dist ** 3.0 for dist in dd]
ddinv.append(0.0)
distance_bounds = np.array([1.0 - invdist for invdist in ddinv])
dist_limits = [0.0, 1.0]
else:
            raise NotImplementedError(
                'Plotting type "{}" for the distance is not implemented'.format(plot_type["distance_parameter"])
            )
if plot_type["angle_parameter"][0] == "initial_normalized":
aa = [0.0]
aa.extend([ang["max"] for ang in self.neighbors_normalized_angles[isite]])
angle_bounds = np.array(aa)
elif plot_type["angle_parameter"][0] == "initial_opposite":
aa = [0.0]
aa.extend([ang["max"] for ang in self.neighbors_normalized_angles[isite]])
aa = [1.0 - ang for ang in aa]
angle_bounds = np.array(aa)
else:
            raise NotImplementedError(
                'Plotting type "{}" for the angle is not implemented'.format(plot_type["angle_parameter"])
            )
ang_limits = [0.0, 1.0]
return {
"distance_bounds": distance_bounds,
"distance_limits": dist_limits,
"angle_bounds": angle_bounds,
"angle_limits": ang_limits,
}
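    # Illustrative note (not part of the original source): a typical ``plot_type``
    # argument for voronoi_parameters_bounds_and_limits mirrors the default built
    # above, e.g.
    #
    #     plot_type = {
    #         "distance_parameter": ("initial_inverse_opposite", None),
    #         "angle_parameter": ("initial_opposite", None),
    #     }
    #
    # with "initial_normalized" and "initial_inverse3_opposite" as the other
    # supported distance options and "initial_normalized" as the other angle option.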
def is_close_to(self, other, rtol=0.0, atol=1e-8):
"""
Whether two DetailedVoronoiContainer objects are close to each other.
Args:
other: Another DetailedVoronoiContainer to be compared with.
rtol: Relative tolerance to compare values.
atol: Absolute tolerance to compare values.
Returns:
True if the two DetailedVoronoiContainer are close to each other.
"""
isclose = (
np.isclose(
self.normalized_angle_tolerance,
other.normalized_angle_tolerance,
rtol=rtol,
atol=atol,
)
and np.isclose(
self.normalized_distance_tolerance,
other.normalized_distance_tolerance,
rtol=rtol,
atol=atol,
)
and self.additional_conditions == other.additional_conditions
and self.valences == other.valences
)
if not isclose:
return isclose
for isite, site_voronoi in enumerate(self.voronoi_list2):
self_to_other_nbs = {}
for inb, nb in enumerate(site_voronoi):
if nb is None:
if other.voronoi_list2[isite] is None:
continue
return False
if other.voronoi_list2[isite] is None:
return False
nb_other = None
for inb2, nb2 in enumerate(other.voronoi_list2[isite]):
if nb["site"] == nb2["site"]:
self_to_other_nbs[inb] = inb2
nb_other = nb2
break
if nb_other is None:
return False
if not np.isclose(nb["distance"], nb_other["distance"], rtol=rtol, atol=atol):
return False
if not np.isclose(nb["angle"], nb_other["angle"], rtol=rtol, atol=atol):
return False
if not np.isclose(
nb["normalized_distance"],
nb_other["normalized_distance"],
rtol=rtol,
atol=atol,
):
return False
if not np.isclose(
nb["normalized_angle"],
nb_other["normalized_angle"],
rtol=rtol,
atol=atol,
):
return False
if nb["index"] != nb_other["index"]:
return False
if nb["site"] != nb_other["site"]:
return False
return True
def get_rdf_figure(self, isite, normalized=True, figsize=None, step_function=None):
"""
Get the Radial Distribution Figure for a given site.
Args:
isite: Index of the site.
normalized: Whether to normalize distances.
figsize: Size of the figure.
step_function: Type of step function to be used for the RDF.
Returns:
Matplotlib figure.
"""
def dp_func(dp):
return 1.0 - 1.0 / np.power(dp, 3.0)
import matplotlib.pyplot as plt
if step_function is None:
step_function = {"type": "normal_cdf", "scale": 0.0001}
# Initializes the figure
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
subplot = fig.add_subplot(111)
if normalized:
dists = self.neighbors_normalized_distances[isite]
else:
dists = self.neighbors_distances[isite]
if step_function["type"] == "step_function":
isorted = np.argsort([dd["min"] for dd in dists])
sorted_dists = [dists[ii]["min"] for ii in isorted]
dnb_dists = [len(dists[ii]["dnb_indices"]) for ii in isorted]
xx = [0.0]
yy = [0.0]
for idist, dist in enumerate(sorted_dists):
xx.append(dist)
xx.append(dist)
yy.append(yy[-1])
yy.append(yy[-1] + dnb_dists[idist])
xx.append(1.1 * xx[-1])
yy.append(yy[-1])
elif step_function["type"] == "normal_cdf":
scale = step_function["scale"]
mydists = [dp_func(dd["min"]) for dd in dists]
mydcns = [len(dd["dnb_indices"]) for dd in dists]
xx = np.linspace(0.0, 1.1 * max(mydists), num=500)
yy = np.zeros_like(xx)
for idist, dist in enumerate(mydists):
yy += mydcns[idist] * normal_cdf_step(xx, mean=dist, scale=scale)
else:
raise ValueError('Step function of type "{}" is not allowed'.format(step_function["type"]))
subplot.plot(xx, yy)
return fig
def get_sadf_figure(self, isite, normalized=True, figsize=None, step_function=None):
"""
Get the Solid Angle Distribution Figure for a given site.
Args:
isite: Index of the site.
normalized: Whether to normalize angles.
figsize: Size of the figure.
step_function: Type of step function to be used for the SADF.
Returns:
Matplotlib figure.
"""
def ap_func(ap):
return np.power(ap, -0.1)
import matplotlib.pyplot as plt
if step_function is None:
step_function = {"type": "step_function", "scale": 0.0001}
# Initializes the figure
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
subplot = fig.add_subplot(111)
if normalized:
angs = self.neighbors_normalized_angles[isite]
else:
angs = self.neighbors_angles[isite]
if step_function["type"] == "step_function":
isorted = np.argsort([ap_func(aa["min"]) for aa in angs])
sorted_angs = [ap_func(angs[ii]["min"]) for ii in isorted]
dnb_angs = [len(angs[ii]["dnb_indices"]) for ii in isorted]
xx = [0.0]
yy = [0.0]
for iang, ang in enumerate(sorted_angs):
xx.append(ang)
xx.append(ang)
yy.append(yy[-1])
yy.append(yy[-1] + dnb_angs[iang])
xx.append(1.1 * xx[-1])
yy.append(yy[-1])
elif step_function["type"] == "normal_cdf":
scale = step_function["scale"]
myangs = [ap_func(aa["min"]) for aa in angs]
mydcns = [len(dd["dnb_indices"]) for dd in angs]
xx = np.linspace(0.0, 1.1 * max(myangs), num=500)
yy = np.zeros_like(xx)
for iang, ang in enumerate(myangs):
yy += mydcns[iang] * normal_cdf_step(xx, mean=ang, scale=scale)
else:
raise ValueError('Step function of type "{}" is not allowed'.format(step_function["type"]))
subplot.plot(xx, yy)
return fig
def __eq__(self, other):
return (
self.normalized_angle_tolerance == other.normalized_angle_tolerance
and self.normalized_distance_tolerance == other.normalized_distance_tolerance
and self.additional_conditions == other.additional_conditions
and self.valences == other.valences
and self.voronoi_list2 == other.voronoi_list2
and self.structure == other.structure
)
def __ne__(self, other):
return not self == other
def to_bson_voronoi_list2(self):
"""
        Transforms the voronoi_list2 into a bson_nb_voro_list2 that is BSON-encodable.
        Returns:
            bson_nb_voro_list2, to be used in the as_dict method.
"""
bson_nb_voro_list2 = [None] * len(self.voronoi_list2)
for ivoro, voro in enumerate(self.voronoi_list2):
if voro is None or voro == "None":
continue
site_voro = []
# {'site': neighbors[nn[1]],
# 'angle': sa,
# 'distance': distances[nn[1]],
# 'index': myindex}
for nb_dict in voro:
site = nb_dict["site"]
site_dict = {key: val for key, val in nb_dict.items() if key not in ["site"]}
# site_voro.append([ps.as_dict(), dd]) [float(c) for c in self.frac_coords]
diff = site.frac_coords - self.structure[nb_dict["index"]].frac_coords
site_voro.append([[nb_dict["index"], [float(c) for c in diff]], site_dict])
bson_nb_voro_list2[ivoro] = site_voro
return bson_nb_voro_list2
def as_dict(self):
"""
Bson-serializable dict representation of the VoronoiContainer.
Returns:
dictionary that is BSON-encodable.
"""
bson_nb_voro_list2 = self.to_bson_voronoi_list2()
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"bson_nb_voro_list2": bson_nb_voro_list2,
# "neighbors_lists": self.neighbors_lists,
"structure": self.structure.as_dict(),
"normalized_angle_tolerance": self.normalized_angle_tolerance,
"normalized_distance_tolerance": self.normalized_distance_tolerance,
"additional_conditions": self.additional_conditions,
"valences": self.valences,
"maximum_distance_factor": self.maximum_distance_factor,
"minimum_angle_factor": self.minimum_angle_factor,
}
@classmethod
def from_dict(cls, d):
"""
Reconstructs the VoronoiContainer object from a dict representation of the VoronoiContainer created using
the as_dict method.
Args:
d: dict representation of the VoronoiContainer object.
Returns:
VoronoiContainer object.
"""
structure = Structure.from_dict(d["structure"])
voronoi_list2 = from_bson_voronoi_list2(d["bson_nb_voro_list2"], structure)
maximum_distance_factor = d["maximum_distance_factor"] if "maximum_distance_factor" in d else None
minimum_angle_factor = d["minimum_angle_factor"] if "minimum_angle_factor" in d else None
return cls(
structure=structure,
voronoi_list2=voronoi_list2,
# neighbors_lists=neighbors_lists,
normalized_angle_tolerance=d["normalized_angle_tolerance"],
normalized_distance_tolerance=d["normalized_distance_tolerance"],
additional_conditions=d["additional_conditions"],
valences=d["valences"],
maximum_distance_factor=maximum_distance_factor,
minimum_angle_factor=minimum_angle_factor,
)
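def _serialization_roundtrip_example(dvc):
    """Hedged usage sketch, not part of the original module.

    Assuming ``dvc`` is an instance of the container defined above (the
    docstrings call it DetailedVoronoiContainer), the serialization helpers
    round-trip through a BSON-encodable dict and the copy compares as close
    to the original.
    """
    dvc_dict = dvc.as_dict()
    dvc_copy = dvc.__class__.from_dict(dvc_dict)
    assert dvc_copy.is_close_to(dvc)
    return dvc_copy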
| mit |
djgagne/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 214 | 4690 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# test that the feature score of the best features
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
adit-chandra/tensorflow | tensorflow/lite/experimental/micro/examples/micro_speech/apollo3/compare_1k.py | 11 | 5011 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Debugging script for checking calculation values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import struct
import matplotlib.pyplot as plt
import numpy as np
# import soundfile as sf
def new_data_to_array(fn, datatype='int16'):
"""Converts file information to an in-memory array."""
vals = []
with open(fn) as f:
for n, line in enumerate(f):
if n != 0:
vals.extend([int(v, 16) for v in line.split()])
b = ''.join(map(chr, vals))
if datatype == 'int8':
typestr = 'b'
arraylen = int(len(b))
elif datatype == 'int16':
typestr = 'h'
arraylen = int(len(b) // 2)
elif datatype == 'int32':
typestr = 'i'
arraylen = int(len(b) // 4)
if datatype == 'uint8':
typestr = 'B'
arraylen = int(len(b))
elif datatype == 'uint16':
typestr = 'H'
arraylen = int(len(b) // 2)
elif datatype == 'uint32':
typestr = 'I'
arraylen = int(len(b) // 4)
y = np.array(struct.unpack('<' + typestr * arraylen, b))
return y
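def _new_data_to_array_example():
  """Hedged illustration, not part of the original script.

  new_data_to_array skips the first line of the dump file and then reads
  whitespace-separated hex byte values, packing them little-endian before
  reinterpreting them as the requested dtype; the two bytes written below
  decode to the single int16 value 0x1234 == 4660. The file name is a
  placeholder, and (like the rest of this script) Python 2 string semantics
  are assumed.
  """
  with open('example_dump.txt', 'w') as f:
    f.write('header line\n')
    f.write('0x34 0x12\n')
  return new_data_to_array('example_dump.txt', datatype='int16')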
# x is the fixed-point input in Qm.n format
def to_float(x, n):
return x.astype(float) * 2**(-n)
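# Quick illustrative check of the Qm.n conversion above (not in the original
# script): an int16 holding 16384 in Q15 format represents 16384 * 2**-15 == 0.5.
assert to_float(np.array([16384, -16384], dtype=np.int16), 15).tolist() == [0.5, -0.5]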
micro_windowed_input = new_data_to_array(
'micro_windowed_input.txt', datatype='int32')
cmsis_windowed_input = new_data_to_array(
'cmsis_windowed_input.txt', datatype='int16')
micro_dft = new_data_to_array('micro_dft.txt', datatype='int32')
cmsis_dft = new_data_to_array('cmsis_dft.txt', datatype='int16')
py_dft = np.fft.rfft(to_float(cmsis_windowed_input, 15), n=512)
py_result = np.empty((2 * py_dft.size), dtype=np.float)
py_result[0::2] = np.real(py_dft)
py_result[1::2] = np.imag(py_dft)
micro_power = new_data_to_array('micro_power.txt', datatype='int32')
cmsis_power = new_data_to_array('cmsis_power.txt', datatype='int16')
py_power = np.square(np.abs(py_dft))
micro_power_avg = new_data_to_array('micro_power_avg.txt', datatype='uint8')
cmsis_power_avg = new_data_to_array('cmsis_power_avg.txt', datatype='uint8')
plt.figure(1)
plt.subplot(311)
plt.plot(micro_windowed_input, label='Micro fixed')
plt.legend()
plt.subplot(312)
plt.plot(cmsis_windowed_input, label='CMSIS fixed')
plt.legend()
plt.subplot(313)
plt.plot(to_float(micro_windowed_input, 30), label='Micro to float')
plt.plot(to_float(cmsis_windowed_input, 15), label='CMSIS to float')
plt.legend()
plt.figure(2)
plt.subplot(311)
plt.plot(micro_dft, label='Micro fixed')
plt.legend()
plt.subplot(312)
plt.plot(cmsis_dft, label='CMSIS fixed')
plt.legend()
plt.subplot(313)
plt.plot(to_float(micro_dft, 22), label='Micro to float')
# CMSIS result has 6 fractional bits (not 7) due to documentation error (see
# README.md)
plt.plot(to_float(cmsis_dft, 6), label='CMSIS to float')
plt.plot(py_result, label='Python result')
plt.legend()
plt.figure(3)
plt.subplot(311)
plt.plot(micro_power, label='Micro fixed')
plt.legend()
plt.subplot(312)
plt.plot(cmsis_power[0:256], label='CMSIS fixed')
plt.legend()
plt.subplot(313)
plt.plot(to_float(micro_power, 22), label='Micro to float')
plt.plot(to_float(cmsis_power[0:256], 6), label='CMSIS to float')
plt.plot(py_power, label='Python result')
plt.legend()
plt.figure(4)
plt.plot(micro_power_avg, label='Micro fixed')
plt.plot(cmsis_power_avg, label='CMSIS fixed')
plt.legend()
plt.show()
# t = np.arange(16000.*0.03)/16000.
# # Factor of 10 because micro preprocessing overflows otherwise
# sin1k = 0.1*np.sin(2*np.pi*1000*t)
#
# plt.figure(1)
# plt.subplot(511)
# plt.plot(sin1k)
# plt.title('Input sine')
#
# plt.subplot(512)
# plt.plot(to_float(micro_windowed_input, 30), label='Micro-Lite')
# plt.plot(to_float(cmsis_windowed_input, 15), label='CMSIS')
# plt.title('Windowed sine')
# plt.legend(loc='center right')
#
# plt.subplot(513)
# plt.plot(to_float(micro_dft, 22), label='Micro-Lite')
# plt.plot(to_float(cmsis_dft, 6), label='CMSIS')
# plt.title('FFT')
# plt.legend(loc='center')
#
# plt.subplot(514)
# plt.plot(to_float(micro_power, 22), label='Micro-Lite')
# plt.plot(to_float(cmsis_power[0:256], 6), label='CMSIS')
# plt.title('|FFT|^2')
# plt.legend(loc='center right')
#
# plt.subplot(515)
# plt.plot(micro_power_avg, label='Micro-Lite')
# plt.plot(cmsis_power_avg, label='CMSIS')
# plt.title('Averaged |FFT|^2')
# plt.legend(loc='center right')
#
# plt.tight_layout(pad=0, w_pad=0.2, h_pad=0.2)
#
# plt.show()
#
| apache-2.0 |
costypetrisor/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | 267 | 6813 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
from nose.tools import raises
from nose.tools import assert_true
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a one-dimensional Gaussian Process model.
# Check random start optimization.
# Test the interpolating property.
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=random_start, verbose=False).fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
y2_pred, MSE2 = gp.predict(X2, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.)
and np.allclose(MSE2, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the interpolating property.
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = g(X).ravel()
thetaL = [1e-4] * 2
thetaU = [1e-1] * 2
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=thetaL,
thetaU=thetaU,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
eps = np.finfo(gp.theta_.dtype).eps
assert_true(np.all(gp.theta_ >= thetaL - eps)) # Lower bounds of hyperparameters
assert_true(np.all(gp.theta_ <= thetaU + eps)) # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
random_start=10, beta0=None):
# MLE estimation of a two-dimensional Gaussian Process model accounting for
# anisotropy. Check random start optimization.
# Test the GP interpolation for 2D output
b, kappa, e = 5., .5, .1
g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
f = lambda x: np.vstack((g(x), g(x))).T
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
y = f(X)
gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
thetaU=[1e-1] * 2,
random_start=random_start, verbose=False)
gp.fit(X, y)
y_pred, MSE = gp.predict(X, eval_MSE=True)
assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
gp = GaussianProcess()
gp.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
# Repeat test_1d and test_2d for several built-in correlation
# models specified as strings.
all_corr = ['absolute_exponential', 'squared_exponential', 'cubic',
'linear']
for corr in all_corr:
test_1d(regr='constant', corr=corr, random_start=random_start)
test_2d(regr='constant', corr=corr, random_start=random_start)
test_2d_2d(regr='constant', corr=corr, random_start=random_start)
def test_ordinary_kriging():
# Repeat test_1d and test_2d with given regression weights (beta0) for
# different regression models (Ordinary Kriging).
test_1d(regr='linear', beta0=[0., 0.5])
test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
test_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
test_2d_2d(regr='linear', beta0=[0., 0.5, 0.5])
test_2d_2d(regr='quadratic', beta0=[0., 0.5, 0.5, 0.5, 0.5, 0.5])
def test_no_normalize():
gp = GaussianProcess(normalize=False).fit(X, y)
y_pred = gp.predict(X)
assert_true(np.allclose(y_pred, y))
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the reduced likelihood function of the optimal theta.
n_samples, n_features = 50, 3
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
best_likelihood = -np.inf
for random_start in range(1, 5):
gp = GaussianProcess(regr="constant", corr="squared_exponential",
theta0=[1e-0] * n_features,
thetaL=[1e-4] * n_features,
thetaU=[1e+1] * n_features,
random_start=random_start, random_state=0,
verbose=False).fit(X, y)
rlf = gp.reduced_likelihood_function()[0]
assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
best_likelihood = rlf
def test_mse_solving():
# test the MSE estimate to be sane.
# non-regression test for ignoring off-diagonals of feature covariance,
# testing with nugget that renders covariance useless, only
# using the mean function, with low effective rank of data
gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
optimizer='Welch', regr="linear", random_state=0)
X, y = make_regression(n_informative=3, n_features=60, noise=50,
random_state=0, effective_rank=1)
gp.fit(X, y)
assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
| bsd-3-clause |
triskadecaepyon/pyworkout-toolkit | pyworkout/parsers/tcxtools.py | 1 | 4560 | """
Tools to process TCX files,
specifically for parsing and
converting to other formats.
"""
import numpy as np
import pandas as pd
from lxml import objectify
import dateutil.parser
import logging
TPXNS = "{http://www.garmin.com/xmlschemas/ActivityExtension/v2}TPX"
LXNS = "{http://www.garmin.com/xmlschemas/ActivityExtension/v2}LX"
class TCXPandas(object):
"""
Class for Parsing .TCX files to Pandas DataFrames.
Parameters
----------
    tcx_file : string or path object
        The path to the .tcx file.
"""
def __init__(self, tcx_file, **kwds):
self.__filehandle__ = tcx_file
self.tcx = None
self.activity = None
self.dataframe = None
logging.basicConfig(filename="TCXconversion.log", level=logging.DEBUG)
def parse(self):
"""
        Parse the specified TCX file into a DataFrame.
        Returns the DataFrame and also sets
        the self.dataframe attribute on this TCXPandas instance.
"""
self.tcx = objectify.parse(open(self.__filehandle__))
self.activity = self.tcx.getroot().Activities.Activity
self.dataframe = pd.DataFrame(self._traverse_laps_())
return self.dataframe
def get_activity_timestamp(self):
"""
Returns the TCX file timestamp if parsed
"""
if self.activity is None:
return None
else:
return self.activity.Id
def get_sport(self):
"""
Returns the specified sport of the TCX file
"""
if self.activity is None:
return None
else:
return self.activity.attrib['Sport']
def get_workout_startime(self):
"""
Returns the starting timestamp of the specified TCX file
"""
if self.activity is None:
return None
else:
return self.activity.Lap.items()[0][1]
def _traverse_laps_(self):
# New iterator method to align with lxml standard
return_array = []
for laps in self.activity.Lap:
for tracks in laps.Track:
for trackingpoints in tracks.Trackpoint:
return_dict = {}
return_dict['time'] = dateutil.parser.parse(str(trackingpoints.Time))
try:
return_dict['latitude'] = \
np.float(trackingpoints.Position.LatitudeDegrees)
except AttributeError:
pass #TODO log this
try:
return_dict['longitude'] = \
np.float(trackingpoints.Position.LongitudeDegrees)
except AttributeError:
pass #TODO log this
try:
return_dict['altitude'] = np.float(trackingpoints.AltitudeMeters)
except AttributeError:
pass #TODO log this
try:
return_dict['distance'] = np.float(trackingpoints.DistanceMeters)
except AttributeError:
pass #TODO log this
try:
return_dict['hr'] = np.float(trackingpoints.HeartRateBpm.Value)
except AttributeError:
pass #TODO log this
try:
return_dict['speed'] = \
np.float(trackingpoints.Extensions[TPXNS].Speed)
except AttributeError:
pass #TODO log this
                    if self.get_sport() == 'Running':
try:
return_dict['cadence'] = \
np.float(trackingpoints.Extensions[TPXNS].RunCadence)
except AttributeError:
pass #TODO log this
else: # self.activity.attrib['Sport'] == 'Biking':
try:
return_dict['cadence'] = np.float(trackingpoints.Cadence)
except AttributeError:
pass #TODO log this
try:
return_dict['power'] = \
np.float(trackingpoints.Extensions[TPXNS].Watts)
except AttributeError:
pass #TODO log this
return_array.append(return_dict)
return return_array
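if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module. "workout.tcx" is a
    # placeholder path; any TCX export containing Trackpoint data should work.
    tcx = TCXPandas("workout.tcx")
    frame = tcx.parse()
    print(tcx.get_sport())
    print(frame.head())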
| bsd-3-clause |
cdondrup/strands_qsr_lib | qsr_lib/dbg/dbg_template_bounding_boxes_qsrs.py | 8 | 2711 | #!/usr/bin/python
# import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Rectangle
class Dbg(object):
def __init__(self):
pass
def return_bounding_box_2d(self, x, y, xsize, ysize):
"""Return the bounding box
:param x: x center
:param y: y center
:param xsize: x size
:param ysize: y size
:return: list(x1, y1, x2, y2) where (x1, y1) and (x2, y2) are the coordinates of the diagonal points of the
        bounding box, depending on your coordinate frame
"""
if xsize <= 0 or ysize <= 0:
print("ERROR: can't compute bounding box, xsize or height has no positive value")
return []
return [x-xsize/2, y-ysize/2, x+xsize/2, y+ysize/2]
def compute_qsr(self, bb1, bb2):
"""Wrapper for __compute_qsr
:param bb1: diagonal points coordinates of first bounding box (x1, y1, x2, y2)
:param bb2: diagonal points coordinates of second bounding box (x1, y1, x2, y2)
:return: an RCC depending on your implementation
"""
return self.__compute_qsr(bb1, bb2)
def __compute_qsr(self, bb1, bb2):
"""Replace with your own
:param bb1: diagonal points coordinates of first bounding box (x1, y1, x2, y2)
        :param bb2: diagonal points coordinates of second bounding box (x1, y1, x2, y2)
:return: an RCC depending on your implementation
"""
raise NotImplementedError("Replace with your code")
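def example_compute_qsr(bb1, bb2):
    """Hedged sketch, not part of the original template: one possible body for
    __compute_qsr, classifying two bounding boxes with a coarse RCC-like scheme.
    The relation labels ("dc", "eq", "po") are illustrative, not a full RCC set.
    """
    if bb1 == bb2:
        return "eq"
    disjoint = bb1[2] < bb2[0] or bb2[2] < bb1[0] or bb1[3] < bb2[1] or bb2[3] < bb1[1]
    return "dc" if disjoint else "po"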
def plot_bbs(bb1, bb2):
plt.figure()
ax = plt.gca()
# ax.invert_yaxis()
ax.add_patch(Rectangle((bb1[0], bb1[1]), bb1[2]-bb1[0], bb1[3]-bb1[1], alpha=1, facecolor="blue"))
ax.annotate("o1", (bb1[0], bb1[1]), color='black', weight='bold', fontsize=14)
ax.add_patch(Rectangle((bb2[0], bb2[1]), bb2[2]-bb2[0], bb2[3]-bb2[1], alpha=1, facecolor="red"))
ax.annotate("o2", (bb2[0], bb2[1]), color='black', weight='bold', fontsize=14)
h = 6
l = 0
# ax.set_xlim(l, h)
# ax.set_ylim(l, h)
ax.set_xlim(l, h)
ax.set_ylim(h, l)
plt.show()
if __name__ == '__main__':
dbg = Dbg()
# Play with these to test (x_center, y_center, xsize(i.e. x-size), ysize(i.e. y-size))
o1 = (2.0, 2.0, 2., 2.)
o2 = (4.0, 3.0, 1., 1.)
o1 = dbg.return_bounding_box_2d(o1[0], o1[1], o1[2], o1[3])
o2 = dbg.return_bounding_box_2d(o2[0], o2[1], o2[2], o2[3])
# Bounding boxes
# print("o1:", o1)
# print("o2:", o2)
# Relations
print("o1o2:", dbg.compute_qsr(o1, o2))
print("o2o1:", dbg.compute_qsr(o2, o1))
# Plot the boxes
plot_bbs(o1, o2)
| mit |
matthew-tucker/mne-python | mne/viz/tests/test_topo.py | 7 | 4728 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
import os.path as op
import warnings
from collections import namedtuple
import numpy as np
from numpy.testing import assert_raises
from mne import io, read_events, Epochs
from mne import pick_channels_evoked
from mne.channels import read_layout
from mne.time_frequency.tfr import AverageTFR
from mne.utils import run_tests_if_main
from mne.viz import (plot_topo, plot_topo_image_epochs, _get_presser,
mne_analyze_colormap)
from mne.viz.topo import _plot_update_evoked_topo
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
event_id, tmin, tmax = 1, -0.2, 0.2
layout = read_layout('Vectorview-all')
def _get_raw():
return io.Raw(raw_fname, preload=False)
def _get_events():
return read_events(event_name)
def _get_picks(raw):
return [0, 1, 2, 6, 7, 8, 340, 341, 342] # take a only few channels
def _get_epochs():
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
return epochs
def _get_epochs_delayed_ssp():
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
reject = dict(mag=4e-12)
epochs_delayed_ssp = Epochs(raw, events[:10], event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
proj='delayed', reject=reject)
return epochs_delayed_ssp
def test_plot_topo():
"""Test plotting of ERP topography
"""
import matplotlib.pyplot as plt
# Show topography
evoked = _get_epochs().average()
plot_topo(evoked) # should auto-find layout
warnings.simplefilter('always', UserWarning)
picked_evoked = evoked.pick_channels(evoked.ch_names[:3], copy=True)
picked_evoked_eeg = evoked.pick_types(meg=False, eeg=True, copy=True)
picked_evoked_eeg.pick_channels(picked_evoked_eeg.ch_names[:3])
# test scaling
with warnings.catch_warnings(record=True):
for ylim in [dict(mag=[-600, 600]), None]:
plot_topo([picked_evoked] * 2, layout, ylim=ylim)
for evo in [evoked, [evoked, picked_evoked]]:
assert_raises(ValueError, plot_topo, evo, layout, color=['y', 'b'])
evoked_delayed_ssp = _get_epochs_delayed_ssp().average()
ch_names = evoked_delayed_ssp.ch_names[:3] # make it faster
picked_evoked_delayed_ssp = pick_channels_evoked(evoked_delayed_ssp,
ch_names)
fig = plot_topo(picked_evoked_delayed_ssp, layout, proj='interactive')
func = _get_presser(fig)
event = namedtuple('Event', 'inaxes')
func(event(inaxes=fig.axes[0]))
params = dict(evokeds=[picked_evoked_delayed_ssp],
times=picked_evoked_delayed_ssp.times,
fig=fig, projs=picked_evoked_delayed_ssp.info['projs'])
bools = [True] * len(params['projs'])
_plot_update_evoked_topo(params, bools)
# should auto-generate layout
plot_topo(picked_evoked_eeg.copy(),
fig_background=np.zeros((4, 3, 3)), proj=True)
plt.close('all')
def test_plot_topo_image_epochs():
"""Test plotting of epochs image topography
"""
import matplotlib.pyplot as plt
title = 'ERF images - MNE sample data'
epochs = _get_epochs()
cmap = mne_analyze_colormap(format='matplotlib')
plot_topo_image_epochs(epochs, sigma=0.5, vmin=-200, vmax=200,
colorbar=True, title=title, cmap=cmap)
plt.close('all')
def test_plot_tfr_topo():
"""Test plotting of TFR data
"""
epochs = _get_epochs()
n_freqs = 3
nave = 1
data = np.random.RandomState(0).randn(len(epochs.ch_names),
n_freqs, len(epochs.times))
tfr = AverageTFR(epochs.info, data, epochs.times, np.arange(n_freqs), nave)
tfr.plot_topo(baseline=(None, 0), mode='ratio', title='Average power',
vmin=0., vmax=14., show=False)
tfr.plot([4], baseline=(None, 0), mode='ratio', show=False, title='foo')
run_tests_if_main()
| bsd-3-clause |
amitsela/beam | sdks/python/apache_beam/examples/complete/juliaset/juliaset/juliaset.py | 9 | 4504 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A Julia set computing workflow: https://en.wikipedia.org/wiki/Julia_set.
We use the quadratic polynomial f(z) = z*z + c, with c = -.62772 + .42193i
"""
from __future__ import absolute_import
import argparse
import apache_beam as beam
from apache_beam.io import WriteToText
def from_pixel(x, y, n):
"""Converts a NxN pixel position to a (-1..1, -1..1) complex number."""
return complex(2.0 * x / n - 1.0, 2.0 * y / n - 1.0)
def get_julia_set_point_color(element, c, n, max_iterations):
"""Given an pixel, convert it into a point in our julia set."""
x, y = element
z = from_pixel(x, y, n)
for i in xrange(max_iterations):
if z.real * z.real + z.imag * z.imag > 2.0:
break
z = z * z + c
return x, y, i # pylint: disable=undefined-loop-variable
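def _julia_set_example_check():
  """Hedged sanity check, not part of the original example.

  Pixel (0, 0) of an n x n grid maps to the corner -1 - 1j of the sampled
  square, and that starting point escapes the |z|^2 > 2 bound after a single
  update for c = -.62772 + .42193i, so its recorded iteration count is small.
  """
  assert from_pixel(0, 0, 100) == complex(-1.0, -1.0)
  _, _, iterations = get_julia_set_point_color(
      (0, 0), complex(-.62772, .42193), 100, 100)
  assert iterations <= 2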
def generate_julia_set_colors(pipeline, c, n, max_iterations):
"""Compute julia set coordinates for each point in our set."""
def point_set(n):
for x in range(n):
for y in range(n):
yield (x, y)
julia_set_colors = (pipeline
| 'add points' >> beam.Create(point_set(n))
| beam.Map(
get_julia_set_point_color, c, n, max_iterations))
return julia_set_colors
def generate_julia_set_visualization(data, n, max_iterations):
"""Generate the pixel matrix for rendering the julia set as an image."""
import numpy as np # pylint: disable=wrong-import-order, wrong-import-position
colors = []
for r in range(0, 256, 16):
for g in range(0, 256, 16):
for b in range(0, 256, 16):
colors.append((r, g, b))
xy = np.zeros((n, n, 3), dtype=np.uint8)
for x, y, iteration in data:
xy[x, y] = colors[iteration * len(colors) / max_iterations]
return xy
def save_julia_set_visualization(out_file, image_array):
"""Save the fractal image of our julia set as a png."""
from matplotlib import pyplot as plt # pylint: disable=wrong-import-order, wrong-import-position
plt.imsave(out_file, image_array, format='png')
def run(argv=None): # pylint: disable=missing-docstring
parser = argparse.ArgumentParser()
parser.add_argument('--grid_size',
dest='grid_size',
default=1000,
help='Size of the NxN matrix')
parser.add_argument(
'--coordinate_output',
dest='coordinate_output',
required=True,
help='Output file to write the color coordinates of the image to.')
parser.add_argument('--image_output',
dest='image_output',
default=None,
help='Output file to write the resulting image to.')
known_args, pipeline_args = parser.parse_known_args(argv)
p = beam.Pipeline(argv=pipeline_args)
n = int(known_args.grid_size)
coordinates = generate_julia_set_colors(p, complex(-.62772, .42193), n, 100)
# Group each coordinate triplet by its x value, then write the coordinates to
# the output file with an x-coordinate grouping per line.
# pylint: disable=expression-not-assigned
(coordinates
| 'x coord key' >> beam.Map(lambda (x, y, i): (x, (x, y, i)))
| 'x coord' >> beam.GroupByKey()
| 'format' >> beam.Map(
lambda (k, coords): ' '.join('(%s, %s, %s)' % coord for coord in coords))
| WriteToText(known_args.coordinate_output))
# pylint: enable=expression-not-assigned
return p.run().wait_until_finish()
# Optionally render the image and save it to a file.
# TODO(silviuc): Add this functionality.
# if p.options.image_output is not None:
# julia_set_image = generate_julia_set_visualization(
# file_with_coordinates, n, 100)
# save_julia_set_visualization(p.options.image_output, julia_set_image)
| apache-2.0 |
simon-pepin/scikit-learn | sklearn/ensemble/tests/test_base.py | 284 | 1328 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
| bsd-3-clause |
ElessarWebb/dummy | src/dummy/viewer/formatting/plotformatters.py | 1 | 2788 | from dummy.viewer.formatting import ResultFormatter, Formatter
import logging
logger = logging.getLogger( __name__ )
try:
import pylab
import numpy
@Formatter.register( 'plot' )
class PlotFormatter( ResultFormatter ):
def __init__( self, *args, **kwargs ):
super( PlotFormatter, self ).__init__( self, *args, **kwargs )
# create the figure
self.figure = pylab.figure( facecolor='white' )
def format_results( self, results, *metrics ):
self.setup( results )
try:
self.plot( results, metrics )
except ( ValueError, TypeError ) as e:
raise Exception(
"Non numeric metrics cannot be plotted"
)
def setup( self, results ):
# get the xlabels
x = range( 1, len( results ) + 1 )
xlabels = [ r.test.name for r in results ]
pylab.title( 'Metric values per test (commit: %s)' % results[0].commit, fontsize=22 )
pylab.xticks( rotation=90 )
pylab.grid( True, markevery='integer' )
pylab.xlabel( 'tests', fontsize=16 )
pylab.margins( 0.05 )
pylab.xticks( x, xlabels )
def plot( self, results, metrics, **opts ):
# create the plots
plots = []
for metric in metrics:
plots.append( self.plot_metric( results, metric , **opts ))
# legendary
pylab.legend([ p[0] for p in plots], metrics )
# and show it
pylab.show()
def plot_metric( self, results, metric, **opts ):
x = range( 1, len( results ) + 1 )
y = [ t.get_metric( metric ) for t in results ]
try:
plot = pylab.plot( x, y, **opts )
pylab.setp( plot,
label=metric,
linestyle='dashed',
linewidth=1.0,
marker=".",
markersize=12.0,
aa=True
)
return plot
except ( ValueError, TypeError ) as e:
raise Exception(
"The metric `%s` is not numeric and can thus not be plotted." % metric
)
@Formatter.register( 'plot.bar' )
class BarPlotFormatter( PlotFormatter ):
def plot( self, results, metrics, **opts ):
# create the plots
plots = []
x = numpy.arange( len( results ))
margin = 0.2 / len( metrics )
width = 0.8 / len( metrics )
colors = [
( i/( 2 * len( metrics )), i/len(metrics), 0.8 )
for i in range( 1, len( metrics ) + 1)
]
for i, metric in enumerate( metrics ):
# compute the bar heights
y = [ t.get_metric( metric ) or 0 for t in results ]
plot = self.bar(
x + 0.5 + i*width + ( i ) * margin,
y,
width=width,
color=colors[i],
)
plots.append( plot )
pylab.setp( plot,
label=metric,
aa=True
)
# legendary
pylab.legend([ p[0] for p in plots], metrics )
# and show it
pylab.show()
def bar( self, *args, **kwargs ):
return pylab.bar( *args, **kwargs )
except ImportError:
logger.debug( "matplotlib is not installed, PlotFormatter not available." )
| mit |
chriscrosscutler/scikit-image | doc/ext/plot_directive.py | 89 | 20530 | """
A special directive for generating a matplotlib plot.
.. warning::
This is a hacked version of plot_directive.py from Matplotlib.
It's very much subject to change!
Usage
-----
Can be used like this::
.. plot:: examples/example.py
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3], [4,5,6])
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
The content is interpreted as doctest formatted if it has a line starting
with ``>>>``.
The ``plot`` directive supports the options
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. Default can be changed in conf.py
and the ``image`` directive options ``alt``, ``height``, ``width``,
``scale``, ``align``, ``class``.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_pre_code
Code that should be executed before each plot.
plot_basedir
        Base directory to which plot:: file names are relative.
        (If None or empty, file names are relative to the directory where
        the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
plot_html_show_formats
Whether to show links to the files in HTML.
TODO
----
* Refactor Latex output; now it's plain images, but it would be nice
to make them appear side-by-side, or in floats.
"""
from __future__ import division, absolute_import, print_function
import sys, os, glob, shutil, imp, warnings, re, textwrap, traceback
import sphinx
if sys.version_info[0] >= 3:
from io import StringIO
else:
    from StringIO import StringIO
import warnings
warnings.warn("A plot_directive module is also available under "
"matplotlib.sphinxext; expect this numpydoc.plot_directive "
"module to be deprecated after relevant features have been "
"integrated there.",
FutureWarning, stacklevel=2)
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
app.add_config_value('plot_pre_code', '', True)
app.add_config_value('plot_include_source', False, True)
app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
app.add_config_value('plot_basedir', None, True)
app.add_config_value('plot_html_show_formats', True, True)
app.add_directive('plot', plot_directive, True, (0, 1, False),
**plot_directive_options)
#------------------------------------------------------------------------------
# plot:: directive
#------------------------------------------------------------------------------
from docutils.parsers.rst import directives
from docutils import nodes
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_format(arg):
return directives.choice(arg, ('python', 'lisp'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
plot_directive_options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
}
#------------------------------------------------------------------------------
# Generating output
#------------------------------------------------------------------------------
from docutils import nodes, utils
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{%- for option in options %}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{% endfor %}
{{ only_latex }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endfor %}
"""
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def run(arguments, content, options, state_machine, state, lineno):
if arguments and content:
raise RuntimeError("plot:: directive can't have both args and content")
document = state_machine.document
config = document.settings.env.config
options.setdefault('include-source', config.plot_include_source)
# determine input
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if arguments:
if not config.plot_basedir:
source_file_name = os.path.join(rst_dir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
code = open(source_file_name, 'r').read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
# ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if 'format' in options:
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = makefig(code, source_file_name, build_dir, output_base,
config)
errors = []
except PlotError as err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s: %s" % (output_base, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
opts = [':%s: %s' % (key, val) for key, val in list(options.items())
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
if j == 0:
src_link = source_link
else:
src_link = None
result = format_template(
TEMPLATE,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
shutil.copyfile(fn, os.path.join(dest_dir,
os.path.basename(fn)))
# copy script (if necessary)
if source_file_name == rst_file:
target_name = os.path.join(dest_dir, output_base + source_ext)
f = open(target_name, 'w')
f.write(unescape_doctest(code))
f.close()
return errors
#------------------------------------------------------------------------------
# Run code and capture figures
#------------------------------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.image as image
from matplotlib import _pylab_helpers
import exceptions
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
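def _split_code_at_show_example():
    r"""Hedged illustration, not part of the original module.

    split_code_at_show cuts a script into pieces so that each piece ends at a
    ``plt.show()`` call:

    >>> parts = split_code_at_show("plt.plot([1, 2])\nplt.show()\nplt.plot([3])")
    >>> len(parts)
    2
    >>> parts[0].endswith("plt.show()")
    True
    """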
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None):
# Change the working directory to the directory of the example, so
# it can get at its data files, if any.
pwd = os.getcwd()
old_sys_path = list(sys.path)
if code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Redirect stdout
stdout = sys.stdout
sys.stdout = StringIO()
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
exec(setup.config.plot_pre_code, ns)
exec(code, ns)
except (Exception, SystemExit) as err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
#------------------------------------------------------------------------------
# Generating figures
#------------------------------------------------------------------------------
def out_of_date(original, derived):
"""
Returns True if derivative is out-of-date wrt original,
both of which are full file paths.
"""
return (not os.path.exists(derived)
or os.stat(derived).st_mtime < os.stat(original).st_mtime)
def makefig(code, code_path, output_dir, output_base, config):
"""
Run a pyplot script *code* and save the images under *output_dir*
with file names derived from *output_base*
"""
# -- Parse format list
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
formats = []
for fmt in config.plot_formats:
if isinstance(fmt, str):
formats.append((fmt, default_dpi.get(fmt, 80)))
elif type(fmt) in (tuple, list) and len(fmt)==2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
# Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
for i, code_piece in enumerate(code_pieces):
images = []
for j in range(1000):
img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# -- We didn't find the files, so build them
results = []
ns = {}
for i, code_piece in enumerate(code_pieces):
# Clear between runs
plt.close('all')
# Run code
run_code(code_piece, code_path, ns)
# Collect images
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
except exceptions.BaseException as err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
# Results
results.append((code_piece, images))
return results
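# Hedged usage sketch (illustrative only; ``config`` is whatever object the
# enclosing Sphinx directive provides, with a ``plot_formats`` list such as
# ['png', ('hires.png', 200)]):
#
#     >>> results = makefig(code, '/path/to/example.py',       # doctest: +SKIP
#     ...                   output_dir='/tmp/plots',
#     ...                   output_base='example', config=config)
#     >>> for code_piece, images in results:                   # doctest: +SKIP
#     ...     print(len(images))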
#------------------------------------------------------------------------------
# Relative pathnames
#------------------------------------------------------------------------------
try:
from os.path import relpath
except ImportError:
# Copied from Python 2.7
if 'posix' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
# Work out how much of the filepath is shared by start and path.
i = len(commonprefix([start_list, path_list]))
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
elif 'nt' in sys.builtin_module_names:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
from os.path import sep, curdir, join, abspath, commonprefix, \
pardir, splitunc
if not path:
raise ValueError("no path specified")
start_list = abspath(start).split(sep)
path_list = abspath(path).split(sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = splitunc(path)
unc_start, rest = splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_list[0], start_list[0]))
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else:
i += 1
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
else:
raise RuntimeError("Unsupported platform (no relpath available!)")
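# Whichever branch above is taken, ``relpath`` behaves like the standard
# library helper of the same name. Illustrative check (POSIX paths assumed):
#
#     >>> relpath('/a/b/c', '/a')
#     'b/c'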
| bsd-3-clause |
maxyeg/beaker-notebook | plugin/ipythonPlugins/src/dist/python3/beaker_runtime3.py | 5 | 18118 | # Copyright 2014 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, json, pandas, numpy
import urllib.request, urllib.parse, urllib.error, IPython, datetime, calendar, math, traceback, time
from IPython.utils.traitlets import Unicode
class OutputContainer:
def __init__(self):
self.items = []
def clear(self):
self.items = [ ]
def addItem(self, obj):
self.items.append(obj)
def getItems(self):
return self.items
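# Illustrative usage (not in the original source): an OutputContainer is a
# plain accumulator whose items are serialised later by transform().
#
#     >>> oc = OutputContainer()
#     >>> oc.addItem("hello")
#     >>> oc.getItems()
#     ['hello']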
class BeakerCodeCell:
def __init__(self, cellId, evaluatorId):
self.cellId = cellId
self.evaluatorId = evaluatorId
self.code = ''
self.outputtype = ''
self.output = None
self.tags = ''
def getCellId(self):
return self.cellId
def getEvaluatorId(self):
return self.evaluatorId
def getCode(self):
return self.code
def getOutputType(self):
return self.outputtype
def getOutput(self):
return self.output
def getTags(self):
return self.tags
def convertTypeName(typ):
if typ.startswith("float"):
return "double"
if typ.startswith("int") or typ.startswith("uint") or typ.startswith("short") or typ.startswith("ushort") or typ.startswith("long") or typ.startswith("ulong"):
return "integer"
if typ.startswith("bool"):
return "boolean"
if typ.startswith("date") or typ.startswith("Time"):
return "time"
return "string"
def isPrimitiveType(typ):
if typ.startswith("float"):
return True
if typ.startswith("int") or typ.startswith("uint") or typ.startswith("short") or typ.startswith("ushort") or typ.startswith("long") or typ.startswith("ulong"):
return True
if typ.startswith("bool"):
return True
if typ.startswith("date") or typ.startswith("Time"):
return True
if typ.startswith("str"):
return True
return False
def isListOfMaps(data):
if type(data) != list:
return False
for w in data:
if type(w) != dict:
return False
for v in w.values():
if not isPrimitiveType(type(v).__name__):
return False
return True
def isDictionary(data):
if type(data) != dict:
return False
for v in data.values():
if not isPrimitiveType(type(v).__name__):
return False
return True
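# Illustrative behaviour of the two predicates above (not part of the
# original file): only flat containers of primitive values qualify.
#
#     >>> isListOfMaps([{'a': 1}, {'a': 2.5, 'b': 'x'}])
#     True
#     >>> isDictionary({'a': 1, 'b': [1, 2]})
#     False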
def transformNaN(obj):
if not isinstance(obj, float):
return obj
if math.isnan(obj):
return "Nan";
if math.isinf(obj):
if obj>0:
return "Infinity"
else:
return "-Infinity"
return obj
def transformNaNs(obj):
for x in range(0,len(obj)):
        i = obj[x]
        if not isinstance(i, float):
            continue
        if math.isnan(i):
            obj[x] = "NaN"
if math.isinf(i):
if i>0:
obj[x] = "Infinity"
else:
obj[x] = "-Infinity"
def fixNaNBack(obj):
if not isinstance(obj, str):
return obj
if obj == "NaN":
return float('nan')
if obj == "Infinity":
return float('inf')
if obj == "-Infinity":
return float('-inf')
return obj
def fixNaNsBack(obj):
for x in range(0,len(obj)):
i = obj[x];
if not isinstance(i, str):
continue
if i == "NaN":
obj[x] = float('nan')
if i == "Infinity":
obj[x] = float('inf')
if i == "-Infinity":
obj[x] = float('-inf')
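# Round-trip sketch (illustrative): non-finite floats are encoded as the
# strings "NaN"/"Infinity"/"-Infinity" on the way out and decoded again on
# the way back in.
#
#     >>> vals = [1.0, float('nan'), float('inf')]
#     >>> transformNaNs(vals); vals
#     [1.0, 'NaN', 'Infinity']
#     >>> fixNaNsBack(vals); vals[2]
#     inf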
def transform(obj):
if type(obj) == bytes:
return str(obj)
if isListOfMaps(obj):
out = {}
out['type'] = "TableDisplay"
out['subtype'] = "ListOfMaps"
cols = []
for l in obj:
cols.extend(l.keys())
cols = list(set(cols))
out['columnNames'] = cols
vals = []
for l in obj:
row = []
for r in cols:
if r in l:
row.append(transform(l[r]))
else:
row.append('')
vals.append(row)
out['values'] = vals
return out
if isDictionary(obj):
out = {}
out['type'] = "TableDisplay"
out['subtype'] = "Dictionary"
out['columnNames'] = [ "Key", "Value" ]
values = []
for k,v in obj.items():
values.append( [k, transform(v)] )
out['values'] = values
return out
if type(obj) == dict:
out = {}
for k,v in obj.items():
out[k] = transform(v)
return out
if type(obj) == list:
out = []
for v in obj:
out.append(transform(v))
return out
if isinstance(obj, OutputContainer):
out = {}
out['type'] = "OutputContainer"
items = []
for v in obj.getItems():
items.append(transform(v))
out['items'] = items
return out
if isinstance(obj, BeakerCodeCell):
out = {}
out['type'] = "BeakerCodeCell"
out['cellId'] = obj.getCellId()
out['evaluatorId'] = obj.getEvaluatorId()
out['code'] = obj.getCode()
out['outputtype'] = obj.getOutputType()
out['output'] = transform(obj.getOutput())
out['tags'] = obj.getTags()
return out
return transformNaN(obj)
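# Illustrative sketch of the serialisation performed by transform() (not in
# the original source): a flat dict becomes a Beaker "TableDisplay"
# description with Key/Value columns.
#
#     >>> out = transform({'a': 1})
#     >>> out['type'], out['subtype'], out['columnNames']
#     ('TableDisplay', 'Dictionary', ['Key', 'Value'])
#     >>> out['values']
#     [['a', 1]]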
def transformBack(obj):
if type(obj) == dict:
out = {}
for k,v in obj.items():
out[str(k)] = transformBack(v)
if "type" in out:
if out['type'] == "BeakerCodeCell":
c = BeakerCodeCell(out['cellId'], out['evaluatorId'])
if 'code' in out:
c.code = out['code']
if 'outputtype' in out:
c.outputtype = out['outputtype']
if 'output' in out:
c.output = transformBack(out['output'])
if 'tags' in out:
c.tags = out['tags']
return c
if out['type'] == "OutputContainer":
c = OutputContainer()
if 'items' in out:
for i in out['items']:
c.addItem(i)
return c;
if out['type'] == "Date":
return datetime.datetime.fromtimestamp(out["timestamp"]/1000)
if out['type'] == "TableDisplay":
if 'subtype' in out:
if out['subtype'] == "Dictionary":
out2 = { }
for r in out['values']:
out2[r[0]] = fixNaNBack(r[1])
if out['columnNames'][0] == "Index":
return pandas.Series(out2)
return out2
if out['subtype'] == "Matrix":
vals = out['values']
fixNaNsBack(vals)
return numpy.matrix(vals)
if out['subtype'] == "ListOfMaps":
out2 = []
cnames = out['columnNames']
for r in out['values']:
out3 = { }
for i in range(len(cnames)):
if r[i] != '':
out3[ cnames[i] ] = r[i]
out2.append(out3)
return out2
# transform to dataframe
# first column becomes the index
vals = out['values']
cnames = out['columnNames'][1:]
index = []
for x in range(0,len(vals)):
index.append(transformBack(vals[x][0]))
v = vals[x][1:]
fixNaNsBack(v)
vals[x] = v
return pandas.DataFrame(data=vals, columns=cnames, index=index)
return out
if type(obj) == list:
out = []
for v in obj:
out.append(transformBack(v))
return out
try:
if type(obj) == bytes:
obj = str(obj)
except Exception as e:
return obj
return obj
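# Round-trip sketch (illustrative): the Dictionary TableDisplay produced by
# transform() comes back as a plain dict.
#
#     >>> transformBack(transform({'a': 1}))
#     {'a': 1}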
# should be inner class to Beaker
class DataFrameEncoder(json.JSONEncoder):
def default(self, obj):
# similarly handle Panels.
# make this extensible by the user to handle their own types.
if isinstance(obj, numpy.generic):
return transformNaN(obj.item())
if isinstance(obj, numpy.ndarray) and obj.ndim == 2:
out = {}
out['type'] = "TableDisplay"
out['subtype'] = "Matrix"
cols = [ ]
for i in range(obj.shape[1]):
cols.append( "c" + str(i) )
out['columnNames'] =cols
vars = obj.tolist()
for x in range(0,len(vars)):
transformNaNs(vars[x])
out['values'] = vars
return out
if isinstance(obj, numpy.ndarray):
ret = obj.tolist()
transformNaNs(ret)
return ret
if type(obj) == datetime.datetime or type(obj) == datetime.date or type(obj).__name__ == 'Timestamp':
out = {}
out['type'] = "Date"
out['timestamp'] = int(obj.strftime("%s"))*1000
return out
if type(obj) == pandas.core.frame.DataFrame:
out = {}
out['type'] = "TableDisplay"
out['subtype'] = "TableDisplay"
out['columnNames'] = ['Index'] + obj.columns.tolist()
vals = obj.values.tolist()
idx = obj.index.tolist()
for x in range(0,len(vals)):
vals[x] = [ idx[x] ] + vals[x]
ty = []
num = len(obj.columns.tolist())
            for x in range(0, num+1):
ty.append( convertTypeName(type(vals[0][x]).__name__))
out['types'] = ty
for x in range(0,len(vals)):
transformNaNs(vals[x])
out['values'] = vals
return out
if type(obj) == pandas.core.series.Series:
basict = True
for i in range(len(obj)):
if not isPrimitiveType(type(obj[i]).__name__):
basict = False
break
if basict:
out = {}
out['type'] = "TableDisplay"
out['subtype'] = "Dictionary"
out['columnNames'] = [ "Index", "Value" ]
values = []
for k,v in obj.items():
values.append( [k, transform(v)] )
out['values'] = values
return out
return obj.to_dict()
return json.JSONEncoder.default(self, obj)
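# Illustrative usage of the encoder above (not part of the original file):
# pass it to json.dumps via ``cls`` so numpy/pandas/datetime values are
# rewritten into Beaker's display structures (json and numpy are already
# imported at the top of this module).
#
#     >>> json.dumps({'x': numpy.int64(1)}, cls=DataFrameEncoder)
#     '{"x": 1}'
#     >>> json.dumps(numpy.arange(3), cls=DataFrameEncoder)
#     '[0, 1, 2]'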
class MyJSONFormatter(IPython.core.formatters.BaseFormatter):
format_type = Unicode('application/json')
def __call__(self, obj):
try:
obj = transform(obj)
return json.dumps(obj, cls=DataFrameEncoder)
except Exception as e:
#print(e)
#traceback.print_exc()
return None
class Beaker:
"""Runtime support for Python code in Beaker."""
session_id = ''
core_url = '127.0.0.1:' + os.environ['beaker_core_port']
_beaker_password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
_beaker_password_mgr.add_password(None, core_url, 'beaker',
os.environ['beaker_core_password'])
_beaker_url_opener = urllib.request.build_opener(urllib.request.HTTPBasicAuthHandler(_beaker_password_mgr), urllib.request.ProxyHandler({}))
def set4(self, var, val, unset, sync):
args = {'name': var, 'session':self.session_id, 'sync':sync}
if not unset:
val = transform(val)
args['value'] = json.dumps(val, cls=DataFrameEncoder)
req = urllib.request.Request('http://' + self.core_url + '/rest/namespace/set',
urllib.parse.urlencode(args).encode('utf8'))
conn = self._beaker_url_opener.open(req)
reply = conn.read().decode("utf-8")
if reply != 'ok':
raise NameError(reply)
def get(self, var):
req = urllib.request.Request('http://' + self.core_url + '/rest/namespace/get?' +
urllib.parse.urlencode({
'name': var,
'session':self.session_id}))
conn = self._beaker_url_opener.open(req)
result = json.loads(conn.read().decode())
if not result['defined']:
raise NameError('name \'' + var + '\' is not defined in notebook namespace')
return transformBack(result['value'])
def set_session(self, id):
self.session_id = id
def register_output(self):
ip = IPython.InteractiveShell.instance()
ip.display_formatter.formatters['application/json'] = MyJSONFormatter(parent=ip.display_formatter)
def set(self, var, val):
return self.set4(var, val, False, True)
def createOutputContainer(self):
return OutputContainer()
def showProgressUpdate(self):
return "WARNING: python3 language plugin does not support progress updates"
def evaluate(self,filter):
args = {'filter': filter, 'session':self.session_id}
req = urllib.request.Request('http://' + self.core_url + '/rest/notebookctrl/evaluate',
urllib.parse.urlencode(args).encode('utf8'))
conn = self._beaker_url_opener.open(req)
result = json.loads(conn.read().decode())
return transformBack(result)
def evaluateCode(self, evaluator,code):
args = {'evaluator': evaluator, 'code' : code, 'session':self.session_id}
req = urllib.request.Request('http://' + self.core_url + '/rest/notebookctrl/evaluateCode',
urllib.parse.urlencode(args).encode('utf8'))
conn = self._beaker_url_opener.open(req)
result = json.loads(conn.read().decode())
return transformBack(result)
def showStatus(self,msg):
args = {'msg': msg, 'session':self.session_id}
req = urllib.request.Request('http://' + self.core_url + '/rest/notebookctrl/showStatus',
urllib.parse.urlencode(args).encode('utf8'))
conn = self._beaker_url_opener.open(req)
        result = conn.read().decode("utf-8")
        return result == "true"
def clearStatus(self,msg):
args = {'msg': msg, 'session':self.session_id}
req = urllib.request.Request('http://' + self.core_url + '/rest/notebookctrl/clearStatus',
urllib.parse.urlencode(args).encode('utf8'))
conn = self._beaker_url_opener.open(req)
        result = conn.read().decode("utf-8")
        return result == "true"
def showTransientStatus(self,msg):
args = {'msg': msg, 'session':self.session_id}
req = urllib.request.Request('http://' + self.core_url + '/rest/notebookctrl/showTransientStatus',
urllib.parse.urlencode(args).encode('utf8'))
conn = self._beaker_url_opener.open(req)
        result = conn.read().decode("utf-8")
        return result == "true"
def getEvaluators(self):
req = urllib.request.Request('http://' + self.core_url + '/rest/notebookctrl/getEvaluators?' +
urllib.parse.urlencode({
'session':self.session_id}))
conn = self._beaker_url_opener.open(req)
result = json.loads(conn.read().decode())
return transformBack(result)
def getCodeCells(self,filter):
req = urllib.request.Request('http://' + self.core_url + '/rest/notebookctrl/getCodeCells?' +
urllib.parse.urlencode({
'session':self.session_id, 'filter':filter}))
conn = self._beaker_url_opener.open(req)
result = json.loads(conn.read().decode())
return transformBack(result)
def setCodeCellBody(self,name,body):
args = {'name': name, 'body':body, 'session':self.session_id}
req = urllib.request.Request('http://' + self.core_url + '/rest/notebookctrl/setCodeCellBody',
urllib.parse.urlencode(args).encode('utf8'))
conn = self._beaker_url_opener.open(req)
        result = conn.read().decode("utf-8")
        return result == "true"
def setCodeCellEvaluator(self,name,evaluator):
args = {'name': name, 'evaluator':evaluator, 'session':self.session_id}
req = urllib.request.Request('http://' + self.core_url + '/rest/notebookctrl/setCodeCellEvaluator',
urllib.parse.urlencode(args).encode('utf8'))
conn = self._beaker_url_opener.open(req)
        result = conn.read().decode("utf-8")
        return result == "true"
def setCodeCellTags(self,name,tags):
args = {'name': name, 'tags':tags, 'session':self.session_id}
req = urllib.request.Request('http://' + self.core_url + '/rest/notebookctrl/setCodeCellTags',
urllib.parse.urlencode(args).encode('utf8'))
conn = self._beaker_url_opener.open(req)
        result = conn.read().decode("utf-8")
        return result == "true"
def __setattr__(self, name, value):
if 'session_id' == name:
self.__dict__['session_id'] = value
return
return self.set(name, value)
def __getattr__(self, name):
return self.get(name)
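# Hedged usage sketch (requires a running Beaker server plus the
# beaker_core_port / beaker_core_password environment variables, so it is
# not runnable on its own; the session id below is made up):
#
#     >>> beaker = Beaker()                          # doctest: +SKIP
#     >>> beaker.set_session('some-session-id')      # doctest: +SKIP
#     >>> beaker.register_output()                   # doctest: +SKIP
#     >>> beaker.answer = 42                         # doctest: +SKIP
#     >>> beaker.answer                              # doctest: +SKIP
#     42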
| apache-2.0 |
pietroquaglio/elephant | elephant/test/test_pandas_bridge.py | 2 | 113211 | # -*- coding: utf-8 -*-
"""
Unit tests for the pandas bridge module.
:copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function
import unittest
from itertools import chain
from neo.test.generate_datasets import fake_neo
import numpy as np
from numpy.testing import assert_array_equal
import quantities as pq
try:
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_index_equal
except ImportError:
HAVE_PANDAS = False
else:
import elephant.pandas_bridge as ep
HAVE_PANDAS = True
if HAVE_PANDAS:
    # Wrap assert_index_equal so the tests work across pandas versions:
    # the check_names parameter was only introduced after pandas 0.14.0
    # (these tests were written for 0.14.0), and without the wrapper the
    # unit tests would break with pandas>=0.16.0.
def assert_index_equal(left, right):
try:
# pandas>=0.16.0
return pd.util.testing.assert_index_equal(left, right,
check_names=False)
except TypeError:
# pandas older version
return pd.util.testing.assert_index_equal(left, right)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiindexFromDictTestCase(unittest.TestCase):
def test__multiindex_from_dict(self):
inds = {'test1': 6.5,
'test2': 5,
'test3': 'test'}
targ = pd.MultiIndex(levels=[[6.5], [5], ['test']],
labels=[[0], [0], [0]],
names=['test1', 'test2', 'test3'])
res0 = ep._multiindex_from_dict(inds)
self.assertEqual(targ.levels, res0.levels)
self.assertEqual(targ.names, res0.names)
self.assertEqual(targ.labels, res0.labels)
def _convert_levels(levels):
"""Convert a list of levels to the format pandas returns for a MultiIndex.
Parameters
----------
levels : list
The list of levels to convert.
Returns
-------
list
        The levels converted to values like what pandas will give.
"""
levels = list(levels)
for i, level in enumerate(levels):
if hasattr(level, 'lower'):
try:
level = unicode(level)
except NameError:
pass
elif hasattr(level, 'date'):
levels[i] = pd.DatetimeIndex(data=[level])
continue
elif level is None:
levels[i] = pd.Index([])
continue
# pd.Index around pd.Index to convert to Index structure if MultiIndex
levels[i] = pd.Index(pd.Index([level]))
return levels
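# Illustrative check (not part of the original tests): each scalar level is
# wrapped in a pandas Index the way pandas stores MultiIndex levels, while
# None becomes an empty Index.
#
#     >>> levels = _convert_levels([5, None])
#     >>> levels[0].tolist(), len(levels[1])
#     ([5], 0)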
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class ConvertValueSafeTestCase(unittest.TestCase):
def test__convert_value_safe__float(self):
targ = 5.5
value = targ
res = ep._convert_value_safe(value)
self.assertIs(res, targ)
def test__convert_value_safe__str(self):
targ = 'test'
value = targ
res = ep._convert_value_safe(value)
self.assertIs(res, targ)
def test__convert_value_safe__bytes(self):
targ = 'test'
value = b'test'
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
def test__convert_value_safe__numpy_int_scalar(self):
targ = 5
value = np.array(5)
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_float_scalar(self):
targ = 5.
value = np.array(5.)
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_unicode_scalar(self):
targ = u'test'
value = np.array('test', dtype='U')
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__numpy_str_scalar(self):
targ = u'test'
value = np.array('test', dtype='S')
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res, 'dtype'))
def test__convert_value_safe__quantity_scalar(self):
targ = (10., 'ms')
value = 10. * pq.ms
res = ep._convert_value_safe(value)
self.assertEqual(res, targ)
self.assertFalse(hasattr(res[0], 'dtype'))
self.assertFalse(hasattr(res[0], 'units'))
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class SpiketrainToDataframeTestCase(unittest.TestCase):
def test__spiketrain_to_dataframe__parents_empty(self):
obj = fake_neo('SpikeTrain', seed=0)
res0 = ep.spiketrain_to_dataframe(obj)
res1 = ep.spiketrain_to_dataframe(obj, child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, child_first=False)
res3 = ep.spiketrain_to_dataframe(obj, parents=True)
res4 = ep.spiketrain_to_dataframe(obj, parents=True,
child_first=True)
res5 = ep.spiketrain_to_dataframe(obj, parents=True,
child_first=False)
res6 = ep.spiketrain_to_dataframe(obj, parents=False)
res7 = ep.spiketrain_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
self.assertEqual(len(obj), len(res3.index))
self.assertEqual(len(obj), len(res4.index))
self.assertEqual(len(obj), len(res5.index))
self.assertEqual(len(obj), len(res6.index))
self.assertEqual(len(obj), len(res7.index))
self.assertEqual(len(obj), len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
assert_array_equal(targindex, res4.index)
assert_array_equal(targindex, res5.index)
assert_array_equal(targindex, res6.index)
assert_array_equal(targindex, res7.index)
assert_array_equal(targindex, res8.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(['spike_number'], res3.index.names)
self.assertEqual(['spike_number'], res4.index.names)
self.assertEqual(['spike_number'], res5.index.names)
self.assertEqual(['spike_number'], res6.index.names)
self.assertEqual(['spike_number'], res7.index.names)
self.assertEqual(['spike_number'], res8.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj, parents=False)
res1 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, parents=False,
child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj)
res1 = ep.spiketrain_to_dataframe(obj, child_first=True)
res2 = ep.spiketrain_to_dataframe(obj, parents=True)
res3 = ep.spiketrain_to_dataframe(obj, parents=True, child_first=True)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
self.assertEqual(len(obj), len(res2.index))
self.assertEqual(len(obj), len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(['spike_number'], res2.index.names)
self.assertEqual(['spike_number'], res3.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__spiketrain_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=0)
obj = blk.list_children_by_class('SpikeTrain')[0]
res0 = ep.spiketrain_to_dataframe(obj, child_first=False)
res1 = ep.spiketrain_to_dataframe(obj, parents=True, child_first=False)
targvalues = pq.Quantity(obj.magnitude, units=obj.units)
targvalues = targvalues.rescale('s').magnitude[np.newaxis].T
targindex = np.arange(len(targvalues))
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(len(obj), len(res0.index))
self.assertEqual(len(obj), len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
self.assertEqual(['spike_number'], res0.index.names)
self.assertEqual(['spike_number'], res1.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class EventToDataframeTestCase(unittest.TestCase):
def test__event_to_dataframe__parents_empty(self):
obj = fake_neo('Event', seed=42)
res0 = ep.event_to_dataframe(obj)
res1 = ep.event_to_dataframe(obj, child_first=True)
res2 = ep.event_to_dataframe(obj, child_first=False)
res3 = ep.event_to_dataframe(obj, parents=True)
res4 = ep.event_to_dataframe(obj, parents=True, child_first=True)
res5 = ep.event_to_dataframe(obj, parents=True, child_first=False)
res6 = ep.event_to_dataframe(obj, parents=False)
res7 = ep.event_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.event_to_dataframe(obj, parents=False, child_first=False)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res3.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res4.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res5.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res6.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res7.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
assert_array_equal(targindex, res4.index)
assert_array_equal(targindex, res5.index)
assert_array_equal(targindex, res6.index)
assert_array_equal(targindex, res7.index)
assert_array_equal(targindex, res8.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(['times'], res3.index.names)
self.assertEqual(['times'], res4.index.names)
self.assertEqual(['times'], res5.index.names)
self.assertEqual(['times'], res6.index.names)
self.assertEqual(['times'], res7.index.names)
self.assertEqual(['times'], res8.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj, parents=False)
res1 = ep.event_to_dataframe(obj, parents=False, child_first=False)
res2 = ep.event_to_dataframe(obj, parents=False, child_first=True)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj)
res1 = ep.event_to_dataframe(obj, child_first=True)
res2 = ep.event_to_dataframe(obj, parents=True)
res3 = ep.event_to_dataframe(obj, parents=True, child_first=True)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
assert_array_equal(targindex, res2.index)
assert_array_equal(targindex, res3.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(['times'], res2.index.names)
self.assertEqual(['times'], res3.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__event_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Event')[0]
res0 = ep.event_to_dataframe(obj, child_first=False)
res1 = ep.event_to_dataframe(obj, parents=True, child_first=False)
targvalues = obj.labels[:len(obj.times)][np.newaxis].T.astype('U')
targindex = obj.times[:len(obj.labels)].rescale('s').magnitude
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.labels)),
len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targindex, res0.index)
assert_array_equal(targindex, res1.index)
self.assertEqual(['times'], res0.index.names)
self.assertEqual(['times'], res1.index.names)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class EpochToDataframeTestCase(unittest.TestCase):
def test__epoch_to_dataframe__parents_empty(self):
obj = fake_neo('Epoch', seed=42)
res0 = ep.epoch_to_dataframe(obj)
res1 = ep.epoch_to_dataframe(obj, child_first=True)
res2 = ep.epoch_to_dataframe(obj, child_first=False)
res3 = ep.epoch_to_dataframe(obj, parents=True)
res4 = ep.epoch_to_dataframe(obj, parents=True, child_first=True)
res5 = ep.epoch_to_dataframe(obj, parents=True, child_first=False)
res6 = ep.epoch_to_dataframe(obj, parents=False)
res7 = ep.epoch_to_dataframe(obj, parents=False, child_first=True)
res8 = ep.epoch_to_dataframe(obj, parents=False, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(1, len(res4.columns))
self.assertEqual(1, len(res5.columns))
self.assertEqual(1, len(res6.columns))
self.assertEqual(1, len(res7.columns))
self.assertEqual(1, len(res8.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res3.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res4.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res5.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res6.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res7.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res8.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
assert_array_equal(targvalues, res4.values)
assert_array_equal(targvalues, res5.values)
assert_array_equal(targvalues, res6.values)
assert_array_equal(targvalues, res7.values)
assert_array_equal(targvalues, res8.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual(keys, res4.columns.names)
self.assertEqual(keys, res5.columns.names)
self.assertEqual(keys, res6.columns.names)
self.assertEqual(keys, res7.columns.names)
self.assertEqual(keys, res8.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual([u'durations', u'times'], res3.index.names)
self.assertEqual([u'durations', u'times'], res4.index.names)
self.assertEqual([u'durations', u'times'], res5.index.names)
self.assertEqual([u'durations', u'times'], res6.index.names)
self.assertEqual([u'durations', u'times'], res7.index.names)
self.assertEqual([u'durations', u'times'], res8.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
self.assertEqual(2, len(res3.index.levels))
self.assertEqual(2, len(res4.index.levels))
self.assertEqual(2, len(res5.index.levels))
self.assertEqual(2, len(res6.index.levels))
self.assertEqual(2, len(res7.index.levels))
self.assertEqual(2, len(res8.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
assert_array_equal(targindex, res3.index.levels)
assert_array_equal(targindex, res4.index.levels)
assert_array_equal(targindex, res5.index.levels)
assert_array_equal(targindex, res6.index.levels)
assert_array_equal(targindex, res7.index.levels)
assert_array_equal(targindex, res8.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res4.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res5.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res6.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res7.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res8.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__noparents(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj, parents=False)
res1 = ep.epoch_to_dataframe(obj, parents=False, child_first=True)
res2 = ep.epoch_to_dataframe(obj, parents=False, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=False,
child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__parents_childfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj)
res1 = ep.epoch_to_dataframe(obj, child_first=True)
res2 = ep.epoch_to_dataframe(obj, parents=True)
res3 = ep.epoch_to_dataframe(obj, parents=True, child_first=True)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True, child_first=True)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(1, len(res2.columns))
self.assertEqual(1, len(res3.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res2.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res3.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
assert_array_equal(targvalues, res2.values)
assert_array_equal(targvalues, res3.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual(keys, res2.columns.names)
self.assertEqual(keys, res3.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual([u'durations', u'times'], res2.index.names)
self.assertEqual([u'durations', u'times'], res3.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
self.assertEqual(2, len(res2.index.levels))
self.assertEqual(2, len(res3.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
assert_array_equal(targindex, res2.index.levels)
assert_array_equal(targindex, res3.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res2.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res3.columns.levels):
assert_index_equal(value, level)
def test__epoch_to_dataframe__parents_parentfirst(self):
blk = fake_neo('Block', seed=42)
obj = blk.list_children_by_class('Epoch')[0]
res0 = ep.epoch_to_dataframe(obj, child_first=False)
res1 = ep.epoch_to_dataframe(obj, parents=True, child_first=False)
minlen = min([len(obj.times), len(obj.durations), len(obj.labels)])
targvalues = obj.labels[:minlen][np.newaxis].T.astype('U')
targindex = np.vstack([obj.durations[:minlen].rescale('s').magnitude,
obj.times[:minlen].rescale('s').magnitude])
targvalues = targvalues[targindex.argsort()[0], :]
targindex.sort()
attrs = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=False)
keys, values = zip(*sorted(attrs.items()))
values = _convert_levels(values)
self.assertEqual(1, len(res0.columns))
self.assertEqual(1, len(res1.columns))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res0.index))
self.assertEqual(min(len(obj.times), len(obj.durations),
len(obj.labels)),
len(res1.index))
assert_array_equal(targvalues, res0.values)
assert_array_equal(targvalues, res1.values)
self.assertEqual(keys, res0.columns.names)
self.assertEqual(keys, res1.columns.names)
self.assertEqual([u'durations', u'times'], res0.index.names)
self.assertEqual([u'durations', u'times'], res1.index.names)
self.assertEqual(2, len(res0.index.levels))
self.assertEqual(2, len(res1.index.levels))
assert_array_equal(targindex, res0.index.levels)
assert_array_equal(targindex, res1.index.levels)
for value, level in zip(values, res0.columns.levels):
assert_index_equal(value, level)
for value, level in zip(values, res1.columns.levels):
assert_index_equal(value, level)
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiSpiketrainsToDataframeTestCase(unittest.TestCase):
def setUp(self):
if hasattr(self, 'assertItemsEqual'):
self.assertCountEqual = self.assertItemsEqual
def test__multi_spiketrains_to_dataframe__single(self):
obj = fake_neo('SpikeTrain', seed=0, n=5)
res0 = ep.multi_spiketrains_to_dataframe(obj)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=False)
res2 = ep.multi_spiketrains_to_dataframe(obj, parents=True)
res3 = ep.multi_spiketrains_to_dataframe(obj, child_first=True)
res4 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=True)
res5 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=True)
res6 = ep.multi_spiketrains_to_dataframe(obj, child_first=False)
res7 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=False)
res8 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=False)
targ = ep.spiketrain_to_dataframe(obj)
keys = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = 1
targlen = len(obj)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targwidth, len(res4.columns))
self.assertEqual(targwidth, len(res5.columns))
self.assertEqual(targwidth, len(res6.columns))
self.assertEqual(targwidth, len(res7.columns))
self.assertEqual(targwidth, len(res8.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertEqual(targlen, len(res4.index))
self.assertEqual(targlen, len(res5.index))
self.assertEqual(targlen, len(res6.index))
self.assertEqual(targlen, len(res7.index))
self.assertEqual(targlen, len(res8.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
self.assertCountEqual(keys, res4.columns.names)
self.assertCountEqual(keys, res5.columns.names)
self.assertCountEqual(keys, res6.columns.names)
self.assertCountEqual(keys, res7.columns.names)
self.assertCountEqual(keys, res8.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_array_equal(targ.values, res4.values)
assert_array_equal(targ.values, res5.values)
assert_array_equal(targ.values, res6.values)
assert_array_equal(targ.values, res7.values)
assert_array_equal(targ.values, res8.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
assert_frame_equal(targ, res4)
assert_frame_equal(targ, res5)
assert_frame_equal(targ, res6)
assert_frame_equal(targ, res7)
assert_frame_equal(targ, res8)
def test__multi_spiketrains_to_dataframe__unit_default(self):
obj = fake_neo('Unit', seed=0, n=5)
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = obj.spiketrains
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__segment_default(self):
obj = fake_neo('Segment', seed=0, n=5)
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = obj.spiketrains
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__block_noparents(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_spiketrains_to_dataframe(obj, parents=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=False)
objs = obj.list_children_by_class('SpikeTrain')
targ = [ep.spiketrain_to_dataframe(iobj,
parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_spiketrains_to_dataframe__block_parents_childfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_spiketrains_to_dataframe(obj)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, child_first=True)
res3 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=True)
objs = obj.list_children_by_class('SpikeTrain')
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_spiketrains_to_dataframe__block_parents_parentfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_spiketrains_to_dataframe(obj, child_first=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=False)
objs = obj.list_children_by_class('SpikeTrain')
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_spiketrains_to_dataframe__list_noparents(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(obj, parents=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, parents=False,
child_first=False)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj,
parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_spiketrains_to_dataframe__list_parents_childfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(obj)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True)
res2 = ep.multi_spiketrains_to_dataframe(obj, child_first=True)
res3 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=True)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_spiketrains_to_dataframe__list_parents_parentfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(obj, child_first=False)
res1 = ep.multi_spiketrains_to_dataframe(obj, parents=True,
child_first=False)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj,
parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_spiketrains_to_dataframe__tuple_default(self):
obj = tuple(fake_neo('Block', seed=i, n=3) for i in range(3))
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__iter_default(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_spiketrains_to_dataframe(iter(obj))
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
def test__multi_spiketrains_to_dataframe__dict_default(self):
obj = dict((i, fake_neo('Block', seed=i, n=3)) for i in range(3))
res0 = ep.multi_spiketrains_to_dataframe(obj)
objs = (iobj.list_children_by_class('SpikeTrain') for iobj in
obj.values())
objs = list(chain.from_iterable(objs))
targ = [ep.spiketrain_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = max(len(iobj) for iobj in objs)
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(targ.values, res0.values)
assert_frame_equal(targ, res0)
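# Tests for ep.multi_events_to_dataframe. Each case builds the expected frame by
# converting the individual Event objects with ep.event_to_dataframe, concatenating
# them along the columns, and sorting, then checks shape, column names, and values
# against the multi-object conversion for single objects, Segments, Blocks, and
# containers (list, tuple, iterator, dict) of Blocks.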
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiEventsToDataframeTestCase(unittest.TestCase):
def setUp(self):
if hasattr(self, 'assertItemsEqual'):
self.assertCountEqual = self.assertItemsEqual
def test__multi_events_to_dataframe__single(self):
obj = fake_neo('Event', seed=0, n=5)
res0 = ep.multi_events_to_dataframe(obj)
res1 = ep.multi_events_to_dataframe(obj, parents=False)
res2 = ep.multi_events_to_dataframe(obj, parents=True)
res3 = ep.multi_events_to_dataframe(obj, child_first=True)
res4 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=True)
res5 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=True)
res6 = ep.multi_events_to_dataframe(obj, child_first=False)
res7 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=False)
res8 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=False)
targ = ep.event_to_dataframe(obj)
keys = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = 1
targlen = min(len(obj.times), len(obj.labels))
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targwidth, len(res4.columns))
self.assertEqual(targwidth, len(res5.columns))
self.assertEqual(targwidth, len(res6.columns))
self.assertEqual(targwidth, len(res7.columns))
self.assertEqual(targwidth, len(res8.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertEqual(targlen, len(res4.index))
self.assertEqual(targlen, len(res5.index))
self.assertEqual(targlen, len(res6.index))
self.assertEqual(targlen, len(res7.index))
self.assertEqual(targlen, len(res8.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
self.assertCountEqual(keys, res4.columns.names)
self.assertCountEqual(keys, res5.columns.names)
self.assertCountEqual(keys, res6.columns.names)
self.assertCountEqual(keys, res7.columns.names)
self.assertCountEqual(keys, res8.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_array_equal(targ.values, res4.values)
assert_array_equal(targ.values, res5.values)
assert_array_equal(targ.values, res6.values)
assert_array_equal(targ.values, res7.values)
assert_array_equal(targ.values, res8.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
assert_frame_equal(targ, res4)
assert_frame_equal(targ, res5)
assert_frame_equal(targ, res6)
assert_frame_equal(targ, res7)
assert_frame_equal(targ, res8)
def test__multi_events_to_dataframe__segment_default(self):
obj = fake_neo('Segment', seed=0, n=5)
res0 = ep.multi_events_to_dataframe(obj)
objs = obj.events
targ = [ep.event_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res0.values, dtype=np.float))
assert_frame_equal(targ, res0)
def test__multi_events_to_dataframe__block_noparents(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_events_to_dataframe(obj, parents=False)
res1 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=False)
objs = obj.list_children_by_class('Event')
targ = [ep.event_to_dataframe(iobj, parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res0.values, dtype=np.float))
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res1.values, dtype=np.float))
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res2.values, dtype=np.float))
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_events_to_dataframe__block_parents_childfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_events_to_dataframe(obj)
res1 = ep.multi_events_to_dataframe(obj, parents=True)
res2 = ep.multi_events_to_dataframe(obj, child_first=True)
res3 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=True)
objs = obj.list_children_by_class('Event')
targ = [ep.event_to_dataframe(iobj, parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res0.values, dtype=np.float))
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res1.values, dtype=np.float))
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res2.values, dtype=np.float))
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res3.values, dtype=np.float))
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_events_to_dataframe__block_parents_parentfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_events_to_dataframe(obj, child_first=False)
res1 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=False)
objs = obj.list_children_by_class('Event')
targ = [ep.event_to_dataframe(iobj, parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res0.values, dtype=np.float))
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res1.values, dtype=np.float))
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_events_to_dataframe__list_noparents(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_events_to_dataframe(obj, parents=False)
res1 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_events_to_dataframe(obj, parents=False,
child_first=False)
objs = (iobj.list_children_by_class('Event') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj, parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res0.values, dtype=np.float))
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res1.values, dtype=np.float))
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res2.values, dtype=np.float))
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_events_to_dataframe__list_parents_childfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_events_to_dataframe(obj)
res1 = ep.multi_events_to_dataframe(obj, parents=True)
res2 = ep.multi_events_to_dataframe(obj, child_first=True)
res3 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=True)
objs = (iobj.list_children_by_class('Event') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj, parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res0.values, dtype=np.float))
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res1.values, dtype=np.float))
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res2.values, dtype=np.float))
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res3.values, dtype=np.float))
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_events_to_dataframe__list_parents_parentfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_events_to_dataframe(obj, child_first=False)
res1 = ep.multi_events_to_dataframe(obj, parents=True,
child_first=False)
objs = (iobj.list_children_by_class('Event') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj, parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res0.values, dtype=np.float))
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res1.values, dtype=np.float))
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_events_to_dataframe__tuple_default(self):
obj = tuple(fake_neo('Block', seed=i, n=3) for i in range(3))
res0 = ep.multi_events_to_dataframe(obj)
objs = (iobj.list_children_by_class('Event') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res0.values, dtype=np.float))
assert_frame_equal(targ, res0)
def test__multi_events_to_dataframe__iter_default(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_events_to_dataframe(iter(obj))
objs = (iobj.list_children_by_class('Event') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res0.values, dtype=np.float))
assert_frame_equal(targ, res0)
def test__multi_events_to_dataframe__dict_default(self):
obj = dict((i, fake_neo('Block', seed=i, n=3)) for i in range(3))
res0 = ep.multi_events_to_dataframe(obj)
objs = (iobj.list_children_by_class('Event') for iobj in
obj.values())
objs = list(chain.from_iterable(objs))
targ = [ep.event_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.labels))]
for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res0.values, dtype=np.float))
assert_frame_equal(targ, res0)
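# Tests for ep.multi_epochs_to_dataframe, mirroring the Event cases above but with
# Epoch objects, whose expected index length also accounts for the durations array.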
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class MultiEpochsToDataframeTestCase(unittest.TestCase):
def setUp(self):
if hasattr(self, 'assertItemsEqual'):
self.assertCountEqual = self.assertItemsEqual
def test__multi_epochs_to_dataframe__single(self):
obj = fake_neo('Epoch', seed=0, n=5)
res0 = ep.multi_epochs_to_dataframe(obj)
res1 = ep.multi_epochs_to_dataframe(obj, parents=False)
res2 = ep.multi_epochs_to_dataframe(obj, parents=True)
res3 = ep.multi_epochs_to_dataframe(obj, child_first=True)
res4 = ep.multi_epochs_to_dataframe(obj, parents=False,
child_first=True)
res5 = ep.multi_epochs_to_dataframe(obj, parents=True,
child_first=True)
res6 = ep.multi_epochs_to_dataframe(obj, child_first=False)
res7 = ep.multi_epochs_to_dataframe(obj, parents=False,
child_first=False)
res8 = ep.multi_epochs_to_dataframe(obj, parents=True,
child_first=False)
targ = ep.epoch_to_dataframe(obj)
keys = ep._extract_neo_attrs_safe(obj, parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = 1
targlen = min(len(obj.times), len(obj.durations), len(obj.labels))
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targwidth, len(res4.columns))
self.assertEqual(targwidth, len(res5.columns))
self.assertEqual(targwidth, len(res6.columns))
self.assertEqual(targwidth, len(res7.columns))
self.assertEqual(targwidth, len(res8.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertEqual(targlen, len(res4.index))
self.assertEqual(targlen, len(res5.index))
self.assertEqual(targlen, len(res6.index))
self.assertEqual(targlen, len(res7.index))
self.assertEqual(targlen, len(res8.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
self.assertCountEqual(keys, res4.columns.names)
self.assertCountEqual(keys, res5.columns.names)
self.assertCountEqual(keys, res6.columns.names)
self.assertCountEqual(keys, res7.columns.names)
self.assertCountEqual(keys, res8.columns.names)
assert_array_equal(targ.values, res0.values)
assert_array_equal(targ.values, res1.values)
assert_array_equal(targ.values, res2.values)
assert_array_equal(targ.values, res3.values)
assert_array_equal(targ.values, res4.values)
assert_array_equal(targ.values, res5.values)
assert_array_equal(targ.values, res6.values)
assert_array_equal(targ.values, res7.values)
assert_array_equal(targ.values, res8.values)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
assert_frame_equal(targ, res4)
assert_frame_equal(targ, res5)
assert_frame_equal(targ, res6)
assert_frame_equal(targ, res7)
assert_frame_equal(targ, res8)
def test__multi_epochs_to_dataframe__segment_default(self):
obj = fake_neo('Segment', seed=0, n=5)
res0 = ep.multi_epochs_to_dataframe(obj)
objs = obj.epochs
targ = [ep.epoch_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.durations),
len(iobj.labels))] for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res0.values, dtype=np.float))
assert_frame_equal(targ, res0)
def test__multi_epochs_to_dataframe__block_noparents(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_epochs_to_dataframe(obj, parents=False)
res1 = ep.multi_epochs_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_epochs_to_dataframe(obj, parents=False,
child_first=False)
objs = obj.list_children_by_class('Epoch')
targ = [ep.epoch_to_dataframe(iobj, parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.durations),
len(iobj.labels))] for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res0.values, dtype=np.float))
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res1.values, dtype=np.float))
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res2.values, dtype=np.float))
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_epochs_to_dataframe__block_parents_childfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_epochs_to_dataframe(obj)
res1 = ep.multi_epochs_to_dataframe(obj, parents=True)
res2 = ep.multi_epochs_to_dataframe(obj, child_first=True)
res3 = ep.multi_epochs_to_dataframe(obj, parents=True,
child_first=True)
objs = obj.list_children_by_class('Epoch')
targ = [ep.epoch_to_dataframe(iobj, parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.durations),
len(iobj.labels))] for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res0.values, dtype=np.float))
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res1.values, dtype=np.float))
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res2.values, dtype=np.float))
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res3.values, dtype=np.float))
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_epochs_to_dataframe__block_parents_parentfirst(self):
obj = fake_neo('Block', seed=0, n=3)
res0 = ep.multi_epochs_to_dataframe(obj, child_first=False)
res1 = ep.multi_epochs_to_dataframe(obj, parents=True,
child_first=False)
objs = obj.list_children_by_class('Epoch')
targ = [ep.epoch_to_dataframe(iobj, parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.durations),
len(iobj.labels))] for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res0.values, dtype=np.float))
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res1.values, dtype=np.float))
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_epochs_to_dataframe__list_noparents(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_epochs_to_dataframe(obj, parents=False)
res1 = ep.multi_epochs_to_dataframe(obj, parents=False,
child_first=True)
res2 = ep.multi_epochs_to_dataframe(obj, parents=False,
child_first=False)
objs = (iobj.list_children_by_class('Epoch') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.epoch_to_dataframe(iobj, parents=False, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=False,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.durations),
len(iobj.labels))] for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res0.values, dtype=np.float))
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res1.values, dtype=np.float))
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res2.values, dtype=np.float))
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
def test__multi_epochs_to_dataframe__list_parents_childfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_epochs_to_dataframe(obj)
res1 = ep.multi_epochs_to_dataframe(obj, parents=True)
res2 = ep.multi_epochs_to_dataframe(obj, child_first=True)
res3 = ep.multi_epochs_to_dataframe(obj, parents=True,
child_first=True)
objs = (iobj.list_children_by_class('Epoch') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.epoch_to_dataframe(iobj, parents=True, child_first=True)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.durations),
len(iobj.labels))] for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targwidth, len(res2.columns))
self.assertEqual(targwidth, len(res3.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertEqual(targlen, len(res2.index))
self.assertEqual(targlen, len(res3.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
self.assertCountEqual(keys, res2.columns.names)
self.assertCountEqual(keys, res3.columns.names)
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res0.values, dtype=np.float))
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res1.values, dtype=np.float))
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res2.values, dtype=np.float))
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res3.values, dtype=np.float))
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
def test__multi_epochs_to_dataframe__list_parents_parentfirst(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_epochs_to_dataframe(obj, child_first=False)
res1 = ep.multi_epochs_to_dataframe(obj, parents=True,
child_first=False)
objs = (iobj.list_children_by_class('Epoch') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.epoch_to_dataframe(iobj, parents=True, child_first=False)
for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=False).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.durations),
len(iobj.labels))] for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targwidth, len(res1.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertEqual(targlen, len(res1.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
self.assertCountEqual(keys, res1.columns.names)
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res0.values, dtype=np.float))
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res1.values, dtype=np.float))
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
def test__multi_epochs_to_dataframe__tuple_default(self):
obj = tuple(fake_neo('Block', seed=i, n=3) for i in range(3))
res0 = ep.multi_epochs_to_dataframe(obj)
objs = (iobj.list_children_by_class('Epoch') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.epoch_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.durations),
len(iobj.labels))] for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res0.values, dtype=np.float))
assert_frame_equal(targ, res0)
def test__multi_epochs_to_dataframe__iter_default(self):
obj = [fake_neo('Block', seed=i, n=3) for i in range(3)]
res0 = ep.multi_epochs_to_dataframe(iter(obj))
objs = (iobj.list_children_by_class('Epoch') for iobj in obj)
objs = list(chain.from_iterable(objs))
targ = [ep.epoch_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.durations),
len(iobj.labels))] for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res0.values, dtype=np.float))
assert_frame_equal(targ, res0)
def test__multi_epochs_to_dataframe__dict_default(self):
obj = dict((i, fake_neo('Block', seed=i, n=3)) for i in range(3))
res0 = ep.multi_epochs_to_dataframe(obj)
objs = (iobj.list_children_by_class('Epoch') for iobj in
obj.values())
objs = list(chain.from_iterable(objs))
targ = [ep.epoch_to_dataframe(iobj) for iobj in objs]
targ = ep._sort_inds(pd.concat(targ, axis=1), axis=1)
keys = ep._extract_neo_attrs_safe(objs[0], parents=True,
child_first=True).keys()
keys = list(keys)
targwidth = len(objs)
targlen = [iobj.times[:min(len(iobj.times), len(iobj.durations),
len(iobj.labels))] for iobj in objs]
targlen = len(np.unique(np.hstack(targlen)))
self.assertGreater(len(objs), 0)
self.assertEqual(targwidth, len(targ.columns))
self.assertEqual(targwidth, len(res0.columns))
self.assertEqual(targlen, len(targ.index))
self.assertEqual(targlen, len(res0.index))
self.assertCountEqual(keys, targ.columns.names)
self.assertCountEqual(keys, res0.columns.names)
assert_array_equal(
np.array(targ.values, dtype=np.float),
np.array(res0.values, dtype=np.float))
assert_frame_equal(targ, res0)
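# Tests for ep.slice_spiketrain: slicing with t_start/t_stop should return a copy
# (never the original frame), replace spike times outside the window with NaN, and
# update the 't_start'/'t_stop' levels of the column MultiIndex accordingly.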
@unittest.skipUnless(HAVE_PANDAS, 'requires pandas')
class SliceSpiketrainTestCase(unittest.TestCase):
def setUp(self):
obj = [fake_neo('SpikeTrain', seed=i, n=3) for i in range(10)]
self.obj = ep.multi_spiketrains_to_dataframe(obj)
def test_single_none(self):
targ_start = self.obj.columns.get_level_values('t_start').values
targ_stop = self.obj.columns.get_level_values('t_stop').values
res0 = ep.slice_spiketrain(self.obj)
res1 = ep.slice_spiketrain(self.obj, t_start=None)
res2 = ep.slice_spiketrain(self.obj, t_stop=None)
res3 = ep.slice_spiketrain(self.obj, t_start=None, t_stop=None)
res0_start = res0.columns.get_level_values('t_start').values
res1_start = res1.columns.get_level_values('t_start').values
res2_start = res2.columns.get_level_values('t_start').values
res3_start = res3.columns.get_level_values('t_start').values
res0_stop = res0.columns.get_level_values('t_stop').values
res1_stop = res1.columns.get_level_values('t_stop').values
res2_stop = res2.columns.get_level_values('t_stop').values
res3_stop = res3.columns.get_level_values('t_stop').values
targ = self.obj
self.assertFalse(res0 is targ)
self.assertFalse(res1 is targ)
self.assertFalse(res2 is targ)
self.assertFalse(res3 is targ)
assert_frame_equal(targ, res0)
assert_frame_equal(targ, res1)
assert_frame_equal(targ, res2)
assert_frame_equal(targ, res3)
assert_array_equal(targ_start, res0_start)
assert_array_equal(targ_start, res1_start)
assert_array_equal(targ_start, res2_start)
assert_array_equal(targ_start, res3_start)
assert_array_equal(targ_stop, res0_stop)
assert_array_equal(targ_stop, res1_stop)
assert_array_equal(targ_stop, res2_stop)
assert_array_equal(targ_stop, res3_stop)
def test_single_t_start(self):
targ_start = .0001
targ_stop = self.obj.columns.get_level_values('t_stop').values
res0 = ep.slice_spiketrain(self.obj, t_start=targ_start)
res1 = ep.slice_spiketrain(self.obj, t_start=targ_start, t_stop=None)
res0_start = res0.columns.get_level_values('t_start').unique().tolist()
res1_start = res1.columns.get_level_values('t_start').unique().tolist()
res0_stop = res0.columns.get_level_values('t_stop').values
res1_stop = res1.columns.get_level_values('t_stop').values
targ = self.obj.values
targ[targ < targ_start] = np.nan
self.assertFalse(res0 is targ)
self.assertFalse(res1 is targ)
assert_array_equal(targ, res0.values)
assert_array_equal(targ, res1.values)
self.assertEqual([targ_start], res0_start)
self.assertEqual([targ_start], res1_start)
assert_array_equal(targ_stop, res0_stop)
assert_array_equal(targ_stop, res1_stop)
def test_single_t_stop(self):
targ_start = self.obj.columns.get_level_values('t_start').values
targ_stop = .0009
res0 = ep.slice_spiketrain(self.obj, t_stop=targ_stop)
res1 = ep.slice_spiketrain(self.obj, t_stop=targ_stop, t_start=None)
res0_start = res0.columns.get_level_values('t_start').values
res1_start = res1.columns.get_level_values('t_start').values
res0_stop = res0.columns.get_level_values('t_stop').unique().tolist()
res1_stop = res1.columns.get_level_values('t_stop').unique().tolist()
targ = self.obj.values
targ[targ > targ_stop] = np.nan
self.assertFalse(res0 is targ)
self.assertFalse(res1 is targ)
assert_array_equal(targ, res0.values)
assert_array_equal(targ, res1.values)
assert_array_equal(targ_start, res0_start)
assert_array_equal(targ_start, res1_start)
self.assertEqual([targ_stop], res0_stop)
self.assertEqual([targ_stop], res1_stop)
def test_single_both(self):
targ_start = .0001
targ_stop = .0009
res0 = ep.slice_spiketrain(self.obj,
t_stop=targ_stop, t_start=targ_start)
res0_start = res0.columns.get_level_values('t_start').unique().tolist()
res0_stop = res0.columns.get_level_values('t_stop').unique().tolist()
targ = self.obj.values
targ[targ < targ_start] = np.nan
targ[targ > targ_stop] = np.nan
self.assertFalse(res0 is targ)
assert_array_equal(targ, res0.values)
self.assertEqual([targ_start], res0_start)
self.assertEqual([targ_stop], res0_stop)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
leogulus/pisco_pipeline | pisco_photometry_all_2019.py | 1 | 76497 | import sys, os, re, yaml, subprocess, shlex, FITS_tools
import pandas as pd
import numpy as np
import pickle
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import image
import matplotlib.cm as cm
import matplotlib.image as mpimg
from scipy.optimize import curve_fit
import scipy.integrate as integrate
from scipy import interpolate
from scipy.interpolate import interp1d
import scipy.stats
from astropy.io import fits
from astropy.table import Table, join
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.cosmology import FlatLambdaCDM
cosmo = FlatLambdaCDM(H0=71, Om0=0.3, Tcmb0=2.725)
import extra_program as ex
from PIL import Image as Image_PIL
import ebvpy #Galactic Reddening
"""
Example:
python pisco_pipeline/pisco_photometry_all_2019.py PKS1353 psf allslr 2mass
python pisco_pipeline/pisco_photometry_all_2019.py PKS1353 psf allslr no2mass
python pisco_pipeline/pisco_photometry_all_2019.py PKS1353 psf noslr no2mass
python pisco_pipeline/pisco_photometry_all_2019.py PKS1353 auto noslr no2mass
field: name of the field (e.g. PKS1353)
mode: magnitude mode to use (psf, auto, aper, hybrid, or model)
allslr:
- allslr: run everything, including photometry_v4, cut_frame, and SLR
- slr: run only SLR and update the colors
- noslr: skip SLR; just update the colors using the chosen magnitude mode
2mass:
- 2mass: run SLR matched against 2MASS
- no2mass: run SLR without 2MASS matching
"""
###--------------------------------------------------------------------------###
def find_seeing(field,band):
df_see=pd.read_csv('/Users/taweewat/Documents/red_sequence/total_chips_field_seeing.csv',index_col=0)
if field[0:5]=='CHIPS':
seeing = df_see[df_see.chips==field]['seeing_q25_%s'%band].values[0] #_%s'%band
return seeing
elif (field[0:5]=='Field')|(field[0:3]=='PKS')|(field[0:4]=='SDSS'):
seeing = df_see[df_see.name==field]['seeing_q25_%s'%band].values[0] #_%s'%band
return seeing
def find_seeing_new(dir,field):
myReg3=re.compile(r'(CHIPS)[^\_]*\_[^\_]*')
seeing = float(fits.open(list_file_name(dir,myReg3.search(field).group())[0])[0].header['FWHM1'])
return seeing
def find_seeing_fits(field):
home='/Users/taweewat/Documents/pisco_code/'
dirs=['ut170103/','ut170104/','ut170619/','ut170621/','ut170624/','ut171208/',\
'ut171209/','ut171212/','ut190412/','ut190413/']
myReg=re.compile(r'(%s_A).*'%field)
for di in dirs:
dir=home+di
for text in os.listdir(dir):
if myReg.search(text) != None:
seeing=float(fits.open(dir+myReg.search(text).group())[0].header['FWHM1'])
return seeing
def read_param():
with open("pisco_pipeline/params.yaml", 'r') as stream:
try:
param=yaml.load(stream, Loader=yaml.FullLoader)
return param
except yaml.YAMLError as exc:
print(exc)
def read_param_izp(mode):
if mode=='psf':
mode_izp=''
elif mode=='model':
mode_izp='' #'_model'
else:
mode_izp=''
# print "/Users/taweewat/Documents/pisco_code/pisco_pipeline/params_izeropoint%s.yaml" % mode_izp
with open("/Users/taweewat/Documents/pisco_code/pisco_pipeline/params_izeropoint%s.yaml"%mode_izp, 'r') as stream:
try:
param=yaml.load(stream, Loader=yaml.FullLoader)
return param
except yaml.YAMLError as exc:
print(exc)
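# star_galaxy_bleem: write a squared copy of the i-band coadd, then run SExtractor
# twice (single-image mode and dual-image mode against the squared image) to produce
# the catalogs used downstream for star/galaxy separation.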
def star_galaxy_bleem(field):
sg_dir = 'star_galaxy'
if not os.path.exists(sg_dir):
os.makedirs(sg_dir)
param=read_param()
# seeing=find_seeing(field,'i')
# seeing=find_seeing_fits(field)
seeing = 1.0
# seeing=1.5
# seeing=0.95
minarea=1.7
data, header = fits.getdata('final/coadd_c%s_i.fits'%field, header=True)
data2=data**2
# pxscale=0.11
pxscale=0.22
fits.writeto('final/coadd_c%s_sq_i.fits'%field, data2, header=header, overwrite=True)
cmd='sex final/coadd_c%s_i.fits -c pisco_pipeline/config.sex -PARAMETERS_NAME pisco_pipeline/%s -CATALOG_NAME %s -CATALOG_TYPE FITS_1.0 -SEEING_FWHM %s -SATUR_LEVEL %s -PHOT_APERTURES 15 -PIXEL_SCALE %s -DETECT_MINAREA %s -CHECKIMAGE_NAME checki.fits,segmenti.fits'%\
(field,'sex.param',sg_dir+'/%s_catalog.fits'%(field),str(seeing),str(param['satur_level_i_psf']),str(pxscale),str(1.1/minarea*np.pi*(seeing/pxscale)**2)); print cmd
subprocess.check_call(shlex.split(cmd))
cmd='sex final/coadd_c%s_i.fits,final/coadd_c%s_sq_i.fits -c pisco_pipeline/config.sex -PARAMETERS_NAME pisco_pipeline/%s -CATALOG_NAME %s -CATALOG_TYPE FITS_1.0 -SEEING_FWHM %s -SATUR_LEVEL %s -PHOT_APERTURES 15 -PIXEL_SCALE %s -DETECT_MINAREA %s'%\
(field,field,'sex.param',sg_dir+'/%s_sq_catalog.fits'%(field),str(seeing),str(param['satur_level_i_sq_psf']),str(pxscale),str(1.1/minarea*np.pi*(seeing/pxscale)**2)); print cmd
subprocess.check_call(shlex.split(cmd))
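# pisco_photometry_v4: for each band, reproject the coadd onto the i-band frame,
# select clean stars to build a PSFEx model, rerun SExtractor in dual-image mode
# with that PSF, and finally sky-match the g, r, i, z catalogs into joined tables.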
def pisco_photometry_v4(field):
def aperature_proj(field,band):
param=read_param()
seeing=find_seeing(field,band)
# seeing=find_seeing_fits(field)
# seeing = 1.1
# seeing=1.5
slrdir = 'slr_output'
to_be_projected = 'final/coadd_c%s_%s.fits'%(field,band)
reference_fits = 'final/coadd_c%s_i.fits'%field
im1,im2, header = FITS_tools.match_fits(to_be_projected,reference_fits,return_header=True)
outname = 'final/proj_coadd_c%s_%s.fits'%(field,band)
print 'projecting from %s band to i band the fits file '%band + outname
fits.writeto(outname, im1, header, overwrite=True)
minarea=1.7 #1.7
pxscale=0.22
# pxscale=0.11
cmd='sex final/coadd_c%s_%s.fits -c pisco_pipeline/config.sex -PARAMETERS_NAME pisco_pipeline/%s -CATALOG_NAME %s -SEEING_FWHM %s -SATUR_LEVEL %s -PHOT_APERTURES 23 -PIXEL_SCALE %s -DETECT_MINAREA %s -CHECKIMAGE_NAME check_psf_%s.fits,segment_psf_%s.fits'%\
(field,band,'sex_psf.param','psfex_output/psf_%s_%s.fits'%(field,band),str(seeing),str(param['satur_level_%s_psf'%band]),str(pxscale),str(1.1/minarea*np.pi*(seeing/pxscale)**2), band, band)
print cmd
subprocess.check_call(shlex.split(cmd))
Tf=Table(fits.open('psfex_output/psf_%s_%s.fits'%(field,band))[2].data)
# Tfcut = Tf[(Tf['CLASS_STAR'] > 0.97) & (Tf['FLAGS'] == 0)].copy() #0.97 Field292
if len(Tf[(Tf['CLASS_STAR'] > 0.95) & (Tf['FLAGS'] < 5)]) > 0:
Tfcut = Tf[(Tf['CLASS_STAR'] > 0.95) & (Tf['FLAGS'] < 5)].copy()
else:
Tfcut = Tf[(Tf['CLASS_STAR'] > 0.9) & (Tf['FLAGS'] < 5)].copy()
# Tfcut = Tf[(Tf['CLASS_STAR'] > 0.9) & (Tf['FLAGS'] < 5)].copy() #0.97 Field292
Tfcut_edge=Tfcut[(Tfcut['XWIN_IMAGE']<np.max(Tfcut['XWIN_IMAGE'])-60)&(Tfcut['XWIN_IMAGE']>np.min(Tfcut['XWIN_IMAGE'])+60)&\
(Tfcut['YWIN_IMAGE']<np.max(Tfcut['YWIN_IMAGE'])-60)&(Tfcut['YWIN_IMAGE']>np.min(Tfcut['YWIN_IMAGE'])+60)].copy()
Tfcut_more=Tfcut_edge[(np.abs(Tfcut_edge['FLUX_RADIUS']-np.mean(Tfcut_edge['FLUX_RADIUS']))<2*np.std(Tfcut_edge['FLUX_RADIUS']))]
Tfcut_more2=Tfcut_more[(np.abs(Tfcut_more['ELONGATION']-np.mean(Tfcut_more['ELONGATION']))<2*np.std(Tfcut_more['ELONGATION']))].copy()
print "length of Tf: all: {}, CS>0.97: {}, edges: {}, flux_radius: {}, elong: {}".format(len(Tf), len(Tfcut), len(Tfcut_edge), len(Tfcut_more), len(Tfcut_more2))
hdu = fits.open('psfex_output/psf_%s_%s.fits'%(field,band))
hdu[2].data = hdu[2].data[Tfcut_more2['NUMBER']-1]
# hdu[2].data = hdu[2].data[Tfcut['NUMBER']-1]
hdu.writeto('psfex_output/psf_%s_%s.fits'%(field,band), overwrite=True)
cmd='psfex %s -c pisco_pipeline/pisco.psfex' % ('psfex_output/psf_%s_%s.fits'%(field,band))
print cmd
subprocess.check_call(shlex.split(cmd))
# minarea=3.0
cmd='sex final/coadd_c%s_i.fits,final/proj_coadd_c%s_%s.fits -c pisco_pipeline/config.sex -PSF_NAME %s -PARAMETERS_NAME pisco_pipeline/%s -CATALOG_NAME %s -SEEING_FWHM %s -SATUR_LEVEL %s -PIXEL_SCALE %s -CATALOG_TYPE FITS_1.0 -PHOT_APERTURES 23 -DETECT_MINAREA %s -CHECKIMAGE_NAME check%s.fits,segment%s.fits'%\
(field, field, band, 'psfex_output/psf_%s_%s.psf' % (field, band), 'sex_after_psf.param', '%s/a_psf_%s_%s.fits' % (slrdir, field, band),
str(seeing), str(param['satur_level_%s_psf' % band]), str(pxscale), str(1.1 / minarea * np.pi * (seeing / pxscale)**2), band, band)
print cmd
subprocess.check_call(shlex.split(cmd))
table=Table.read('%s/a_psf_%s_%s.fits'%(slrdir,field,band))
for name in table.colnames[:]:
table.rename_column(name, name + '_%s' % band)
return table
slrdir = 'slr_output'
if not os.path.exists(slrdir):
os.makedirs(slrdir)
tableg=aperature_proj(field,'g')
tablei=aperature_proj(field,'i')
tabler=aperature_proj(field,'r')
tablez=aperature_proj(field,'z')
print 'len of all table', len(tableg), len(tablei), len(tabler), len(tablez)
ci=SkyCoord(ra=np.array(tablei['ALPHA_J2000_i'])*u.degree, dec=np.array(tablei['DELTA_J2000_i'])*u.degree)# print len(ci)
cg=SkyCoord(ra=np.array(tableg['ALPHA_J2000_g'])*u.degree, dec=np.array(tableg['DELTA_J2000_g'])*u.degree)# print len(cg)
cr=SkyCoord(ra=np.array(tabler['ALPHA_J2000_r'])*u.degree, dec=np.array(tabler['DELTA_J2000_r'])*u.degree)# print len(cr)
cz=SkyCoord(ra=np.array(tablez['ALPHA_J2000_z'])*u.degree, dec=np.array(tablez['DELTA_J2000_z'])*u.degree)# print len(cz)
idxn, d2dn, d3dn=cg.match_to_catalog_sky(ci)
# Table_I=tablei[idxn][['NUMBER_i','XWIN_IMAGE_i','YWIN_IMAGE_i','ALPHA_J2000_i','DELTA_J2000_i','MAG_APER_i','MAGERR_APER_i','MAG_AUTO_i','MAGERR_AUTO_i','MAG_HYBRID_i','MAGERR_HYBRID_i',\
# 'CLASS_STAR_i','FLAGS_i','MAG_PSF_i','MAGERR_PSF_i','MAG_MODEL_i','MAGERR_MODEL_i','SPREAD_MODEL_i']]
Table_I=tablei[idxn][['NUMBER_i','XWIN_IMAGE_i','YWIN_IMAGE_i','ALPHA_J2000_i','DELTA_J2000_i','MAG_APER_i','MAGERR_APER_i','MAG_AUTO_i','MAGERR_AUTO_i','MAG_SPHEROID_i','MAGERR_SPHEROID_i',\
'CLASS_STAR_i','FLAGS_i','MAG_PSF_i','MAGERR_PSF_i','MAG_MODEL_i','MAGERR_MODEL_i','SPREAD_MODEL_i','SPREADERR_MODEL_i','MAG_ISO_i','MAGERR_ISO_i']]
Table_I.rename_column('ALPHA_J2000_i','ALPHA_J2000')
Table_I.rename_column('DELTA_J2000_i','DELTA_J2000')
idxn, d2dn, d3dn=cg.match_to_catalog_sky(cr)
# Table_R=tabler[idxn][['NUMBER_r','ALPHA_J2000_r','DELTA_J2000_r','MAG_APER_r','MAGERR_APER_r','MAG_AUTO_r','MAGERR_AUTO_r','MAG_HYBRID_r','MAGERR_HYBRID_r',\
# 'CLASS_STAR_r','FLAGS_r','MAG_PSF_r','MAGERR_PSF_r','MAG_MODEL_r','MAGERR_MODEL_r','SPREAD_MODEL_r']]
Table_R=tabler[idxn][['NUMBER_r','ALPHA_J2000_r','DELTA_J2000_r','MAG_APER_r','MAGERR_APER_r','MAG_AUTO_r','MAGERR_AUTO_r','MAG_SPHEROID_r','MAGERR_SPHEROID_r',\
'CLASS_STAR_r','FLAGS_r','MAG_PSF_r','MAGERR_PSF_r','MAG_MODEL_r','MAGERR_MODEL_r','SPREAD_MODEL_r','SPREADERR_MODEL_r','MAG_ISO_r','MAGERR_ISO_r']]
Table_R.rename_column('ALPHA_J2000_r','ALPHA_J2000')
Table_R.rename_column('DELTA_J2000_r','DELTA_J2000')
idxn, d2dn, d3dn=cg.match_to_catalog_sky(cz)
# Table_Z=tablez[idxn][['NUMBER_z','ALPHA_J2000_z','DELTA_J2000_z','MAG_APER_z','MAGERR_APER_z','MAG_AUTO_z','MAGERR_AUTO_z','MAG_HYBRID_z','MAGERR_HYBRID_z',\
# 'CLASS_STAR_z','FLAGS_z','MAG_PSF_z','MAGERR_PSF_z','MAG_MODEL_z','MAGERR_MODEL_z','SPREAD_MODEL_z']]
Table_Z=tablez[idxn][['NUMBER_z','ALPHA_J2000_z','DELTA_J2000_z','MAG_APER_z','MAGERR_APER_z','MAG_AUTO_z','MAGERR_AUTO_z','MAG_SPHEROID_z','MAGERR_SPHEROID_z',\
'CLASS_STAR_z','FLAGS_z','MAG_PSF_z','MAGERR_PSF_z','MAG_MODEL_z','MAGERR_MODEL_z','SPREAD_MODEL_z','SPREADERR_MODEL_z','MAG_ISO_z','MAGERR_ISO_z']]
Table_Z.rename_column('ALPHA_J2000_z','ALPHA_J2000')
Table_Z.rename_column('DELTA_J2000_z','DELTA_J2000')
# Table_G=tableg[['NUMBER_g','ALPHA_J2000_g','DELTA_J2000_g','MAG_APER_g','MAGERR_APER_g','MAG_AUTO_g','MAGERR_AUTO_g','MAG_HYBRID_g','MAGERR_HYBRID_g',\
# 'CLASS_STAR_g','FLAGS_g','MAG_PSF_g','MAGERR_PSF_g','MAG_MODEL_g','MAGERR_MODEL_g','SPREAD_MODEL_g']]
Table_G = tableg[['NUMBER_g', 'ALPHA_J2000_g', 'DELTA_J2000_g', 'MAG_APER_g', 'MAGERR_APER_g', 'MAG_AUTO_g', 'MAGERR_AUTO_g', 'MAG_SPHEROID_g', 'MAGERR_SPHEROID_g',
'CLASS_STAR_g','FLAGS_g','MAG_PSF_g','MAGERR_PSF_g','MAG_MODEL_g','MAGERR_MODEL_g','SPREAD_MODEL_g','SPREADERR_MODEL_g','MAG_ISO_g','MAGERR_ISO_g']]
Table_G.rename_column('ALPHA_J2000_g','ALPHA_J2000')
Table_G.rename_column('DELTA_J2000_g','DELTA_J2000')
print 'len of all new table', len(Table_G), len(Table_I), len(Table_R), len(Table_Z)
total=join(join(join(Table_I,Table_G,keys=['ALPHA_J2000','DELTA_J2000']),Table_R,keys=['ALPHA_J2000','DELTA_J2000']),\
Table_Z,keys=['ALPHA_J2000','DELTA_J2000'])
# total=join(join(join(mag_ii,mag_ig,keys='NUMBER'), mag_ir,keys='NUMBER'),\
# mag_iz,keys='NUMBER')
# total2=total[['ALPHA_J2000','DELTA_J2000','NUMBER_i','NUMBER_r','NUMBER_g','XWIN_IMAGE_i','YWIN_IMAGE_i',\
# 'MAG_APER_i','MAGERR_APER_i','MAG_APER_g','MAGERR_APER_g','MAG_APER_r',\
# 'MAGERR_APER_r','MAG_APER_z','MAGERR_APER_z','MAG_AUTO_i','MAGERR_AUTO_i',\
# 'MAG_AUTO_g','MAGERR_AUTO_g','MAG_AUTO_r','MAGERR_AUTO_r','MAG_AUTO_z',\
# 'MAGERR_AUTO_z','MAG_HYBRID_i','MAGERR_HYBRID_i','MAG_HYBRID_g',\
# 'MAGERR_HYBRID_g','MAG_HYBRID_r','MAGERR_HYBRID_r','MAG_HYBRID_z',\
# 'MAGERR_HYBRID_z','CLASS_STAR_i','CLASS_STAR_g','CLASS_STAR_r',\
# 'CLASS_STAR_z','FLAGS_g','FLAGS_r','FLAGS_i','FLAGS_z','MAG_PSF_g',\
# 'MAG_PSF_r','MAG_PSF_i','MAG_PSF_z','MAGERR_PSF_g','MAGERR_PSF_r',\
# 'MAGERR_PSF_i','MAGERR_PSF_z','MAG_MODEL_g','MAG_MODEL_r',\
# 'MAG_MODEL_i','MAG_MODEL_z','MAGERR_MODEL_g','MAGERR_MODEL_r',\
# 'MAGERR_MODEL_i','MAGERR_MODEL_z','SPREAD_MODEL_g','SPREAD_MODEL_r',\
# 'SPREAD_MODEL_i','SPREAD_MODEL_z',]]
total.write(os.path.join(slrdir, 'total0_psf_%s.csv' % field), overwrite=True)
total2=total[['ALPHA_J2000','DELTA_J2000','NUMBER_i','NUMBER_r','NUMBER_g','XWIN_IMAGE_i','YWIN_IMAGE_i',\
'MAG_APER_i','MAGERR_APER_i','MAG_APER_g','MAGERR_APER_g','MAG_APER_r',\
'MAGERR_APER_r','MAG_APER_z','MAGERR_APER_z','MAG_AUTO_i','MAGERR_AUTO_i',\
'MAG_AUTO_g','MAGERR_AUTO_g','MAG_AUTO_r','MAGERR_AUTO_r','MAG_AUTO_z',\
'MAGERR_AUTO_z','MAG_ISO_g','MAGERR_ISO_g','MAG_ISO_r','MAGERR_ISO_r',\
'MAG_ISO_i','MAGERR_ISO_i','MAG_ISO_z','MAGERR_ISO_z',\
'MAG_SPHEROID_i','MAGERR_SPHEROID_i','MAG_SPHEROID_g',\
'MAGERR_SPHEROID_g','MAG_SPHEROID_r','MAGERR_SPHEROID_r','MAG_SPHEROID_z',\
'MAGERR_SPHEROID_z','CLASS_STAR_i','CLASS_STAR_g','CLASS_STAR_r',\
'CLASS_STAR_z','FLAGS_g','FLAGS_r','FLAGS_i','FLAGS_z','MAG_PSF_g',\
'MAG_PSF_r','MAG_PSF_i','MAG_PSF_z','MAGERR_PSF_g','MAGERR_PSF_r',\
'MAGERR_PSF_i','MAGERR_PSF_z','MAG_MODEL_g','MAG_MODEL_r',\
'MAG_MODEL_i','MAG_MODEL_z','MAGERR_MODEL_g','MAGERR_MODEL_r',\
'MAGERR_MODEL_i','MAGERR_MODEL_z','SPREAD_MODEL_g','SPREAD_MODEL_r',\
'SPREAD_MODEL_i','SPREAD_MODEL_z','SPREADERR_MODEL_g','SPREADERR_MODEL_r',\
'SPREADERR_MODEL_i','SPREADERR_MODEL_z']]
total2.write(os.path.join(slrdir, 'total_psf_%s.csv' % field), overwrite=True)
# total2.write(slrdir+'/all_psf_%s.fits' % field, overwrite=True)
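# Sketch (not part of the original pipeline): the per-band catalogs above are
# matched with SkyCoord.match_to_catalog_sky, which always returns the nearest
# neighbour and never rejects distant, spurious matches. A helper such as the
# one below could add a maximum-separation cut; the function name and the
# 1-arcsec default tolerance are illustrative assumptions, not pipeline values.
def _match_by_sky_sketch(table_a, table_b, ra_a, dec_a, ra_b, dec_b, tol_arcsec=1.0):
    import numpy as np
    from astropy import units as u
    from astropy.coordinates import SkyCoord
    ca = SkyCoord(ra=np.array(table_a[ra_a]) * u.degree, dec=np.array(table_a[dec_a]) * u.degree)
    cb = SkyCoord(ra=np.array(table_b[ra_b]) * u.degree, dec=np.array(table_b[dec_b]) * u.degree)
    idx, d2d, _ = ca.match_to_catalog_sky(cb)
    keep = d2d.arcsec < tol_arcsec  # drop nearest neighbours that are still too far away
    return table_a[keep], table_b[idx[keep]]
# e.g. (hypothetical call): _match_by_sky_sketch(tableg, tablei,
#      'ALPHA_J2000_g', 'DELTA_J2000_g', 'ALPHA_J2000_i', 'DELTA_J2000_i')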
def pisco_cut_star(field,c_a,c_b,c_d,c_delta):
seeing=find_seeing_fits(field)
true_seeing=find_seeing(field,'i')
df_i=Table(fits.open('/Users/taweewat/Documents/pisco_code/star_galaxy/%s_catalog.fits'%field)[1].data).to_pandas()
df_isq=Table(fits.open('/Users/taweewat/Documents/pisco_code/star_galaxy/%s_sq_catalog.fits'%field)[1].data).to_pandas()
# trim the object lists so that the sq catalog and the PSF-magnitude list contain the same matched set of objects.
fname = "/Users/taweewat/Documents/pisco_code/slr_output/total_psf_%s.csv"%field
df0 = pd.read_csv(fname)
df0['NUMBER'] = np.arange(0, len(df0), 1).tolist()
cf_i=SkyCoord(ra=np.array(df_i['ALPHA_J2000'])*u.degree, dec=np.array(df_i['DELTA_J2000'])*u.degree)
cf_isq=SkyCoord(ra=np.array(df_isq['ALPHA_J2000'])*u.degree, dec=np.array(df_isq['DELTA_J2000'])*u.degree)
cf0=SkyCoord(ra=np.array(df0['ALPHA_J2000'])*u.degree, dec=np.array(df0['DELTA_J2000'])*u.degree)
df0.rename(columns={'ALPHA_J2000': 'ALPHA_J2000_i'}, inplace=True)
df0.rename(columns={'DELTA_J2000': 'DELTA_J2000_i'}, inplace=True)
idxn, d2dn, d3dn=cf0.match_to_catalog_sky(cf_i)
df_i_cut0=df_i.loc[idxn].copy()
df_i_cut0['NUMBER']=np.arange(0,len(df0),1).tolist()
df_i_cut=pd.merge(df_i_cut0,df0,on='NUMBER')
idxn, d2dn, d3dn=cf0.match_to_catalog_sky(cf_isq)
df_isq_cut0=df_isq.loc[idxn].copy()
df_isq_cut0['NUMBER']=np.arange(0,len(df0),1).tolist()
df_isq_cut=pd.merge(df_isq_cut0,df0,on='NUMBER')
fig,ax=plt.subplots(2,3,figsize=(15,10))
df_i0=df_i_cut[(df_i_cut.MAG_APER<0)&(df_isq_cut.MAG_APER<0)]
df_isq0=df_isq_cut[(df_i_cut.MAG_APER<0)&(df_isq_cut.MAG_APER<0)]# print len(df_i), len(df_isq)
# c_d=-7.5
df_i2=df_i0[(df_i0.CLASS_STAR>c_a) & (df_i0.MAG_APER<c_d)]# & (df_i0.MAG_APER>c_c)]
df_isq2=df_isq0[(df_i0.CLASS_STAR>c_a) & (df_i0.MAG_APER<c_d)]# & (df_i0.MAG_APER>c_c)];# print len(df_i2), len(df_isq2)
icut_per=np.percentile(df_i2.MAG_APER,35) #35
df_i3=df_i2[df_i2.MAG_APER>icut_per]
df_isq3=df_isq2[df_i2.MAG_APER>icut_per]
fit=np.polyfit(df_i3.MAG_APER, df_i3.MAG_APER-df_isq3.MAG_APER, 1)
f=np.poly1d(fit)
ax[0,0].plot(df_i2.MAG_APER,f(df_i2.MAG_APER),'--')
res=(df_i3.MAG_APER-df_isq3.MAG_APER)-f(df_i3.MAG_APER)
aa=np.abs(res)<1.5*np.std(res)
# outl=np.abs(res)>=1.5*np.std(res)
fit=np.polyfit(df_i3.MAG_APER[aa], df_i3.MAG_APER[aa]-df_isq3.MAG_APER[aa], 1)
f=np.poly1d(fit)
ax[0,0].axvline(icut_per,color='blue',label='35th quantile')
ax[0,0].errorbar(df_i2.MAG_APER,df_i2.MAG_APER-df_isq2.MAG_APER,yerr=np.sqrt(df_i2.MAGERR_APER**2+df_isq2.MAGERR_APER**2),fmt='o')
ax[0,0].set_title('only for star')
ax[0,0].plot(df_i2.MAG_APER,f(df_i2.MAG_APER),'--',label='no outlier')
ax[0,0].set_ylabel('MAG_APER-MAG_APER_sq')
ax[0,0].set_xlabel('MAG APER i')
#---> #0.1 default, 0.2
c_c=df_i2[f(df_i2.MAG_APER)-(df_i2.MAG_APER-df_isq2.MAG_APER)<0.1]['MAG_APER'].values\
[np.argmin(df_i2[f(df_i2.MAG_APER)-(df_i2.MAG_APER-df_isq2.MAG_APER)<0.1]['MAG_APER'].values)] #edit10/30 (previous 0.1)
#--->
ax[0,0].axvline(c_c,color='red',label='new upper cut')
ax[0,0].legend(loc='best')
# color_axis='CLASS_STAR'
color_axis='SPREAD_MODEL_i'
ax[0,1].scatter(df_i0.MAG_APER,df_i0.MAG_APER-df_isq0.MAG_APER,marker='.',c=df_i0[color_axis],vmin=0., vmax=0.005)
ax[0,1].plot(df_i3.MAG_APER,df_i3.MAG_APER-df_isq3.MAG_APER,'x')
ax[0,1].set_title('for all objects')
ax[0,1].set_ylabel('MAG_APER-MAG_APER_sq')
ax[0,1].set_xlabel('MAG APER i')
ax[0,1].axvline(c_b,ls='--')
ax[0,1].axvline(c_c,ls='--')
delta=(df_i0.MAG_APER-df_isq0.MAG_APER) - f(df_i0.MAG_APER)
ax[0,2].scatter(df_i0.MAG_APER,delta,marker='.',c=df_i0[color_axis],vmin=0., vmax=0.005)
ax[0,2].axhline(0,ls='--')
ax[0,2].axvline(c_c,ls='--')
ax[0,2].axvline(c_b,ls='--')
ax[0,2].set_ylabel('Delta')
ax[0,2].set_xlabel('MAG APER i')
ax[0,2].set_ylim(0.5,-1.2)
df_i1=df_i0[(df_i0.MAG_APER>c_c)&(df_i0.MAG_APER<c_b)].copy()
df_isq1=df_isq0[(df_i0.MAG_APER>c_c)&(df_i0.MAG_APER<c_b)].copy()
delta1=(df_i1.MAG_APER-df_isq1.MAG_APER) - f(df_i1.MAG_APER)
ax[1,0].scatter(df_i1.MAG_APER, delta1, marker='o', c=df_i1[color_axis],vmin=0., vmax=0.005)
ax[1,0].axhline(0,ls='--')
ax[1,0].axhline(c_delta, ls='--')
ax[1,0].set_ylabel('Delta')
ax[1,0].set_xlabel('MAG APER i')
ax[1,0].set_ylim(0.5,-2)
# deltag=delta1[delta1<c_delta] #galaxy 0.1, 0.2 (0.005), 0.5 ()
deltas=delta1[(delta1>=c_delta)&(delta1<3.)] #star
def gauss(x, *p):
A, mu, sigma = p
return A*np.exp(-(x-mu)**2/(2.*sigma**2))
p0 = [1., 0., 0.1]
# def gauss(x, *p):
# A, sigma = p
# return A*np.exp(-(x-0)**2/(2.*sigma**2))
# p0 = [1., 0.1]
#galaxy
# hist, bin_edges = np.histogram(deltag,bins=np.arange(-1.2,0.5,0.02))
hist, bin_edges = np.histogram(delta1,bins=np.arange(-1.2,0.5,0.02))
bin_centres = (bin_edges[:-1] + bin_edges[1:])/2
ax[1,1].plot(bin_centres, hist, label='galaxies',linestyle='steps')
#stars
hist, bin_edges = np.histogram(deltas,bins=np.arange(-1,0.5,0.02)) #(0 vs -1,0.5,0.02)
# hist, bin_edges = np.histogram(delta1, bins=np.arange(c_delta, 0.5, 0.02))
bin_centres = (bin_edges[:-1] + bin_edges[1:])/2
coeff2, var_matrix = curve_fit(gauss, bin_centres, hist, p0=p0)
ax[1,1].plot(bin_centres, hist, label='stars',linestyle='steps')
# hist, bin_edges = np.histogram(delta1,bins=np.arange(-1.2,0.5,0.02)) #added for right gaussian fitting
# bin_centres = (bin_edges[:-1] + bin_edges[1:])/2 # added for right gaussian fitting
x=np.arange(-1.25,0.5,0.02)
# hist_fit2 = gauss(x, *coeff2)
hist_fit2 = gauss(x, *coeff2)
hist_fit3 = gauss(x, *coeff2)/np.max(gauss(x, *coeff2)) #added for right gaussian fitting
ax[1,1].plot(x, hist_fit2, label='stars_fit')
ax[1,1].plot(x, hist_fit3, label='stars_fit_norm') #added for right gaussian fitting
ax[1,1].axvline(x[hist_fit3>star_cut][0],c='tab:pink',label='cut:%.3f'%x[hist_fit3>star_cut][0]) #added for right gaussian fitting
ax[1,1].legend(loc='best')
ax[1,1].set_xlabel('Delta')
ax[1,1].set_ylabel('Histogram')
ax[0,2].axhline(x[hist_fit3>star_cut][0],c='tab:pink') #added for right gaussian fitting
ax[1,0].axhline(x[hist_fit3>star_cut][0],c='tab:pink') #added for right gaussian fitting
ax[1,2].axhline(star_cut, c='tab:red') # added for right gaussian fitting
maxi=np.max(gauss(delta,*coeff2))
def prob_SG(delta,maxi,*coeff2):
if delta>0.:
return 0.
elif delta<=0.:
return 1. - (gauss(delta, *coeff2) / maxi)
vprob_SG= np.vectorize(prob_SG)
SG=1.-vprob_SG(delta1,maxi,*coeff2)
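# SG equals gauss(delta)/max for delta <= 0 and 1 for delta > 0, i.e. (per the
# axis label below) the probability of being a star: close to 1 on the fitted
# stellar locus, close to 0 for objects falling well below it.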
df_i1.loc[:,'SG']=SG
param_izp=read_param_izp('psf')
mag0=param_izp['i_zp_day%i'%dir_dict[find_fits_dir(field)[-9:]]]
axi = ax[1, 2].scatter(df_i1.MAG_APER + mag0, SG,
marker='.', c=df_i1[color_axis], vmin=0., vmax=0.005)
ax[1,2].axvline(aper_cut, ls='--', c='tab:blue')
ax[1,2].axhline(SG_upper, ls='--', c='tab:blue')
ax[1,2].set_ylim(-0.02,1.02)
ax[1,2].set_xlabel('MAG APER i')
ax[1,2].set_ylabel('SG (probability to be a star)')
plt.suptitle(field+' seeing vs true_seeing: '+str(seeing)+','+str(true_seeing))
fig.colorbar(axi)
plt.tight_layout(rect=[0, 0., 1, 0.98])
plt.savefig('/Users/taweewat/Documents/red_sequence/pisco_color_plots/star_galaxy_sep_12_all%s.png' % field, dpi=120)
plt.close(fig)
return df_i_cut, df_i1
def pisco_cut_frame(field):
# df_i=Table(fits.open('/Users/taweewat/Documents/pisco_code/star_galaxy/'+
# '%s_catalog.fits'%field)[1].data).to_pandas()
"""
These cuts are the arguments of pisco_cut_star above:
c_a: CLASS_STAR lower limit for stars used in the linear fit
c_b, c_c: upper and lower magnitude limits for the selection of all objects;
c_c can be relaxed in a loop to include more objects up to the confusion limit
c_d: faintest magnitude for stars used in the linear fit
c_delta: lower limit on Delta for objects treated as stars before fitting the Gaussian and computing the SG (star/galaxy) factor
"""
seeing=find_seeing_fits(field)
true_seeing=find_seeing(field,'i')
## Use SPREAD_MODEL to separate stars from galaxies
fname = "/Users/taweewat/Documents/pisco_code/slr_output/total_psf_%s.csv"%field
df0 = pd.read_csv(fname)
df0['NUMBER'] = np.arange(0, len(df0), 1).tolist()
df0.rename(columns={'ALPHA_J2000': 'ALPHA_J2000_i'}, inplace=True)
df0.rename(columns={'DELTA_J2000': 'DELTA_J2000_i'}, inplace=True)
#EXTENDED_COADD: 0 star, 1 likely star, 2 mostly galaxies, 3 galaxies
# df0['EXTENDED_COADD']=np.array(((df0['SPREAD_MODEL_i']+ 3*df0['SPREADERR_MODEL_i'])>0.005).values, dtype=int)+\
# np.array(((df0['SPREAD_MODEL_i']+df0['SPREADERR_MODEL_i'])>0.003).values, dtype=int)+\
# np.array(((df0['SPREAD_MODEL_i']-df0['SPREADERR_MODEL_i'])>0.003).values, dtype=int)
# dff=df0[df0['EXTENDED_COADD']>1]
# dff_star=df0[df0['EXTENDED_COADD']<2]
dfi=df0[df0['MAG_AUTO_i']<-16]
x=dfi['MAG_AUTO_i']
y=dfi['SPREAD_MODEL_i']
p_spread=np.poly1d(np.polyfit(x,y,1))
xs=np.arange(np.min(df0['MAG_AUTO_i']),np.max(df0['MAG_AUTO_i']),0.01)
df0['SPREAD_MODEL_i2']=df0['SPREAD_MODEL_i']-p_spread(df0['MAG_AUTO_i'])
dff=df0[(df0['SPREAD_MODEL_i']>0.005)]
# dff_star=df0[np.abs(df0['SPREAD_MODEL_i'])<0.004] #+5/3.*df0['SPREADERR_MODEL_i'] <0.002
dff_star=df0[(df0['SPREAD_MODEL_i']<0.004)]#&(df0['FLAGS_i']<4)]
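# Star/galaxy separation via fixed SPREAD_MODEL_i thresholds: objects with
# SPREAD_MODEL_i > 0.005 are kept as galaxies (dff), objects with
# SPREAD_MODEL_i < 0.004 as stars (dff_star); sources in between end up in
# neither list.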
fig=plt.figure(figsize=(4,4))
plt.plot(df0['MAG_AUTO_i'],df0['SPREAD_MODEL_i'],'.',c='grey',alpha=0.1)
plt.plot(dff['MAG_AUTO_i'],dff['SPREAD_MODEL_i'],'.',alpha=1,label='galaxies')
plt.plot(dff_star['MAG_AUTO_i'],dff_star['SPREAD_MODEL_i'],'.',alpha=1,label='stars')
plt.ylim(-0.08,0.08)
plt.xlim(-19,-10.5)
plt.axhline(0.005,color='tab:orange')
plt.legend(loc='best')
plt.savefig('/Users/taweewat/Documents/red_sequence/pisco_color_plots/spread_model_real_i_fit_%s_%s.png' %
(mode, field), dpi=120)
plt.close(fig)
dff0=dff
dff0.to_csv("/Users/taweewat/Documents/pisco_code/slr_output/"+\
"galaxy_psf_total_%s.csv"%field)
# dff_star0=pd.merge(dff_star, df0, on='NUMBER') # for non-SPREAD_MODEL
dff_star0=dff_star #for SPREAD_MODEL
dff_star0.to_csv("/Users/taweewat/Documents/pisco_code/slr_output/"+\
"star_psf_total_%s.csv"%field)
def pisco_photometry_psf_v4(field, mode='psf', mode2mass='', slr=True): #mode2mass: '' vs '_no2mass'
def slr_running_psf(field, infile="None", mode="psf", mode2mass='', bigmacs="pisco_pipeline/big-macs-calibrate-master"):
"""
slr_running_psf: run the SLR script from github.com/patkel/big-macs-calibrate to get calibrated magnitudes
INPUT:
- field: object of interest, e.g., 'Field026'
- bigmacs: path to the "big-macs-calibrate" directory
OUTPUT:
- a new table with added columns with name MAG_g,...,MAGERR_g,...
"""
slrdir = 'slr_output'
pyfile = os.path.join(bigmacs, 'fit_locus.py')
# cmd = "python %s --file %s --columns %s --extension 1 --bootstrap 15 -l -r ALPHA_J2000_i -d DELTA_J2000_i -j --plot=PLOTS_%s_%s" \
# % (pyfile, infile, os.path.join(bigmacs, "coadd_mag_sex_%s%s.columns"%(mode,'')), mode, field)
if mode2mass=='':
cmd = "python %s --file %s --columns %s --extension 1 --bootstrap 15 -l -r ALPHA_J2000_i -d DELTA_J2000_i -j --plot=PLOTS_%s_%s" \
% (pyfile, infile, os.path.join(bigmacs, "coadd_mag_sex_%s%s.columns"%(mode,mode2mass)), mode, field) #'' vs '_no2mass'
elif mode2mass=='_no2mass':
cmd = "python %s --file %s --columns %s --extension 1 --bootstrap 15 -l -r ALPHA_J2000_i -d DELTA_J2000_i --plot=PLOTS_%s_%s" \
% (pyfile, infile, os.path.join(bigmacs, "coadd_mag_sex_%s%s.columns"%(mode,mode2mass)), mode, field) #'' vs '_no2mass'
print cmd
subprocess.check_call(shlex.split(cmd))
def update_color(fname, table, mode='psf'):
"""
update_color: use the SLR output to correct the magnitudes
INPUT:
- fname: input file from the SLR output (...offsets.list)
- table: the table to update (from MAG_PSF_g, etc. to calibrated MAG_g, etc.)
OUTPUT:
- a new table with added columns with name MAG_g,...,MAGERR_g,...
"""
print fname
with open(fname) as f:
content = f.readlines()
content = [x.strip() for x in content]
# print content
if len(content)==8:
red_content=content[4:]
elif len(content)==10:
red_content=content[5:-1]
# if len(content)==7:
# red_content=content[4:]
# elif len(content)==9:
# red_content=content[5:-1]
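# The indexing below assumes each retained line of the .offsets.list file looks
# roughly like "... MAG_x <offset> +- <error>": the band letter is the last
# character of the first token, the offset is the second token, and the error
# the fourth (inferred from this parsing, not from the big-macs documentation).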
band = [x.split(' ')[0][-1] for x in red_content]
corr = [float(x.split(' ')[1]) for x in red_content]
ecorr = [float(x.split(' ')[3]) for x in red_content]
print 'bands = ', band
if mode=='psf':
MODE1='PSF'
elif mode=='model':
MODE1='MODEL'
elif mode=='auto':
MODE1='AUTO'
elif mode=='aper':
MODE1='APER'
elif mode=='hybrid':
MODE1='HYBRID'
elif mode=='iso':
MODE1='ISO'
table['MAG_' + band[0]] = table['MAG_%s_'%MODE1 + band[0]] + corr[0]
table['MAG_' + band[1]] = table['MAG_%s_'%MODE1 + band[1]] + corr[1]
table['MAG_' + band[2]] = table['MAG_%s_'%MODE1 + band[2]] + corr[2]
table['MAG_' + band[3]] = table['MAG_%s_'%MODE1 + band[3]] + corr[3]
table['MAGERR_' + band[0]] = (table['MAGERR_%s_'%MODE1 + band[0]]**2)**0.5# + ecorr[0]**2)**0.5
table['MAGERR_' + band[1]] = (table['MAGERR_%s_'%MODE1 + band[1]]**2)**0.5# + ecorr[1]**2)**0.5
table['MAGERR_' + band[2]] = (table['MAGERR_%s_'%MODE1 + band[2]]**2)**0.5# + ecorr[2]**2)**0.5
table['MAGERR_' + band[3]] = (table['MAGERR_%s_'%MODE1 + band[3]]**2)**0.5# + ecorr[3]**2)**0.5
return table
slrdir = 'slr_output'
df0=pd.read_csv("/Users/taweewat/Documents/pisco_code/slr_output/star_psf_total_%s.csv" % field,index_col=0)
# if field=='SDSS603':
# df0=df0.drop([399,258,357,157,381,310,86,81,31,66,422,232,208,19,10])
# elif field=='SDSS501':
# df0=df0.drop([265,108,196,213,160])
# elif field=='SDSS123':
# df0=df0.drop([68,5,61])
# else:
# df0=df0
total3 = Table.from_pandas(df0)
total3=total3[['NUMBER','ALPHA_J2000_i','DELTA_J2000_i','XWIN_IMAGE_i','YWIN_IMAGE_i',\
'MAG_APER_i','MAGERR_APER_i','MAG_APER_g','MAGERR_APER_g','MAG_APER_r',\
'MAGERR_APER_r','MAG_APER_z','MAGERR_APER_z','MAG_AUTO_i','MAGERR_AUTO_i',\
'MAG_AUTO_g','MAGERR_AUTO_g','MAG_AUTO_r','MAGERR_AUTO_r','MAG_AUTO_z',\
'MAGERR_AUTO_z','MAG_ISO_i','MAGERR_ISO_i','MAG_ISO_g','MAGERR_ISO_g','MAG_ISO_r',\
'MAGERR_ISO_r','MAG_ISO_z','MAGERR_ISO_z',\
'MAG_SPHEROID_i','MAGERR_SPHEROID_i','MAG_SPHEROID_g',\
'MAGERR_SPHEROID_g','MAG_SPHEROID_r','MAGERR_SPHEROID_r','MAG_SPHEROID_z',\
'MAGERR_SPHEROID_z','CLASS_STAR_i','CLASS_STAR_g','CLASS_STAR_r',\
'CLASS_STAR_z','FLAGS_g','FLAGS_r','FLAGS_i','FLAGS_z','MAG_PSF_g',\
'MAG_PSF_r','MAG_PSF_i','MAG_PSF_z','MAGERR_PSF_g','MAGERR_PSF_r',\
'MAGERR_PSF_i','MAGERR_PSF_z','MAG_MODEL_g','MAG_MODEL_r',\
'MAG_MODEL_i','MAG_MODEL_z','MAGERR_MODEL_g','MAGERR_MODEL_r',\
'MAGERR_MODEL_i','MAGERR_MODEL_z','SPREAD_MODEL_g','SPREAD_MODEL_r',\
'SPREAD_MODEL_i','SPREAD_MODEL_z','SPREADERR_MODEL_g','SPREADERR_MODEL_r',\
'SPREADERR_MODEL_i','SPREADERR_MODEL_z']]
print 'number of stars =', len(total3)
if (mode2mass==''):
starpsfmode = '_psf'
elif (mode2mass=='_no2mass'):
starpsfmode ='_no2mass'
# total3.write(slrdir+'/star_psf%s_%s_%i.fits' % ('_psf',field,0), overwrite=True) #with 2MASS stars: star_psf_psf_%s_%i.fits
total3.write(slrdir+'/star_psf%s_%s_%i.fits'%(starpsfmode,field,0),overwrite=True)
# no 2MASS star mode vs , '_psf' vs '_no2mass'
if slr:
slr_running_psf(field, infile=slrdir + '/star_psf%s_%s_%i.fits' %
(starpsfmode, field, 0), mode='psf', mode2mass=mode2mass) # '_psf' vs '_no2mass'
print 'mode=', mode, '/star_psf%s_%s_%i.fits.offsets.list' % (starpsfmode, field, 0)
total_gal=Table.from_pandas(pd.read_csv("/Users/taweewat/Documents/pisco_code/slr_output/galaxy_psf_total_%s.csv"%(field)))
ntotal_gal = update_color(slrdir+'/star_psf%s_%s_%i.fits.offsets.list' %
(starpsfmode, field, 0), total_gal, mode=mode)
ntotal_gal.write(os.path.join(
slrdir, 'galaxy_%s%s_ntotal_%s.csv'%(mode,mode2mass,field)), overwrite=True)
total_star=Table.from_pandas(pd.read_csv("/Users/taweewat/Documents/pisco_code/slr_output/star_psf_total_%s.csv"%(field)))
ntotal_star = update_color(slrdir+'/star_psf%s_%s_%i.fits.offsets.list'%
(starpsfmode, field, 0), total_star, mode=mode)
ntotal_star.write(os.path.join(
slrdir, 'star_%s%s_ntotal_%s.csv'%(mode,mode2mass,field)), overwrite=True)
def make_images(field,ax=None):
dir='/Users/taweewat/Documents/pisco_code/Chips_images/'
try:
ax.imshow(image.imread(dir+"aplpy4_%s_img4.jpeg"%field))
except:
ax.imshow(image.imread(dir+"aplpy4_%s_img.jpeg"%field))
# ax.imshow(image.imread(dir+"aplpy4_%s_img4.jpeg"%field))
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.axis('off')
return None
# def sur_pro(r): #Mpc
# def fn(x):
# if x>=1:
# return 1.-(2/np.sqrt(x**2-1)*np.arctan(np.sqrt((x-1.)/(x+1.))))
# elif x<1:
# return 1.-(2/np.sqrt(1-x**2)*np.arctanh(np.sqrt((1.-x)/(x+1.))))
# rs=0.15/0.71 #Mpc
# if r>=(0.1/0.71):
# return 1/((r/rs)**2-1)*fn(r/rs)
# elif r<(0.1/0.71):
# return 1./(((0.1/0.71)/rs)**2-1)*fn((0.1/0.71)/rs)
# def k_NFW():
# def integrated(y):
# return 1./integrate.quad(lambda r: 2*np.pi*r*sur_pro(r),0,y)[0]
# xy=np.logspace(-3,3,num=30)
# X = np.log(xy)
# Y = np.log([integrated(np.e**(y)) for y in X])
# Z=np.polyfit(X,Y,6)
# k_NFW = np.poly1d(Z)
# return k_NFW
# def sur_pro_prob(r,rc,k_NFW): #(Mpc,Mpc) # Weighted based on the distance from the center (Rykoff+12)
# return np.e**(k_NFW(np.log(rc)))*sur_pro(r)
name=['z','dist','age','mass','Abs_g','App_g','kcorr_g','Abs_r',\
'App_r','kcorr_r','Abs_i','App_i','kcorr_i','Abs_z','App_z','kcorr_z']
df=pd.read_csv('/Users/taweewat/Documents/red_sequence/rsz/model/'+\
# 'ezmodel2_bc03_zf2.5_chab_0.016_exp_0.1.txt',
'ezmodel2_bc03_zf2.5_chab_0.02_exp_0.1.txt',
# 'ezmodel2_c09_zf3.0_chab_0.02_exp_0.1.txt',
skiprows=27,delim_whitespace=True,names=name)
df=df[(df.z>=0.1) & (df.z<1.)]
z_new=np.arange(0.1, 0.95, 0.0025)
Appi_new = interpolate.splev(z_new, interpolate.splrep(df.z, df.App_i, s=0), der=0)
Appi_f = interpolate.interp1d(df.z, df.App_i, kind='cubic')
#all extra options
extra_name= 'gnorm_zf2.5_bc03_noebv_auto_bin1.0_root15_sur0.25' #'gremove_lum_silk_zf2.5_c09_11', 'gremove_silk_zf3_c09_noebv_model_complete_no2mass'
core_radius=0.25
gremove = False # remove objects not detected in g from the list
duplicate = False # remove duplicate redshift (uncertain)
colorerr = True # add redshift with color_error taken into account
transparent = True # make transparent plot for flip book
img_filp = False # make flip-book images from the transparent plots
img_redshift = True # make an image showing the member redshift for each object
def linear_rmi(x0,redshift):
x=df.z[:-11] #-12
y=(df.App_r-df.App_i)[:-11] #-12
yhat = np.polyfit(x, y, 5) #5 vs 9
f_rmi = np.poly1d(yhat)
slope=-0.0222174237562*1.007
# Appi0=Appi_new[np.where(abs(z_new-redshift)<=1e-9)[0][0]]
Appi0=Appi_f(redshift)
return slope*(x0-Appi0)+f_rmi(redshift)
def linear_gmr(x0,redshift):
x=df.z[:-24] #-25
y=(df.App_g-df.App_r)[:-24] #-25
yhat = np.polyfit(x, y, 5)
f_gmr = np.poly1d(yhat)
slope=-0.0133824600874*1.646
# Appi0=Appi_new[np.where(abs(z_new-redshift)<=1e-9)[0][0]]
Appi0=Appi_f(redshift)
return slope*(x0-Appi0)+f_gmr(redshift)
def linear_gmi(x0,redshift):
x=df.z[:-9]
y=(df.App_g-df.App_i)[:-9]
yhat = np.polyfit(x, y, 5)
f_gmi = np.poly1d(yhat)
Appi0=Appi_f(redshift)
slope = -0.04589707934164738 * 1.481
return slope*(x0-Appi0)+f_gmi(redshift)
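# linear_rmi / linear_gmr / linear_gmi implement the tilted red-sequence model
#     color(m_i, z) = slope * (m_i - m_i_model(z)) + color_model(z),
# where color_model(z) and m_i_model(z) come from the polynomial/spline fits to
# the BC03 model table read above, and the negative slope tilts the sequence so
# that fainter galaxies are slightly bluer at fixed redshift.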
def find_fits_dir(field):
home = '/Users/taweewat/Documents/pisco_code/'
dirs = ['ut170103/', 'ut170104/', 'ut170619/', 'ut170621/',\
'ut170624/', 'ut171208/', 'ut171209/', 'ut171212/']
myReg = re.compile(r'(%s_A).*' % field)
for di in dirs:
diri = home + di
for text in os.listdir(diri):
if myReg.search(text) != None:
# filename = myReg.search(text).group()
allfilename = diri
return allfilename
dir_dict = dict(zip(['ut170103/','ut170104/','ut170619/',\
'ut170621/','ut170624/','ut171208/','ut171209/','ut171212/'], np.arange(1, 9)))
def find_ra_dec(field):
if field == 'PKS1353':
RA = 209.0225
DEC = -34.3530556
redshift = 0.223
elif field == 'CHIPS2249-2808': #CHIPS2227-4333
# RA = 336.99975202151825
# DEC = -43.57623068466675
RA = 336.98001
DEC = -43.56472
redshift = -1
elif field == 'CHIPS2246-2854': #'CHIPS2223-3455'
# RA = 335.7855174238757
# DEC = -34.934569299688185
RA = 335.78
DEC = -34.9275
redshift = -1
elif field[0:5] == 'Field':
base = pd.read_csv(
'/Users/taweewat/Dropbox/Documents/MIT/Observation/2017_1/all_objs.csv')
RA = base[base.name == field].ra.values[0]
DEC = base[base.name == field].dec.values[0]
redshift = base[base.name == field].redshift.values[0]
elif field[0:5] == 'CHIPS':
base = pd.read_csv(
'/Users/taweewat/Documents/red_sequence/chips_all_obj.csv', index_col=0)
RA = base[base.chips == field].ra.values[0]
DEC = base[base.chips == field].dec.values[0]
redshift = base[base.chips == field].redshift.values[0]
elif field[0:4] == 'SDSS':
base = pd.read_csv(
'/Users/taweewat/Documents/xray_project/ned-result/final_sdss_cut5.csv', index_col=0)
RA = base[base.name == field].RA.values[0]
DEC = base[base.name == field].DEC.values[0]
redshift = base[base.name == field].redshift.values[0]
return RA, DEC, redshift
def pisco_tilt_resequence(field, mode='psf', mode2mass=''):
RA, DEC, redshift = find_ra_dec(field)
if redshift!=-1:
qso_redshift=redshift
else:
qso_redshift=0.2
print 'RA', RA
print 'DEC', DEC
ebv = ebvpy.calc_ebv(ra=[RA],dec=[DEC]); print 'ebv:', ebv[0]
# ebv_g=ebvpy.calc_color_correction('g', ebv)[0]
# ebv_r=ebvpy.calc_color_correction('r', ebv)[0]
# ebv_i=ebvpy.calc_color_correction('i', ebv)[0]
# ebv_z=0.0
ebv_g,ebv_r,ebv_i,ebv_z=0.0,0.0,0.0,0.0 #no longer use reddening correction because it is already included in SLR
print 'ebv_g:', ebv_g, 'ebv_r:', ebv_r, 'ebv_i:', ebv_i
param_izp=read_param_izp(mode) #i zero point
# fname = "/Users/taweewat/Documents/pisco_code/slr_output/galaxy_ntotal_%s.csv"%field
dir_slrout='/Users/taweewat/Documents/pisco_code/slr_output/'
fname = dir_slrout+"galaxy_%s%s_ntotal_%s.csv" % (
mode, mode2mass, field) # '' vs '_no2mass'
df0 = pd.read_csv(fname,index_col=0)
if gremove:
nog=len(df0[df0['MAG_PSF_g'] >= 50.]); print "no g detected:", nog
df0 = df0[df0['MAG_PSF_g'] < 50.].copy() # cut out not detected objects in g band
else:
nog=0
c5 = SkyCoord(ra=df0['ALPHA_J2000_i'].values*u.degree, dec=df0['DELTA_J2000_i'].values*u.degree)
c0 = SkyCoord(ra=RA*u.degree, dec=DEC*u.degree)
sep = c5.separation(c0)
df0['sep(deg)']=sep
df0['sep(Mpc)']=sep*60.*cosmo.kpc_proper_per_arcmin(qso_redshift).value/1e3
cut=df0
dfi = cut#.drop_duplicates(subset=['XWIN_WORLD', 'YWIN_WORLD'], keep='first').copy()
print 'duplicates:', len(df0), len(dfi)
# Added Galactic Reddening (6/16/18)
if mode2mass == '':
dfi['MAG_i']=dfi['MAG_i']-ebv_i
dfi['MAG_g']=dfi['MAG_g']-ebv_g
dfi['MAG_r']=dfi['MAG_r']-ebv_r
# Use the i-band zero point from each day and the g,r zero points from the color (6/22/18)
elif mode2mass == '_no2mass':
mag0 = param_izp['i_zp_day%i'%dir_dict[find_fits_dir(field)[-9:]]]
print 'i_zp_day', find_fits_dir(field), mag0
dfi['MAG_i']=dfi['MAG_i']-ebv_i+mag0
dfi['MAG_g']=dfi['MAG_g']-ebv_g+mag0
dfi['MAG_r']=dfi['MAG_r']-ebv_r+mag0
dfi['MAG_z']=dfi['MAG_z']-ebv_z+mag0
dfi.to_csv(dir_slrout+"galaxy_%s_final_%s.csv"%(mode,field))
fname=dir_slrout+"star_%s%s_ntotal_%s.csv" % (mode, mode2mass, field)
df0=pd.read_csv(fname,index_col=0)
dfi=df0
# Added Galactic Reddening (6/16/18)
if mode2mass == '':
dfi['MAG_i']=dfi['MAG_i']-ebv_i
dfi['MAG_g']=dfi['MAG_g']-ebv_g
dfi['MAG_r']=dfi['MAG_r']-ebv_r
# Use the i-band zero point from each day and the g,r zero points from the color (6/22/18)
elif mode2mass == '_no2mass':
mag0 = param_izp['i_zp_day%i'%dir_dict[find_fits_dir(field)[-9:]]]
print 'i_zp_day', find_fits_dir(field), mag0
dfi['MAG_i']=dfi['MAG_i']-ebv_i+mag0
dfi['MAG_g']=dfi['MAG_g']-ebv_g+mag0
dfi['MAG_r']=dfi['MAG_r']-ebv_r+mag0
dfi['MAG_z']=dfi['MAG_z']-ebv_z+mag0
dfi.to_csv(dir_slrout+"star_%s_final_%s.csv"%(mode,field))
return None
# dfi=dfi[dfi['MAG_i']<21.5].copy()
# dfi=dfi[dfi.MAGERR_g<0.5]
# dfi=dfi[(dfi.MAG_g<100)&(dfi.MAG_i<100)&(dfi.MAG_r<100)]
# dfi=dfi[(dfi.FLAGS_g<5)&(dfi.FLAGS_r<5)&(dfi.FLAGS_i<5)&(dfi.FLAGS_z<5)]
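# Note: the early 'return None' above and the never-called wrapper 'def xxx(x):'
# below mean the remainder of this block (down to its 'return z_total, ...')
# appears to be disabled code.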
def xxx(x):
dfi=dfi[np.sqrt(dfi['MAGERR_r']**2+dfi['MAGERR_i']**2)<0.3].copy() #0.5
x=dfi['MAG_i']
y=np.sqrt(dfi['MAGERR_r']**2+dfi['MAGERR_i']**2)
p=np.poly1d(np.polyfit(x,np.log(y),1, w=np.sqrt(y)))
Mag_cut=(p-np.log(0.067*1.5)).roots;
print "Mag_cut: %.2f"%(Mag_cut)
xs=np.arange(np.min(x),np.max(x),0.01)
fig,ax=plt.subplots(figsize=(5,5))
plt.plot(x,y,'.',label='r-i')
plt.plot(xs,np.exp(p(xs)),label='exp({:.1f}+{:.1f}x)'.format(p[0],p[1]))
plt.xlabel('Mag_i'); plt.ylabel('$\Delta r-i$ err')
plt.ylim(-0.05,0.35)
plt.axvline(Mag_cut,label='Mag_cut')
plt.legend(loc='best')
plt.savefig('/Users/taweewat/Documents/red_sequence/pisco_color_plots/uncer_%s_%s.png' %
(mode, field), dpi=120)
# plt.tight_layout()
plt.close(fig)
#Magnitude cut
print field, qso_redshift, df0.shape, cut.shape, dfi.shape, dfi['sep(deg)'].max(), dfi['sep(Mpc)'].max()
norm = matplotlib.colors.Normalize(vmin=0.10,vmax=0.675)
c_m = matplotlib.cm.cool
s_m = matplotlib.cm.ScalarMappable(cmap=c_m, norm=norm)
s_m.set_array([])
I=np.arange(16,24,0.01)
dfi.loc[:,"z_gmr"] = np.nan
dfi.loc[:,"z_rmi"] = np.nan
dfi.loc[:,"w_gmr"] = np.nan
dfi.loc[:,"w_rmi"] = np.nan
dfi.loc[:,"w_col_gmr"] = np.nan
dfi.loc[:,"w_col_rmi"] = np.nan
# dfi.loc[:,"z_gmi"] = np.nan
# dfi.loc[:,"w_gmi"] = np.nan
# dfi.loc[:,"w_col_gmi"] = np.nan
# k_NFW0=k_NFW()
bin_width=0.035 #0.025
# bins_gmr_cen = np.arange(0.15815-0.0175, 0.33315-0.0175+0.01, bin_width)
# bins_gmr_edge = np.arange(0.14065-0.0175, 0.35065-0.0175+0.01, bin_width)
# bins_gmr_cen = np.arange(0.12315+0.0175, 0.33315+0.0175+0.01, bin_width)
# bins_gmr_edge = np.arange(0.10565+0.0175, 0.35065+0.0175+0.01, bin_width)
# bins_rmi_cen = np.arange(0.36815-0.0175, 0.64815-0.0175+0.01, bin_width)
# bins_rmi_edge = np.arange(0.35065-0.0175, 0.66565-0.0175+0.01, bin_width)
# new scheme: merge the last g-r bin with the first r-i bin (ns1[-1]+ns2[0] below)
bins_gmr_cen = np.arange(0.12315, 0.33315+0.01, bin_width)
bins_gmr_edge = np.arange(0.10565, 0.35065+0.01, bin_width)
bins_rmi_cen = np.arange(0.36815-bin_width, 0.64815+0.01, bin_width)
bins_rmi_edge = np.arange(0.35065-bin_width, 0.66565+0.01, bin_width)
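# Redshift bins: the g-r track covers z ~ 0.123-0.333 and the r-i track
# z ~ 0.333-0.648, both with bin_width = 0.035; the overlapping edge bin is
# merged further down where ns1[-1] and ns2[0] are summed.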
z_rmi,w_rmi,w_col_rmi=[],[],[]
for i, row in dfi.iterrows():
for z in bins_rmi_cen:
# if row['MAG_i'] < -18+5.*np.log10(ex.d_L(z)*1e6)-5.:
# if row['MAG_i'] < magi_cut_rmi:
# if np.sqrt(row['MAGERR_r']**2+row['MAGERR_i']**2)<0.134: #np.mean(f_rmi(x+0.07)-f_rmi(x))
# if np.sqrt(row['MAGERR_r']**2+row['MAGERR_i']**2)<0.067*1.5: #0.067*1.5
if row['MAG_i'] < Mag_cut:
rmi=row['MAG_r']-row['MAG_i']
# rmierr=np.sqrt(row['MAGERR_r']**2+row['MAGERR_i']**2)
low_edge=linear_rmi(row['MAG_i'],round(z-0.0175,4)) #0.0125
high_edge=linear_rmi(row['MAG_i'],round(z+0.0175,4)) #0.0125
if (rmi > low_edge) & (rmi <= high_edge):
# if (np.sqrt(row['MAGERR_r']**2+row['MAGERR_i']**2) < 3.5*(high_edge-low_edge)):
z_rmi.append(round(z,3))
# wrmi0=sur_pro_prob(row['sep(Mpc)'],1.,k_NFW0)
wrmi0=ex.sur_pro_prob_ang(row['sep(deg)']*60, core_radius); w_rmi.append(wrmi0) #arcmin
# w_col_rmi0=scipy.stats.norm(rmi,rmierr).cdf(high_edge)-scipy.stats.norm(rmi,rmierr).cdf(low_edge); w_col_rmi.append(w_col_rmi0)
w_col_rmi0=1.; w_col_rmi.append(w_col_rmi0)
dfi.loc[i,"z_rmi"]=z
dfi.loc[i,"w_rmi"]=wrmi0
dfi.loc[i,"w_col_rmi"]=w_col_rmi0
z_gmr,w_gmr,w_col_gmr=[],[],[]
for i, row in dfi.iterrows():
for z in bins_gmr_cen:
# if row['MAG_i'] < -18+5.*np.log10(ex.d_L(z)*1e6)-5.:
# if row['MAG_i'] < magi_cut_gmr:
# if np.sqrt(row['MAGERR_g']**2+row['MAGERR_r']**2)<0.165: #np.mean(f_gmr(x+0.07)-f_gmr(x))
# if np.sqrt(row['MAGERR_g']**2+row['MAGERR_r']**2)<0.0825*1.5: #0.0825*1.5
if row['MAG_i'] < Mag_cut:
gmr=row['MAG_g']-row['MAG_r']
# gmrerr=np.sqrt((row['MAGERR_g'])**2+row['MAGERR_r']**2) #add factor 2.2 to reduce the g error to be similar to other bands
low_edge=linear_gmr(row['MAG_i'],round(z-0.0175,4)) #0.0125
high_edge=linear_gmr(row['MAG_i'],round(z+0.0175,4)) #0.0125
if (gmr > low_edge) & (gmr <= high_edge):
# if (np.sqrt(row['MAGERR_g']**2+row['MAGERR_r']**2) < 3.5*(high_edge-low_edge)):
z_gmr.append(round(z,3))
# w_col_gmr0=scipy.stats.norm(gmr,gmrerr).cdf(high_edge)-scipy.stats.norm(gmr,gmrerr).cdf(low_edge); w_col_gmr.append(w_col_gmr0)
w_col_gmr0=1.; w_col_gmr.append(w_col_gmr0)
# wgmr0=sur_pro_prob(row['sep(Mpc)'],1.,k_NFW0); w_gmr.append(wgmr0)
wgmr0 = ex.sur_pro_prob_ang(row['sep(deg)'] * 60, core_radius); w_gmr.append(wgmr0) # arcmin
dfi.loc[i,"z_gmr"]=z
dfi.loc[i,"w_gmr"]=wgmr0
dfi.loc[i,"w_col_gmr"]=w_col_gmr0
ns1,xs1=np.histogram(z_gmr,bins=bins_gmr_edge,weights=np.array(w_gmr)*np.array(w_col_gmr)) #0.15-0.325
bin_cen1 = (xs1[:-1] + xs1[1:])/2
ns2,xs2=np.histogram(z_rmi,bins=bins_rmi_edge,weights=np.array(w_rmi)*np.array(w_col_rmi)) #0.36-0.675
bin_cen2 = (xs2[:-1] + xs2[1:])/2
# z_total=np.append(bin_cen1, bin_cen2)
# n_total=np.append(ns1,ns2)
z_total=np.append(bin_cen1, bin_cen2[1:])
n_total=np.append(np.append(ns1[:-1],np.array(ns1[-1]+ns2[0])),np.array(ns2[1:]))
z_max=z_total[np.where(n_total==np.max(n_total))[0][0]]
n_median = np.median(n_total[n_total != 0])
n_mean = np.mean(n_total)
n_bkg = np.mean(sorted(n_total)[2:-2]);
z_total_added = np.insert(
np.append(z_total, z_total[-1] + bin_width), 0, z_total[0] - bin_width)
n_total_added = np.insert(np.append(n_total, 0), 0, 0) - n_bkg
# print 'n_total_added', n_total_added
lumfn=pd.read_csv('/Users/taweewat/Documents/red_sequence/coma_cluster_luminosity_function/schecter_fn.csv',\
names=['M_r','theta(M)Mpc^-3'])
h=0.7
x=lumfn['M_r']+5*np.log10(h);
y=lumfn['theta(M)Mpc^-3']*(h**3)
f1d=interp1d(x, y,kind='cubic')
def lum_function(M):
alpha = -1.20
Nb = np.log(10) / 2.5 * 0.002 * (70 / 50.)**3
Mb_s = -21. + 5 * np.log10(70 / 50.)
return Nb * (10.**(0.4 * (alpha + 1) * (Mb_s - M))) * np.exp(-10.**(0.4 * (Mb_s - M)))
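# lum_function is a Schechter luminosity function,
#     phi(M) = Nb * 10**(0.4*(alpha+1)*(Mb_s-M)) * exp(-10**(0.4*(Mb_s-M))),
# with alpha = -1.20 and characteristic magnitude Mb_s; the active code below
# integrates the tabulated Coma Schechter fit (f1d) instead, so this analytic
# form appears to be unused here.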
lum_fn = lambda z: integrate.quad( f1d, -23.455, ex.abs_mag(22.25, z))[0]
lum_vfn = np.vectorize(lum_fn)
dense_fn = lambda z: integrate.quad(ex.NFW_profile,0.001,cosmo.kpc_proper_per_arcmin(z).value/1e3)[0]
dense_vfn = np.vectorize(dense_fn)
n_total_adj=n_total_added #/(lum_vfn(z_total_added)*dense_vfn(z_total_added)) (adjusted the peak before picking it)
print 'n_total_added:', n_total_added
print 'n_total_adj:', n_total_adj
indi = np.where(n_total_adj == np.max(n_total_adj))[0][0]
# indi = np.where(n_total_added == np.max(n_total_added))[0][0]
z_fit = z_total_added[[indi - 1, indi, indi + 1]]; print 'z_fit', z_fit
n_fit = n_total_added[[indi - 1, indi, indi + 1]]; print 'n_fit', n_fit
def gaussian_func(x, a, mu):
sigma=0.035
return a * np.exp(-(x-mu)**2/(2*(sigma**2)))
if (n_fit[0]<0.) and (n_fit[2]<0.):
popt, pcov = curve_fit(gaussian_func, z_fit, [0,n_fit[1],0], p0=[n_fit[1],z_fit[1]])
else:
popt, pcov = curve_fit(gaussian_func, z_fit,
n_fit, p0=[n_fit[1], z_fit[1]])
# signal=tuple(popt)[0]
# def v_func(z):
# return (z**2+2*z)/(z**2+2*z+2)
# signal=((np.max(n_total)-np.mean(n_total))*(v_func(z_max)*(4000))**2)/5.3e6 #normalization for r~1 at z~0.15
# signal = (
# (tuple(popt)[0]) * (cosmo.luminosity_distance(tuple(popt)[1]).value)**1.5) / 5.3e5 # normalization for r~1 at z~0.15
def lum_function(M):
alpha = -1.20
Nb = np.log(10) / 2.5 * 0.002 * (70 / 50.)**3
Mb_s = -21. + 5 * np.log10(70 / 50.)
return Nb * (10.**(0.4 * (alpha + 1) * (Mb_s - M))) * np.exp(-10.**(0.4 * (Mb_s - M)))
lumfn=pd.read_csv('/Users/taweewat/Documents/red_sequence/coma_cluster_luminosity_function/schecter_fn.csv',\
names=['M_r','theta(M)Mpc^-3'])
h=0.7
x=lumfn['M_r']+5*np.log10(h);
y=lumfn['theta(M)Mpc^-3']*(h**3)
f1d=interp1d(x, y,kind='cubic')
z_max_fit = tuple(popt)[1]
# lum_factor = integrate.quad(lum_function, -24, abs_mag(21.60, tuple(popt)[1]))[0]
# lum_factor = cosmo.luminosity_distance(tuple(popt)[1]).value**-1.5*100
lum_factor = integrate.quad( f1d, -23.455, ex.abs_mag(22.25, z_max_fit))[0]
#-23.455: min abs Mag from schecter_fn.csv, 22.25: median of Mag r
density_factor=integrate.quad(ex.NFW_profile, 0.001, core_radius*cosmo.kpc_proper_per_arcmin(z_max_fit).value/1e3)[0]
signal = tuple(popt)[0] / (lum_factor * density_factor)
print 'z_max_fit', z_max_fit
print 'lum_factor:', lum_factor
print 'density_factor', density_factor
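# Richness normalisation: signal = a / (lum_factor * density_factor), where a
# is the fitted Gaussian amplitude, lum_factor integrates the luminosity
# function down to the absolute magnitude matching m = 22.25 at z_max_fit, and
# density_factor integrates the projected NFW profile out to the physical
# radius corresponding to core_radius (arcmin) at that redshift.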
# duplicate=False ## set duplication
n_total_dup=0
## Plot the figure
cmap=matplotlib.cm.RdYlGn
if duplicate or colorerr:
fig,ax=plt.subplots(1,5,figsize=(25,5))
else:
fig,ax=plt.subplots(1,4,figsize=(20,5))
make_images(field,ax[0])
norm = matplotlib.colors.Normalize(vmin=0.01,vmax=2)
dfi_ri=dfi.loc[dfi['z_rmi'].dropna().index]
ax[1].scatter(dfi['MAG_i'],dfi['MAG_r']-dfi['MAG_i'],c='black',alpha=0.1)#dfi['w_rmi'],cmap=cmap)
ax[1].scatter(dfi_ri['MAG_i'],dfi_ri['MAG_r']-dfi_ri['MAG_i'],c=dfi_ri['w_rmi'],cmap=cmap)#,norm=norm)
ax[1].errorbar(dfi_ri['MAG_i'],dfi_ri['MAG_r']-dfi_ri['MAG_i'],xerr=dfi_ri['MAGERR_i'],yerr=np.sqrt(dfi_ri['MAGERR_r']**2+dfi_ri['MAGERR_i']**2),fmt='none',c='k',alpha=0.05)
# plt.plot(df.App_i,df.App_r-df.App_i,'.')
# ax[1].axhline(xs[:-1][(xs[:-1]<1.33) & (xs[:-1]>0.6)][0],lw=0.7,color='green')
for z in bins_rmi_cen:
ax[1].plot(I,linear_rmi(I,round(z,4)),color=s_m.to_rgba(z))
ax[1].set_ylim(0.25,1.5)
ax[1].set_xlim(16,24)
# cbar=plt.colorbar(s_m)
ax[1].set_xlabel('I')
ax[1].set_ylabel('R-I')
ax[1].set_title('z=0.35-0.675')#, icut:'+str(magi_cut_rmi))
# plt.plot([corr_f(z) for z in df.z.values[5:-12]],df.App_r[5:-12]-df.App_i[5:-12],'-')
dfi_gr=dfi.loc[dfi['z_gmr'].dropna().index]
ax[2].scatter(dfi['MAG_i'],dfi['MAG_g']-dfi['MAG_r'],c='black',alpha=0.1)#,c=dfi['w_gmr'],cmap=cmap)
ax[2].scatter(dfi_gr['MAG_i'],dfi_gr['MAG_g']-dfi_gr['MAG_r'],c=dfi_gr['w_gmr'],cmap=cmap)#,norm=norm)
ax[2].errorbar(dfi_gr['MAG_i'],dfi_gr['MAG_g']-dfi_gr['MAG_r'],xerr=dfi_gr['MAGERR_i'],yerr=np.sqrt(dfi_gr['MAGERR_g']**2+dfi_gr['MAGERR_r']**2),fmt='none',c='k',alpha=0.05)
# plt.plot(df.App_i,df.App_g-df.App_r,'.')
# ax[2].axhline(xs[:-1][(xs[:-1]<1.65) & (xs[:-1]>np.min(x2))][0],lw=0.7,color='green')
for z in bins_gmr_cen:
ax[2].plot(I,linear_gmr(I,round(z,4)),color=s_m.to_rgba(z))
ax[2].set_ylim(0.75,2)
ax[2].set_xlim(16,24)
# cbar=plt.colorbar(s_m)
ax[2].set_xlabel('I')
ax[2].set_ylabel('G-R')
ax[2].set_title('z=0.15-0.325')
# plt.plot([corr_f(z) for z in df.z.values[:-25]],df.App_g[:-25]-df.App_r[:-25],'-')
xs=np.arange(np.min(z_fit)-0.1,np.max(z_fit)+0.1,0.001)
ax[3].bar(bin_cen2, ns2, width=bin_width, color='#1f77b4', alpha=1.0)
ax[3].bar(bin_cen1, ns1, width=bin_width, color='#ff7f0e', alpha=1.0)
ax[3].bar(z_total, n_total, width=bin_width, color='grey', alpha=0.5)
ax[3].axvline(0.3525,ls='--')
ax[3].axvline(z_max,ls='--',color='purple',label='z_max:%.2f'%z_max)
ax[3].axvline(redshift,color='red',label='z:%.2f'%redshift)
ax[3].plot(z_fit,n_fit+n_bkg,'o',c='tab:purple')
ax[3].plot(xs, gaussian_func(xs, *popt)+n_bkg, c='tab:green', ls='--', label='fit: a=%.2f, mu=%.4f'% tuple(popt))
ax[3].axhline(n_median,color='tab:green',label='median:%.2f'%n_median)
ax[3].axhline(n_mean,color='tab:red',label='mean:%.2f'%n_mean)
ax[3].legend(loc='best')
ax[3].set_xlabel('z')
ax[3].set_xlim(0.1,0.7)
ax[3].set_title('ebv:%.3f,ebv_g-r:-%.3f,ebv_r-i:-%.3f'%(ebv[0],ebv_g-ebv_r,ebv_r-ebv_i))
if np.max(n_total)<30:
ax[3].set_ylim(0,30)
if duplicate:
xs = np.arange(np.min(z_fit_dup) - 0.1, np.max(z_fit_dup) + 0.1, 0.001)
ax[4].bar(bin_cen2, ns2-ns_dup2, width=bin_width, color='#1f77b4') #widht = 0.025
ax[4].bar(bin_cen1, ns1-ns_dup1, width=bin_width, color='#ff7f0e') #width = 0.025
ax[4].axvline(z_max,ls='--',color='purple',label='z_max:%.2f'%z_max)
ax[4].axvline(redshift,color='red',label='z:%.2f'%redshift)
ax[4].plot(z_fit_dup,n_fit_dup+n_bkg_dup,'o',c='tab:purple')
ax[4].plot(xs, gaussian_func(xs, *popt_dup)+n_bkg_dup, c='tab:green', ls='--', label='fit: a=%.2f, mu=%.4f'% tuple(popt))
ax[4].legend(loc='best')
ax[4].set_xlabel('z')
ax[4].set_xlim(0.1,0.7)
if np.max(n_total)<30:
ax[4].set_ylim(0,30)
if colorerr:
dfi_rmi = dfi[~np.isnan(dfi['z_rmi'])]
dfi_gmr = dfi[~np.isnan(dfi['z_gmr'])]
zs_gmr = np.arange(0.11, 0.3425, 0.002)
zs_rmi = np.arange(0.3425, 0.65, 0.002)
ntot_rmi = np.repeat(0, len(zs_rmi))
ntot_gmr = np.repeat(0, len(zs_gmr))
for i, row in dfi_rmi.iterrows():
# for i, row in dfi.iterrows():
i0 = row['MAG_i']
rmi = row['MAG_r'] - row['MAG_i']
rmierr = np.sqrt((row['MAGERR_r'])**2 + row['MAGERR_i']**2)
ntot_rmi0 = scipy.stats.norm(rmi, rmierr).pdf(
linear_rmi(i0, zs_rmi))
ntot_rmi = ntot_rmi + ntot_rmi0 * row['w_rmi']
ax[4].plot(zs_rmi,ntot_rmi0*row['w_rmi'],'-',color='tab:red',alpha=0.2)
for i, row in dfi_gmr.iterrows():
# for i, row in dfi.iterrows():
i0 = row['MAG_i']
gmr = row['MAG_g'] - row['MAG_r']
gmrerr = np.sqrt((row['MAGERR_g'])**2 + row['MAGERR_r']**2)
ntot_gmr0 = scipy.stats.norm(gmr, gmrerr).pdf(
linear_gmr(i0, zs_gmr))
ntot_gmr = ntot_gmr + ntot_gmr0 * row['w_gmr']
ax[4].plot(zs_gmr,ntot_gmr0*row['w_gmr'],'-',color='tab:cyan',alpha=0.2)
ax[4].plot(zs_gmr, ntot_gmr, '.')
ax[4].plot(zs_rmi, ntot_rmi, '.')
ax[4].axvline(z_max,ls='--',color='purple',label='z_max:%.2f'%z_max)
ax[4].axvline(redshift,color='red',label='z:%.2f'%redshift)
ax[4].legend(loc='best')
ax[4].set_xlabel('z')
ax[4].set_xlim(0.1, 0.7)
if np.max(np.append(ntot_gmr,ntot_rmi)) < 200:
ax[4].set_ylim(0, 200)
n_total_cerr=np.append(ntot_gmr,ntot_rmi)
else:
n_total_cerr=0
signal_final = signal_dup if duplicate else signal
plt.tight_layout(rect=[0, 0., 1, 0.98])
purge('/Users/taweewat/Documents/red_sequence/pisco_color_plots/',
'redsq_richg%s_%s_all_.*_%s_tilted.png' % ('', mode, field))
plt.savefig('/Users/taweewat/Documents/red_sequence/pisco_color_plots/redsq_richg%s_%s_all_%.3f_%s_tilted.png' % ('',mode,signal_final,field), dpi=120)
plt.close(fig)
# fig,ax=plt.subplots(1,4,figsize=(20,5))
# make_images(field,ax[0])
# dfi_gmi=dfi[~np.isnan(dfi['z_gmi'])]
# zs_gmi=np.arange(0.115,0.69,0.002)
# ntot_gmi=np.repeat(0,len(zs_gmi))
# for i, row in dfi_gmi.iterrows():
# i0 = row['MAG_i']
# gmi = row['MAG_g'] - row['MAG_i']
# gmierr = np.sqrt((row['MAGERR_g'])**2 + row['MAGERR_i']**2)
# ntot_gmi0 = scipy.stats.norm(gmi, gmierr).pdf(
# linear_gmi(i0, zs_gmi))
# ntot_gmi = ntot_gmi + ntot_gmi0 * row['w_gmi']
# ax[3].plot(zs_gmi,ntot_gmi0*row['w_gmi'],'-',color='tab:cyan',alpha=0.2)
# ax[1].scatter(dfi['MAG_i'],dfi['MAG_g']-dfi['MAG_i'],c='black',alpha=0.1)#dfi['w_rmi'],cmap=cmap)
# ax[1].scatter(dfi_gmi['MAG_i'],dfi_gmi['MAG_g']-dfi_gmi['MAG_i'],c=dfi_gmi['w_gmi'],cmap=cmap)
# ax[1].errorbar(dfi_gmi['MAG_i'], dfi_gmi['MAG_g'] - dfi_gmi['MAG_i'], xerr=dfi_gmi['MAGERR_i'],
# yerr=np.sqrt(dfi_gmi['MAGERR_g']**2 + dfi_gmi['MAGERR_i']**2), fmt='none', c='k', alpha=0.05)
# for z in np.arange(0.15, 0.71, bin_width):
# ax[1].plot(I,linear_gmi(I,z),color=s_m.to_rgba(z))
# ax[1].set_ylim(1.0,3.5)
# ax[1].set_xlim(16,24)
# ax[1].set_xlabel('I')
# ax[1].set_ylabel('G-I')
# ax[1].set_title('z=0.15-0.675')
# ns3,xs3=np.histogram(z_gmi,bins=np.arange(0.1325,0.7,0.035),weights=np.array(w_gmi)*np.array(w_col_gmi))
# bin_cen3 = (xs3[:-1] + xs3[1:])/2
# z_max_gmi = bin_cen3[np.where(ns3 == np.max(ns3))[0][0]]
# n_bkg = np.mean(sorted(ns3)[2:-2]);
# z_total_added = np.insert(
# np.append(bin_cen3, bin_cen3[-1] + bin_width), 0, bin_cen3[0] - bin_width)
# n_total_added = np.insert(np.append(ns3, 0), 0, 0) - n_bkg
# indi = np.where(n_total_added == np.max(n_total_added))[0][0]
# z_fit = z_total_added[[indi - 1, indi, indi + 1]]; print 'z_fit', z_fit
# n_fit = n_total_added[[indi - 1, indi, indi + 1]]; print 'n_fit', n_fit
# if (n_fit[0]<0.) and (n_fit[2]<0.):
# popt_gmi, pcov_gmi = curve_fit(gaussian_func, z_fit, [0,n_fit[1],0], p0=[n_fit[1],z_fit[1]])
# else:
# popt_gmi, pcov_gmi = curve_fit(gaussian_func, z_fit,
# n_fit, p0=[n_fit[1], z_fit[1]])
# lum_factor2 = integrate.quad( f1d, -23.455, abs_mag(22.25, tuple(popt_gmi)[1]))[0]
# density_factor2=integrate.quad(NFW_profile,0.001,cosmo.kpc_proper_per_arcmin(tuple(popt_gmi)[1]).value/1e3)[0]
# signal_gmi = tuple(popt_gmi)[0] / (lum_factor2 * density_factor2)
# z_max_fit_gmi = tuple(popt_gmi)[1]
# ax[2].bar(bin_cen3, ns3, width = 0.035, color='#1f77b4')#, alpha=0.5)
# ax[2].axvline(z_max_gmi, ls='--', color='purple',
# label='z_max=%.3f'%z_max_gmi)
# ax[2].axvline(z_max_fit_gmi, ls='--', color='tab:green',
# label='z_max_fit=%.3f'%z_max_fit_gmi)
# ax[2].axvline(redshift,color='red',label='z:%.3f'%redshift)
# ax[2].plot(z_fit,n_fit+n_bkg,'o',c='tab:purple')
# xs=np.arange(np.min(z_fit)-0.1,np.max(z_fit)+0.1,0.001)
# ax[2].plot(xs, gaussian_func(xs, *popt_gmi) + n_bkg, c='tab:green',
# ls='--', label='fit: a=%.2f, mu=%.4f' % tuple(popt_gmi))
# ax[2].legend(loc='best')
# ax[2].set_xlabel('z')
# ax[2].set_xlim(0.1,0.7)
# if np.max(n_total)<30:
# ax[2].set_ylim(0,30)
# ax[3].plot(zs_gmi,ntot_gmi,'.')
# ax[3].set_xlabel('z')
# ax[3].set_xlim(0.1,0.7)
# ax[3].axvline(z_max_fit_gmi,ls='--',color='purple',label='z_max_fit:%.2f'%z_max_fit_gmi)
# ax[3].axvline(redshift,color='red',label='z:%.2f'%redshift)
# if np.max(ntot_gmi)<70:
# ax[3].set_ylim(0,70)
# ntot_gmi_max=np.max(ntot_gmi)
# zs_gmi_max=zs_gmi[np.argmax(ntot_gmi)]
# ax[3].axvline(zs_gmi_max,ls='--',color='pink',label='zs_gmi_max:%.2f'%zs_gmi_max)
# plt.tight_layout(rect=[0, 0., 1, 0.98])
# plt.savefig('/Users/taweewat/Documents/red_sequence/pisco_color_plots/redsq_gmi_%s_all_%.3f_%s_tilted.png' %
# (mode, signal_gmi, field), dpi=120)
# plt.close(fig)
# transparent=False
if transparent:
fig,ax=plt.subplots(figsize=(7,4))
ax.bar(bin_cen2, ns2, width=0.035, color='#1f77b4') #widht = 0.025
ax.bar(bin_cen1, ns1, width = 0.035, color='#ff7f0e') #width = 0.025
ax.axvline(z_max,ls='--',color='purple',label='z_max:%.2f'%z_max)
ax.set_xlabel('z')
ax.set_xlim(0.1,0.7)
if np.max(n_total)<30:
ax.set_ylim(0,30)
for axp in ax.spines:
ax.spines[axp].set_color('white')
ax.xaxis.label.set_color('white')
ax.yaxis.label.set_color('white')
ax.tick_params(axis='x', colors='white')
ax.tick_params(axis='y', colors='white')
purge('/Users/taweewat/Documents/red_sequence/pisco_color_plots/',
'redsq_transparent_%.3f_%s_tilted.png' % (signal_final,field))
plt.savefig('/Users/taweewat/Documents/red_sequence/pisco_color_plots/redsq_transparent_%.3f_%s_tilted.png' % (signal_final,field), dpi=120, transparent=True)
plt.close(fig)
# red_dir='/Users/taweewat/Documents/red_sequence/'
# rich_filename = 'all_richness_%s.csv'%extra_name
# if not os.path.isfile(red_dir + rich_filename):
# os.system("cp %s %s"%(red_dir+'all_richness_gremove_lum_silk_zf2.5.csv',red_dir+rich_filename))
# df_richness=pd.read_csv(red_dir+rich_filename)
# df_richness[['Nmax','Nbkg_mean','Nbkg_median','zmax','amp','zmax_fit','gremove','lum_factor','density_factor']]=np.nan
# df_richness.to_csv(red_dir+rich_filename)
# df_richness=pd.read_csv(red_dir+rich_filename)
# df_richness=df_richness.copy()
# df_richness.loc[df_richness['name'] == field, 'Nmax'] = np.max(n_total)
# df_richness.loc[df_richness['name'] == field, 'Nbkg_mean'] = np.mean(n_total)
# df_richness.loc[df_richness['name'] == field, 'Nbkg_median'] = np.median(n_total)
# df_richness.loc[df_richness['name'] == field, 'zmax'] = z_max
# df_richness.loc[df_richness['name'] == field, 'amp'] = signal_final
# df_richness.loc[df_richness['name'] == field, 'zmax_fit'] = z_max_fit
# df_richness.loc[df_richness['name'] == field, 'gremove'] = nog
# df_richness.loc[df_richness['name'] == field, 'lum_factor'] = lum_factor
# df_richness.loc[df_richness['name'] == field, 'density_factor'] = density_factor
# df_richness.to_csv(red_dir+rich_filename,index=0)
red_dir='/Users/taweewat/Documents/red_sequence/'
rich_filename = 'all_richness_%s.csv'%extra_name
if not os.path.isfile(red_dir + rich_filename):
df_richness=pd.DataFrame(columns=['name','Nmax','Nbkg_mean','Nbkg_median','zmax','amp','zmax_fit','gremove','lum_factor','density_factor'])
df_richness.to_csv(red_dir+rich_filename)
df_richness=pd.read_csv(red_dir+rich_filename,index_col=0)
dic={'name':field, 'Nmax':np.max(n_total), 'Nbkg_mean':np.mean(n_total), 'Nbkg_median':np.median(n_total), 'zmax':z_max,\
'amp':signal_final, 'zmax_fit':z_max_fit, 'gremove':nog, 'lum_factor':lum_factor, 'density_factor':density_factor}
if field in df_richness['name'].values:
df_richness=df_richness[df_richness['name']!=field]
df_richness=df_richness.append(pd.Series(dic),ignore_index=True).copy()
df_richness.to_csv(red_dir+rich_filename)
# get member redshift in the figure
if img_redshift:
image_redshift(field,signal,tuple(popt)[1],mode)
# get total images with red-sequence
if img_filp:
image_flip(field,signal,tuple(popt)[1],mode)
if colorerr:
return z_total, n_total, n_total_cerr
else:
return z_total, n_total, n_total_dup
def pisco_combine_imgs(field, mode='psf', mode2mass=''):
dir1='/Users/taweewat/Documents/red_sequence/pisco_color_plots/psf_est/'
dir2='/Users/taweewat/Documents/red_sequence/pisco_color_plots/'
dir3='/Users/taweewat/Documents/red_sequence/pisco_color_plots/'
dirout='/Users/taweewat/Documents/red_sequence/pisco_all/'
myReg = re.compile(r'(redsq_richg_%s_all_.*%s.*png)' % (mode, field))
myReg2=re.compile(r'(\d{1,3}\.\d{1,3})')
names=[]
for text in os.listdir(dir3):
if myReg.search(text) != None:
names.append(myReg.search(text).group())
if names==[]:
print 'no files', field
signal=myReg2.search(names[0]).group()
img1=dir1+'psf_est3_'+field+'_i.png'
img15='/Users/taweewat/Documents/red_sequence/pisco_color_plots/uncer_%s_%s.png'%(mode,field)
# img2=dir2+'star_galaxy_sep_12_all'+field+'.png'
img2='/Users/taweewat/Documents/red_sequence/pisco_image_redshift/img_redshift_%s_%.3f_%s.png' %(mode,float(signal),field)
print img2
img3=dir3+names[0]
images_list=[img1, img2, img3, img15]
imgs=[]
try:
imgs = [ Image_PIL.open(i) for i in images_list ]
except:
print 'no image file', field
mw = imgs[2].width/2
h = imgs[0].height+imgs[1].height/1+imgs[2].height/2
result = Image_PIL.new("RGBA", (mw, h))
y,index=0,0
for i in imgs:
if index<3:
if (index==2):# or (index==1):
i=i.resize((i.width/2,i.height/2))
result.paste(i, (0, y))
y += i.size[1]
index+=1
elif index==3:
i=i.resize((i.width/2,i.height/2))
result.paste(i, (imgs[0].width,0))
result.save(dirout + 'all_combine_%s_%s_%s_%s_%s.png' %
(extra_name, mode2mass, myReg2.search(names[0]).group(), mode, field))
def purge(dir, pattern):
for f in os.listdir(dir):
if re.search(pattern, f):
print 'remove', f
os.remove(os.path.join(dir, f))
def image_redshift(field,signal,redshift,mode):
df_total=pd.read_csv('/Users/taweewat/Documents/pisco_code/slr_output/galaxy_%s_final_%s.csv'%(mode,field),index_col=0)
df_star=pd.read_csv('/Users/taweewat/Documents/pisco_code/slr_output/star_psf_total_%s.csv'%field,index_col=0)
# df_star=df_star[df_star['SG']>0.95]
hdu=fits.open('/Users/taweewat/Documents/pisco_code/final/coadd_c%s_i.fits'%field)
img=hdu[0].data.astype(float)
img -= np.median(img)
df_total['redshift_m']=df_total.apply(lambda row: ex.redshift_f(row), axis=1)
def size_f(row):
if not np.isnan(row['w_gmr']):
size=row['w_gmr']
if not np.isnan(row['w_rmi']):
size=row['w_rmi']
if np.isnan(row['w_rmi']) and np.isnan(row['w_gmr']):
size=0
return size
df_total['size_m']=df_total.apply(lambda row: size_f(row), axis=1)
df_total=df_total[df_total['redshift_m'] > 0].copy()
norm = matplotlib.colors.Normalize(vmin=0,vmax=500)
c_m = matplotlib.cm.Greys_r
s_m = matplotlib.cm.ScalarMappable(cmap=c_m, norm=norm)
s_m.set_array([])
normalize = matplotlib.colors.Normalize(vmin=0.1, vmax=0.7)
fig, (a0, a1) = plt.subplots(1,2, figsize=(30,18), gridspec_kw = {'width_ratios':[0.8, 1]})
# a0.imshow(img, cmap=c_m, norm=norm, origin='lower')
# a0.scatter(df_star['XWIN_IMAGE_i'].values,df_star['YWIN_IMAGE_i'].values,s=100, marker='*', facecolors='none', edgecolors='yellow', label='star')
# df1i=df_total[df_total['w_rmi']>0.1]
# df2i=df_total[df_total['w_rmi']<=0.1]
# # a0.scatter(df1i['XWIN_IMAGE_i'].values,df1i['YWIN_IMAGE_i'].values,s=100, facecolors='none', edgecolors='blue')
# a0.scatter(df1i['XWIN_IMAGE_i'].values, df1i['YWIN_IMAGE_i'].values, s=100, c=df1i['size_m'].values, cmap='RdYlGn')
# a0.scatter(df2i['XWIN_IMAGE_i'].values,df2i['YWIN_IMAGE_i'].values, s=100, facecolors='none', edgecolors='white')
# a0.set_xlim(0,1600)
# a0.set_ylim(0, 2250)
try:
img2 = mpimg.imread('/Users/taweewat/Documents/pisco_code/Chips_images/aplpy4_%s_img4.jpeg' % field)
except:
img2 = mpimg.imread('/Users/taweewat/Documents/pisco_code/Chips_images/aplpy4_%s_img.jpeg' % field)
imgplot = a0.imshow(img2)
a0.axis('off')
a0.annotate('Redshift: %.3f\nRichness: %.2f' %
(redshift, signal), xy=(150, 100), color='white')
a1.imshow(img, cmap=c_m, norm=norm, origin='lower')
a1.scatter(df_star['XWIN_IMAGE_i'].values,df_star['YWIN_IMAGE_i'].values, s=300,edgecolor='orange', facecolor='none',lw=3)
#,s=100, marker='*', facecolors='none', edgecolors='yellow', label='star')
axi = a1.scatter(df_total['XWIN_IMAGE_i'].values, df_total['YWIN_IMAGE_i'].values,
s=(df_total['size_m'].values * 200)+30, c=df_total['redshift_m'].values, cmap='tab20b', norm=normalize)
plt.colorbar(axi) # df_total['size_m'].values*300
a1.set_xlim(0, 1600)
a1.set_ylim(0, 2250)
plt.tight_layout()
left, bottom, width, height = [0.05, 0.24, 0.3, 0.2]
ax2 = fig.add_axes([left, bottom, width, height])
ax2.imshow(mpimg.imread(
'/Users/taweewat/Documents/red_sequence/pisco_color_plots/redsq_transparent_%.3f_%s_tilted.png' % (signal, field)))
ax2.axes.get_xaxis().set_visible(False)
ax2.axes.get_yaxis().set_visible(False)
ax2.axis('off')
plt.savefig('/Users/taweewat/Documents/red_sequence/pisco_image_redshift/img_redshift_%s_%.3f_%s.png' %
(mode,signal,field), dpi=50)
plt.close(fig)
def image_flip(field, signal, redshift, mode):
img = mpimg.imread(
'/Users/taweewat/Documents/pisco_code/Chips_images/aplpy4_%s_img.jpeg' % field)
fig, ax = plt.subplots(figsize=(7, 7))
imgplot = ax.imshow(img)
ax.axis('off')
ax.annotate('Redshift: %.3f\nRichness: %.2f' %
(redshift, signal), xy=(150, 100), color='white')
left, bottom, width, height = [0.2, 0.18, 0.3, 0.2]
ax2 = fig.add_axes([left, bottom, width, height])
ax2.imshow(mpimg.imread(
'/Users/taweewat/Documents/red_sequence/pisco_color_plots/redsq_transparent_%.3f_%s_tilted.png' % (signal, field)))
ax2.axes.get_xaxis().set_visible(False)
ax2.axes.get_yaxis().set_visible(False)
ax2.axis('off')
# plt.tight_layout()
plt.savefig('/Users/taweewat/Documents/red_sequence/pisco_image_redshift/image_flip_%s_%.3f_%s.png' %
(mode, signal, field), dpi=200)
plt.close(fig)
if __name__ == "__main__":
"""
execute:
python pisco_pipeline/pisco_photometry_all.py CHIPS111 psf slr
#updated version with the no2mass option, which skips the comparison with known 2MASS stars
python pisco_pipeline/pisco_photometry_all.py CHIPS111 psf allslr no2mass
"""
print 'Number of arguments:', len(sys.argv), 'arguments.'
print 'Argument List:', str(sys.argv)
field = str(sys.argv[1])
mode = str(sys.argv[2]) #aper, psf, auto, hybrid
all_argv=sys.argv[3:] #allslr, slr, noslr
if (all_argv[0]=='allslr') | (all_argv[0]=='slr'):
slr=str(all_argv[0])
slr_param=True
elif all_argv[0]=='noslr':
slr='no_slr'
slr_param=False
if all_argv[1]=='2mass':
mode2mass=''
elif all_argv[1]=='no2mass':
mode2mass='_no2mass'
home='/Users/taweewat/Documents/pisco_code/' #09, 171208
# dirs=['ut170103/','ut170104/','ut170619/','ut170621/','ut170624/','ut171208/','ut171209/','ut171212/']
dirs=['ut190412','ut190413']
# 'ut171208/', 'ut171209/','ut171212/', 'ut170621/', 'ut170624/'
# dirs = ['ut170621/','ut170624/']
# dirs = ['ut170619/']
# dirs = ['ut170103/']
names=[]
myReg=re.compile(r'(CHIPS\d{4}[+-]\d{4})|(Field\d{3})')
for di in dirs:
dir=home+di
for text in os.listdir(dir):
if myReg.search(text) != None:
names.append(myReg.search(text).group())
all_fields=list(set(names))
# print all_fields
infile = open('/Users/taweewat/Documents/xray_project/code_github/allremove_chips.txt', 'r')
exception = [i.strip() for i in infile.readlines()]
all_fields_cut = all_fields[:]
all_fields_cut = ['SDSS603','SDSS501','SDSS123']
print all_fields_cut
# all_fields_cut = ['CHIPS1422-2728']
notgoflag=True
z_total_all,n_total_all,n_total_dup_all=[],[],[]
for index, field in enumerate(all_fields_cut):
print field, '%i/%i' % (index, len(all_fields_cut))
# if field == 'CHIPS0122-2646':
# notgoflag = False; continue
# if notgoflag:
# continue
if field in exception:
continue
if field in ['CHIPS1933-1511']:
continue
if slr=='allslr':
print 'allslr'
pisco_photometry_v4(field)
elif slr=='slr':
# star_galaxy_bleem(field)
pisco_cut_frame(field)
pisco_photometry_psf_v4(field, mode=mode, mode2mass=mode2mass, slr=slr_param)
purge('/Users/taweewat/Documents/red_sequence/pisco_color_plots/'\
,r'(redsq_%s_all_.*%s.*png)'%(mode,field))
# pisco_tilt_resequence(field, mode='psf', mode2mass='')
z_total=pisco_tilt_resequence(field, mode=mode, mode2mass=mode2mass)
# z_total_all.append(z_total)
# n_total_all.append(n_total)
# n_total_dup_all.append(n_total_dup)
# pisco_combine_imgs(field, mode=mode, mode2mass=mode2mass)
# pickle.dump( [z_total_all,n_total_all,n_total_dup_all], open( "pickle_all_richness_%s.pickle"%extra_name, "wb" ) )
            # print 'save pickle file at', "pickle_all_richness_%s.pickle" % extra_name
elif slr == 'no_slr':
pisco_cut_frame(field)
pisco_photometry_psf_v4(field, mode=mode, mode2mass=mode2mass, slr=slr_param)
purge('/Users/taweewat/Documents/red_sequence/pisco_color_plots/'\
,r'(redsq_%s_all_.*%s.*png)'%(mode,field))
z_total=pisco_tilt_resequence(field, mode=mode, mode2mass=mode2mass)
# z_total_all.append(z_total)
# n_total_all.append(n_total)
# n_total_dup_all.append(n_total_dup)
# pisco_combine_imgs(field, mode=mode, mode2mass=mode2mass)
# pickle.dump( [z_total_all,n_total_all,n_total_dup_all], open( "pickle_all_richness_%s.pickle"%extra_name, "wb" ) )
            # print 'save pickle file at', "pickle_all_richness_%s.pickle" % extra_name
purge('final', "proj_coadd_c%s_.*\.fits" % field)
purge('.', "proto_psf_%s_.*\.fits" % field)
purge('.', "samp_psf_%s_.*\.fits" % field)
purge('.', "resi_psf_%s_.*\.fits" % field)
purge('.', "snap_psf_%s_.*\.fits" % field)
purge('.', "chi_psf_%s_.*\.fits" % field)
# purge('psfex_output', "psf_%s_.*\.fits" % field)
# purge('slr_output', "a_psf_%s_.*\.fits" % field)
purge('final', "coadd_c%s_sq_.*\.fits" % field)
| mit |
burakbayramli/classnotes | stat/stat_065_powerlaw/powerlaw.py | 2 | 106244 | #The MIT License (MIT)
#
#Copyright (c) 2013 Jeff Alstott
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
# as described in https://docs.python.org/2/library/functions.html#print
from __future__ import print_function
import sys
__version__ = "1.3.4"
class Fit(object):
"""
A fit of a data set to various probability distributions, namely power
laws. For fits to power laws, the methods of Clauset et al. 2007 are used.
These methods identify the portion of the tail of the distribution that
follows a power law, beyond a value xmin. If no xmin is
provided, the optimal one is calculated and assigned at initialization.
Parameters
----------
data : list or array
discrete : boolean, optional
Whether the data is discrete (integers).
xmin : int or float, optional
The data value beyond which distributions should be fitted. If
None an optimal one will be calculated.
xmax : int or float, optional
The maximum value of the fitted distributions.
estimate_discrete : bool, optional
Whether to estimate the fit of a discrete power law using fast
analytical methods, instead of calculating the fit exactly with
slow numerical methods. Very accurate with xmin>6
sigma_threshold : float, optional
Upper limit on the standard error of the power law fit. Used after
fitting, when identifying valid xmin values.
parameter_range : dict, optional
Dictionary of valid parameter ranges for fitting. Formatted as a
dictionary of parameter names ('alpha' and/or 'sigma') and tuples
        of their lower and upper limits (ex. (1.5, 2.5), (None, .1)).
"""
def __init__(self, data,
discrete=False,
xmin=None, xmax=None,
fit_method='Likelihood',
estimate_discrete=True,
discrete_approximation='round',
sigma_threshold=None,
parameter_range=None,
fit_optimizer=None,
xmin_distance='D',
**kwargs):
self.data_original = data
# import logging
from numpy import asarray
self.data = asarray(self.data_original, dtype='float')
self.discrete = discrete
self.fit_method = fit_method
self.estimate_discrete = estimate_discrete
self.discrete_approximation = discrete_approximation
self.sigma_threshold = sigma_threshold
self.parameter_range = parameter_range
self.given_xmin = xmin
self.given_xmax = xmax
self.xmin = self.given_xmin
self.xmax = self.given_xmax
self.xmin_distance = xmin_distance
if 0 in self.data:
print("Values less than or equal to 0 in data. Throwing out 0 or negative values", file=sys.stderr)
self.data = self.data[self.data>0]
if self.xmax:
self.xmax = float(self.xmax)
self.fixed_xmax = True
n_above_max = sum(self.data>self.xmax)
self.data = self.data[self.data<=self.xmax]
else:
n_above_max = 0
self.fixed_xmax = False
if not all(self.data[i] <= self.data[i+1] for i in range(len(self.data)-1)):
from numpy import sort
self.data = sort(self.data)
self.fitting_cdf_bins, self.fitting_cdf = cdf(self.data, xmin=None, xmax=self.xmax)
if xmin and type(xmin)!=tuple and type(xmin)!=list:
self.fixed_xmin = True
self.xmin = float(xmin)
self.noise_flag = None
pl = Power_Law(xmin=self.xmin,
xmax=self.xmax,
discrete=self.discrete,
fit_method=self.fit_method,
estimate_discrete=self.estimate_discrete,
data=self.data,
parameter_range=self.parameter_range)
setattr(self,self.xmin_distance, getattr(pl, self.xmin_distance))
self.alpha = pl.alpha
self.sigma = pl.sigma
#self.power_law = pl
else:
self.fixed_xmin=False
print("Calculating best minimal value for power law fit", file=sys.stderr)
self.find_xmin()
self.data = self.data[self.data>=self.xmin]
self.n = float(len(self.data))
self.n_tail = self.n + n_above_max
self.supported_distributions = {'power_law': Power_Law,
'lognormal': Lognormal,
'exponential': Exponential,
'truncated_power_law': Truncated_Power_Law,
'stretched_exponential': Stretched_Exponential,
}
#'gamma': None}
def __getattr__(self, name):
if name in self.supported_distributions.keys():
#from string import capwords
#dist = capwords(name, '_')
#dist = globals()[dist] #Seems a hack. Might try import powerlaw; getattr(powerlaw, dist)
dist = self.supported_distributions[name]
if dist == Power_Law:
parameter_range = self.parameter_range
else:
parameter_range = None
setattr(self,
name,
dist(data=self.data,
xmin=self.xmin,
xmax=self.xmax,
discrete=self.discrete,
fit_method=self.fit_method,
estimate_discrete=self.estimate_discrete,
discrete_approximation=self.discrete_approximation,
parameter_range=parameter_range,
parent_Fit=self))
return getattr(self, name)
else:
raise AttributeError(name)
def find_xmin(self, xmin_distance=None):
"""
Returns the optimal xmin beyond which the scaling regime of the power
law fits best. The attribute self.xmin of the Fit object is also set.
The optimal xmin beyond which the scaling regime of the power law fits
best is identified by minimizing the Kolmogorov-Smirnov distance
between the data and the theoretical power law fit.
This is the method of Clauset et al. 2007.
"""
from numpy import unique, asarray, argmin
#Much of the rest of this function was inspired by Adam Ginsburg's plfit code,
#specifically the mapping and sigma threshold behavior:
#http://code.google.com/p/agpy/source/browse/trunk/plfit/plfit.py?spec=svn359&r=357
if not self.given_xmin:
possible_xmins = self.data
else:
possible_ind = min(self.given_xmin)<=self.data
possible_ind *= self.data<=max(self.given_xmin)
possible_xmins = self.data[possible_ind]
xmins, xmin_indices = unique(possible_xmins, return_index=True)
#Don't look at last xmin, as that's also the xmax, and we want to at least have TWO points to fit!
xmins = xmins[:-1]
xmin_indices = xmin_indices[:-1]
if xmin_distance is None:
xmin_distance = self.xmin_distance
if len(xmins)<=0:
print("Less than 2 unique data values left after xmin and xmax "
"options! Cannot fit. Returning nans.", file=sys.stderr)
from numpy import nan, array
self.xmin = nan
self.D = nan
self.V = nan
self.Asquare = nan
self.Kappa = nan
self.alpha = nan
self.sigma = nan
self.n_tail = nan
setattr(self, xmin_distance+'s', array([nan]))
self.alphas = array([nan])
self.sigmas = array([nan])
self.in_ranges = array([nan])
self.xmins = array([nan])
self.noise_flag = True
return self.xmin
def fit_function(xmin):
pl = Power_Law(xmin=xmin,
xmax=self.xmax,
discrete=self.discrete,
estimate_discrete=self.estimate_discrete,
fit_method=self.fit_method,
data=self.data,
parameter_range=self.parameter_range,
parent_Fit=self)
return getattr(pl, xmin_distance), pl.alpha, pl.sigma, pl.in_range()
fits = asarray(list(map(fit_function, xmins)))
# logging.warning(fits.shape)
setattr(self, xmin_distance+'s', fits[:,0])
self.alphas = fits[:,1]
self.sigmas = fits[:,2]
self.in_ranges = fits[:,3].astype(bool)
self.xmins = xmins
good_values = self.in_ranges
if self.sigma_threshold:
good_values = good_values * (self.sigmas < self.sigma_threshold)
if good_values.all():
min_D_index = argmin(getattr(self, xmin_distance+'s'))
self.noise_flag = False
elif not good_values.any():
min_D_index = argmin(getattr(self, xmin_distance+'s'))
self.noise_flag = True
else:
from numpy.ma import masked_array
            masked_Ds = masked_array(getattr(self, xmin_distance+'s'), mask=~good_values)
min_D_index = masked_Ds.argmin()
self.noise_flag = False
if self.noise_flag:
print("No valid fits found.", file=sys.stderr)
#Set the Fit's xmin to the optimal xmin
self.xmin = xmins[min_D_index]
setattr(self, xmin_distance, getattr(self, xmin_distance+'s')[min_D_index])
self.alpha = self.alphas[min_D_index]
self.sigma = self.sigmas[min_D_index]
#Update the fitting CDF given the new xmin, in case other objects, like
#Distributions, want to use it for fitting (like if they do KS fitting)
self.fitting_cdf_bins, self.fitting_cdf = self.cdf()
return self.xmin
def nested_distribution_compare(self, dist1, dist2, nested=True, **kwargs):
"""
Returns the loglikelihood ratio, and its p-value, between the two
distribution fits, assuming the candidate distributions are nested.
Parameters
----------
dist1 : string
Name of the first candidate distribution (ex. 'power_law')
dist2 : string
Name of the second candidate distribution (ex. 'exponential')
nested : bool or None, optional
Whether to assume the candidate distributions are nested versions
of each other. None assumes not unless the name of one distribution
is a substring of the other. True by default.
Returns
-------
R : float
Loglikelihood ratio of the two distributions' fit to the data. If
greater than 0, the first distribution is preferred. If less than
0, the second distribution is preferred.
p : float
Significance of R
"""
return self.distribution_compare(dist1, dist2, nested=nested, **kwargs)
def distribution_compare(self, dist1, dist2, nested=None, **kwargs):
"""
Returns the loglikelihood ratio, and its p-value, between the two
distribution fits, assuming the candidate distributions are nested.
Parameters
----------
dist1 : string
Name of the first candidate distribution (ex. 'power_law')
dist2 : string
Name of the second candidate distribution (ex. 'exponential')
nested : bool or None, optional
Whether to assume the candidate distributions are nested versions
of each other. None assumes not unless the name of one distribution
is a substring of the other.
Returns
-------
R : float
Loglikelihood ratio of the two distributions' fit to the data. If
greater than 0, the first distribution is preferred. If less than
0, the second distribution is preferred.
p : float
Significance of R
"""
        if ((dist1 in dist2) or (dist2 in dist1)) and nested is None:
print("Assuming nested distributions", file=sys.stderr)
nested = True
dist1 = getattr(self, dist1)
dist2 = getattr(self, dist2)
loglikelihoods1 = dist1.loglikelihoods(self.data)
loglikelihoods2 = dist2.loglikelihoods(self.data)
return loglikelihood_ratio(
loglikelihoods1, loglikelihoods2,
nested=nested,
**kwargs)
def loglikelihood_ratio(self, dist1, dist2, nested=None, **kwargs):
"""
Another name for distribution_compare.
"""
return self.distribution_compare(dist1, dist2, nested=nested, **kwargs)
def cdf(self, original_data=False, survival=False, **kwargs):
"""
Returns the cumulative distribution function of the data.
Parameters
----------
original_data : bool, optional
Whether to use all of the data initially passed to the Fit object.
If False, uses only the data used for the fit (within xmin and
xmax.)
survival : bool, optional
Whether to return the complementary cumulative distribution
function, 1-CDF, also known as the survival function.
Returns
-------
X : array
The sorted, unique values in the data.
probabilities : array
The portion of the data that is less than or equal to X.
"""
if original_data:
data = self.data_original
xmin = None
xmax = None
else:
data = self.data
xmin = self.xmin
xmax = self.xmax
return cdf(data, xmin=xmin, xmax=xmax, survival=survival,
**kwargs)
def ccdf(self, original_data=False, survival=True, **kwargs):
"""
Returns the complementary cumulative distribution function of the data.
Parameters
----------
original_data : bool, optional
Whether to use all of the data initially passed to the Fit object.
If False, uses only the data used for the fit (within xmin and
xmax.)
survival : bool, optional
Whether to return the complementary cumulative distribution
function, also known as the survival function, or the cumulative
distribution function, 1-CCDF.
Returns
-------
X : array
The sorted, unique values in the data.
probabilities : array
The portion of the data that is greater than or equal to X.
"""
if original_data:
data = self.data_original
xmin = None
xmax = None
else:
data = self.data
xmin = self.xmin
xmax = self.xmax
return cdf(data, xmin=xmin, xmax=xmax, survival=survival,
**kwargs)
def pdf(self, original_data=False, **kwargs):
"""
Returns the probability density function (normalized histogram) of the
data.
Parameters
----------
original_data : bool, optional
Whether to use all of the data initially passed to the Fit object.
If False, uses only the data used for the fit (within xmin and
xmax.)
Returns
-------
bin_edges : array
The edges of the bins of the probability density function.
probabilities : array
The portion of the data that is within the bin. Length 1 less than
bin_edges, as it corresponds to the spaces between them.
"""
if original_data:
data = self.data_original
xmin = None
xmax = None
else:
data = self.data
xmin = self.xmin
xmax = self.xmax
edges, hist = pdf(data, xmin=xmin, xmax=xmax, **kwargs)
return edges, hist
def plot_cdf(self, ax=None, original_data=False, survival=False, **kwargs):
"""
Plots the CDF to a new figure or to axis ax if provided.
Parameters
----------
ax : matplotlib axis, optional
The axis to which to plot. If None, a new figure is created.
original_data : bool, optional
Whether to use all of the data initially passed to the Fit object.
If False, uses only the data used for the fit (within xmin and
xmax.)
survival : bool, optional
Whether to plot a CDF (False) or CCDF (True). False by default.
Returns
-------
ax : matplotlib axis
The axis to which the plot was made.
"""
if original_data:
data = self.data_original
else:
data = self.data
return plot_cdf(data, ax=ax, survival=survival, **kwargs)
def plot_ccdf(self, ax=None, original_data=False, survival=True, **kwargs):
"""
Plots the CCDF to a new figure or to axis ax if provided.
Parameters
----------
ax : matplotlib axis, optional
The axis to which to plot. If None, a new figure is created.
original_data : bool, optional
Whether to use all of the data initially passed to the Fit object.
If False, uses only the data used for the fit (within xmin and
xmax.)
survival : bool, optional
Whether to plot a CDF (False) or CCDF (True). True by default.
Returns
-------
ax : matplotlib axis
The axis to which the plot was made.
"""
if original_data:
data = self.data_original
else:
data = self.data
return plot_cdf(data, ax=ax, survival=survival, **kwargs)
def plot_pdf(self, ax=None, original_data=False,
linear_bins=False, **kwargs):
"""
        Plots the probability density function (PDF) of the data to a new figure
or to axis ax if provided.
Parameters
----------
ax : matplotlib axis, optional
The axis to which to plot. If None, a new figure is created.
original_data : bool, optional
Whether to use all of the data initially passed to the Fit object.
If False, uses only the data used for the fit (within xmin and
xmax.)
linear_bins : bool, optional
Whether to use linearly spaced bins (True) or logarithmically
spaced bins (False). False by default.
Returns
-------
ax : matplotlib axis
The axis to which the plot was made.
"""
if original_data:
data = self.data_original
else:
data = self.data
return plot_pdf(data, ax=ax, linear_bins=linear_bins, **kwargs)
class Distribution(object):
"""
An abstract class for theoretical probability distributions. Can be created
with particular parameter values, or fitted to a dataset. Fitting is
by maximum likelihood estimation by default.
Parameters
----------
xmin : int or float, optional
The data value beyond which distributions should be fitted. If
None an optimal one will be calculated.
xmax : int or float, optional
The maximum value of the fitted distributions.
discrete : boolean, optional
Whether the distribution is discrete (integers).
data : list or array, optional
The data to which to fit the distribution. If provided, the fit will
be created at initialization.
fit_method : "Likelihood" or "KS", optional
Method for fitting the distribution. "Likelihood" is maximum Likelihood
estimation. "KS" is minimial distance estimation using The
Kolmogorov-Smirnov test.
parameters : tuple or list, optional
The parameters of the distribution. Will be overridden if data is
given or the fit method is called.
parameter_range : dict, optional
Dictionary of valid parameter ranges for fitting. Formatted as a
dictionary of parameter names ('alpha' and/or 'sigma') and tuples
        of their lower and upper limits (ex. (1.5, 2.5), (None, .1)).
initial_parameters : tuple or list, optional
Initial values for the parameter in the fitting search.
discrete_approximation : "round", "xmax" or int, optional
        If the discrete form of the theoretical distribution is not known,
it can be estimated. One estimation method is "round", which sums
the probability mass from x-.5 to x+.5 for each data point. The other
option is to calculate the probability for each x from 1 to N and
normalize by their sum. N can be "xmax" or an integer.
parent_Fit : Fit object, optional
A Fit object from which to use data, if it exists.
"""
def __init__(self,
xmin=1, xmax=None,
discrete=False,
fit_method='Likelihood',
data=None,
parameters=None,
parameter_range=None,
initial_parameters=None,
discrete_approximation='round',
parent_Fit=None,
**kwargs):
self.xmin = xmin
self.xmax = xmax
self.discrete = discrete
self.fit_method = fit_method
self.discrete_approximation = discrete_approximation
self.parameter1 = None
self.parameter2 = None
self.parameter3 = None
self.parameter1_name = None
self.parameter2_name = None
self.parameter3_name = None
if parent_Fit:
self.parent_Fit = parent_Fit
if parameters is not None:
self.parameters(parameters)
if parameter_range:
self.parameter_range(parameter_range)
if initial_parameters:
self._given_initial_parameters(initial_parameters)
if (data is not None) and not (parameter_range and self.parent_Fit):
self.fit(data)
def fit(self, data=None, suppress_output=False):
"""
Fits the parameters of the distribution to the data. Uses options set
at initialization.
"""
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
data = trim_to_range(data, xmin=self.xmin, xmax=self.xmax)
if self.fit_method=='Likelihood':
def fit_function(params):
self.parameters(params)
return -sum(self.loglikelihoods(data))
elif self.fit_method=='KS':
def fit_function(params):
self.parameters(params)
self.KS(data)
return self.D
from scipy.optimize import fmin
parameters, negative_loglikelihood, iter, funcalls, warnflag, = \
fmin(
lambda params: fit_function(params),
self.initial_parameters(data),
full_output=1,
disp=False)
self.parameters(parameters)
if not self.in_range():
self.noise_flag=True
else:
self.noise_flag=False
if self.noise_flag and not suppress_output:
print("No valid fits found.", file=sys.stderr)
self.loglikelihood =-negative_loglikelihood
self.KS(data)
def KS(self, data=None):
"""
Returns the Kolmogorov-Smirnov distance D between the distribution and
the data. Also sets the properties D+, D-, V (the Kuiper testing
statistic), and Kappa (1 + the average difference between the
theoretical and empirical distributions).
Parameters
----------
data : list or array, optional
If not provided, attempts to use the data from the Fit object in
which the Distribution object is contained.
"""
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
data = trim_to_range(data, xmin=self.xmin, xmax=self.xmax)
if len(data)<2:
print("Not enough data. Returning nan", file=sys.stderr)
from numpy import nan
self.D = nan
self.D_plus = nan
self.D_minus = nan
self.Kappa = nan
self.V = nan
self.Asquare = nan
return self.D
if hasattr(self, 'parent_Fit'):
bins = self.parent_Fit.fitting_cdf_bins
Actual_CDF = self.parent_Fit.fitting_cdf
ind = bins>=self.xmin
bins = bins[ind]
Actual_CDF = Actual_CDF[ind]
dropped_probability = Actual_CDF[0]
Actual_CDF -= dropped_probability
Actual_CDF /= 1-dropped_probability
else:
bins, Actual_CDF = cdf(data)
Theoretical_CDF = self.cdf(bins)
CDF_diff = Theoretical_CDF - Actual_CDF
self.D_plus = CDF_diff.max()
self.D_minus = -1.0*CDF_diff.min()
from numpy import mean
self.Kappa = 1 + mean(CDF_diff)
self.V = self.D_plus + self.D_minus
self.D = max(self.D_plus, self.D_minus)
self.Asquare = sum((
(CDF_diff**2) /
(Theoretical_CDF * (1 - Theoretical_CDF))
)[1:]
)
return self.D
def ccdf(self,data=None, survival=True):
"""
The complementary cumulative distribution function (CCDF) of the
theoretical distribution. Calculated for the values given in data
within xmin and xmax, if present.
Parameters
----------
data : list or array, optional
If not provided, attempts to use the data from the Fit object in
which the Distribution object is contained.
survival : bool, optional
Whether to calculate a CDF (False) or CCDF (True).
True by default.
Returns
-------
X : array
The sorted, unique values in the data.
probabilities : array
            The portion of the data that is greater than or equal to X.
"""
return self.cdf(data=data, survival=survival)
def cdf(self,data=None, survival=False):
"""
The cumulative distribution function (CDF) of the theoretical
distribution. Calculated for the values given in data within xmin and
xmax, if present.
Parameters
----------
data : list or array, optional
If not provided, attempts to use the data from the Fit object in
which the Distribution object is contained.
survival : bool, optional
Whether to calculate a CDF (False) or CCDF (True).
False by default.
Returns
-------
X : array
The sorted, unique values in the data.
probabilities : array
The portion of the data that is less than or equal to X.
"""
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
data = trim_to_range(data, xmin=self.xmin, xmax=self.xmax)
n = len(data)
from sys import float_info
if not self.in_range():
from numpy import tile
return tile(10**float_info.min_10_exp, n)
if self._cdf_xmin==1:
#If cdf_xmin is 1, it means we don't have the numerical accuracy to
#calculate this tail. So we make everything 1, indicating
#we're at the end of the tail. Such an xmin should be thrown
#out by the KS test.
from numpy import ones
CDF = ones(n)
return CDF
CDF = self._cdf_base_function(data) - self._cdf_xmin
norm = 1 - self._cdf_xmin
if self.xmax:
norm = norm - (1 - self._cdf_base_function(self.xmax))
CDF = CDF/norm
if survival:
CDF = 1 - CDF
possible_numerical_error = False
from numpy import isnan, min
if isnan(min(CDF)):
print("'nan' in fit cumulative distribution values.", file=sys.stderr)
possible_numerical_error = True
#if 0 in CDF or 1 in CDF:
# print("0 or 1 in fit cumulative distribution values.", file=sys.stderr)
# possible_numerical_error = True
if possible_numerical_error:
print("Likely underflow or overflow error: the optimal fit for this distribution gives values that are so extreme that we lack the numerical precision to calculate them.", file=sys.stderr)
return CDF
@property
def _cdf_xmin(self):
return self._cdf_base_function(self.xmin)
def pdf(self, data=None):
"""
Returns the probability density function (normalized histogram) of the
theoretical distribution for the values in data within xmin and xmax,
if present.
Parameters
----------
data : list or array, optional
If not provided, attempts to use the data from the Fit object in
which the Distribution object is contained.
Returns
-------
probabilities : array
"""
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
data = trim_to_range(data, xmin=self.xmin, xmax=self.xmax)
n = len(data)
from sys import float_info
if not self.in_range():
from numpy import tile
return tile(10**float_info.min_10_exp, n)
if not self.discrete:
f = self._pdf_base_function(data)
C = self._pdf_continuous_normalizer
likelihoods = f*C
else:
if self._pdf_discrete_normalizer:
f = self._pdf_base_function(data)
C = self._pdf_discrete_normalizer
likelihoods = f*C
elif self.discrete_approximation=='round':
lower_data = data-.5
upper_data = data+.5
#Temporarily expand xmin and xmax to be able to grab the extra bit of
#probability mass beyond the (integer) values of xmin and xmax
#Note this is a design decision. One could also say this extra
#probability "off the edge" of the distribution shouldn't be included,
#and that implementation is retained below, commented out. Note, however,
#that such a cliff means values right at xmin and xmax have half the width to
#grab probability from, and thus are lower probability than they would otherwise
#be. This is particularly concerning for values at xmin, which are typically
#the most likely and greatly influence the distribution's fit.
self.xmin -= .5
if self.xmax:
self.xmax += .5
#Clean data for invalid values before handing to cdf, which will purge them
#lower_data[lower_data<self.xmin] +=.5
#if self.xmax:
# upper_data[upper_data>self.xmax] -=.5
likelihoods = self.cdf(upper_data)-self.cdf(lower_data)
self.xmin +=.5
if self.xmax:
self.xmax -= .5
else:
if self.discrete_approximation=='xmax':
upper_limit = self.xmax
else:
upper_limit = self.discrete_approximation
# from mpmath import exp
from numpy import arange
X = arange(self.xmin, upper_limit+1)
PDF = self._pdf_base_function(X)
PDF = (PDF/sum(PDF)).astype(float)
likelihoods = PDF[(data-self.xmin).astype(int)]
likelihoods[likelihoods==0] = 10**float_info.min_10_exp
return likelihoods
@property
def _pdf_continuous_normalizer(self):
C = 1 - self._cdf_xmin
if self.xmax:
C -= 1 - self._cdf_base_function(self.xmax+1)
C = 1.0/C
return C
@property
def _pdf_discrete_normalizer(self):
return False
def parameter_range(self, r, initial_parameters=None):
"""
Set the limits on the range of valid parameters to be considered while
fitting.
Parameters
----------
r : dict
A dictionary of the parameter range. Restricted parameter
names are keys, and with tuples of the form (lower_bound,
upper_bound) as values.
initial_parameters : tuple or list, optional
Initial parameter values to start the fitting search from.
"""
from types import FunctionType
if type(r)==FunctionType:
self._in_given_parameter_range = r
else:
self._range_dict = r
if initial_parameters:
self._given_initial_parameters = initial_parameters
if self.parent_Fit:
self.fit(self.parent_Fit.data)
def in_range(self):
"""
Whether the current parameters of the distribution are within the range
of valid parameters.
"""
try:
r = self._range_dict
result = True
for k in r.keys():
                #For any attributes we've specified, make sure we're above the lower bound
                #and below the upper bound (if they exist). This must be true of all of them.
lower_bound, upper_bound = r[k]
if upper_bound is not None:
result *= getattr(self, k) < upper_bound
if lower_bound is not None:
result *= getattr(self, k) > lower_bound
return result
except AttributeError:
try:
in_range = self._in_given_parameter_range(self)
except AttributeError:
in_range = self._in_standard_parameter_range()
return bool(in_range)
def initial_parameters(self, data):
"""
Return previously user-provided initial parameters or, if never
provided, calculate new ones. Default initial parameter estimates are
unique to each theoretical distribution.
"""
try:
return self._given_initial_parameters
except AttributeError:
return self._initial_parameters(data)
def likelihoods(self, data):
"""
The likelihoods of the observed data from the theoretical distribution.
Another name for the probabilities or probability density function.
"""
return self.pdf(data)
def loglikelihoods(self, data):
"""
The logarithm of the likelihoods of the observed data from the
theoretical distribution.
"""
from numpy import log
return log(self.likelihoods(data))
def plot_ccdf(self, data=None, ax=None, survival=True, **kwargs):
"""
        Plots the complementary cumulative distribution function (CCDF) of the
theoretical distribution for the values given in data within xmin and
xmax, if present. Plots to a new figure or to axis ax if provided.
Parameters
----------
data : list or array, optional
If not provided, attempts to use the data from the Fit object in
which the Distribution object is contained.
ax : matplotlib axis, optional
The axis to which to plot. If None, a new figure is created.
survival : bool, optional
Whether to plot a CDF (False) or CCDF (True). True by default.
Returns
-------
ax : matplotlib axis
The axis to which the plot was made.
"""
        return self.plot_cdf(data, ax=ax, survival=survival, **kwargs)
def plot_cdf(self, data=None, ax=None, survival=False, **kwargs):
"""
Plots the cumulative distribution function (CDF) of the
theoretical distribution for the values given in data within xmin and
xmax, if present. Plots to a new figure or to axis ax if provided.
Parameters
----------
data : list or array, optional
If not provided, attempts to use the data from the Fit object in
which the Distribution object is contained.
ax : matplotlib axis, optional
The axis to which to plot. If None, a new figure is created.
survival : bool, optional
Whether to plot a CDF (False) or CCDF (True). False by default.
Returns
-------
ax : matplotlib axis
The axis to which the plot was made.
"""
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
from numpy import unique
bins = unique(trim_to_range(data, xmin=self.xmin, xmax=self.xmax))
CDF = self.cdf(bins, survival=survival)
if not ax:
import matplotlib.pyplot as plt
plt.plot(bins, CDF, **kwargs)
ax = plt.gca()
else:
ax.plot(bins, CDF, **kwargs)
ax.set_xscale("log")
ax.set_yscale("log")
return ax
def plot_pdf(self, data=None, ax=None, **kwargs):
"""
Plots the probability density function (PDF) of the
theoretical distribution for the values given in data within xmin and
xmax, if present. Plots to a new figure or to axis ax if provided.
Parameters
----------
data : list or array, optional
If not provided, attempts to use the data from the Fit object in
which the Distribution object is contained.
ax : matplotlib axis, optional
The axis to which to plot. If None, a new figure is created.
Returns
-------
ax : matplotlib axis
The axis to which the plot was made.
"""
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
from numpy import unique
bins = unique(trim_to_range(data, xmin=self.xmin, xmax=self.xmax))
PDF = self.pdf(bins)
from numpy import nan
PDF[PDF==0] = nan
if not ax:
import matplotlib.pyplot as plt
plt.plot(bins, PDF, **kwargs)
ax = plt.gca()
else:
ax.plot(bins, PDF, **kwargs)
ax.set_xscale("log")
ax.set_yscale("log")
return ax
def generate_random(self,n=1, estimate_discrete=None):
"""
Generates random numbers from the theoretical probability distribution.
If xmax is present, it is currently ignored.
Parameters
----------
n : int or float
The number of random numbers to generate
estimate_discrete : boolean
For discrete distributions, whether to use a faster approximation of
the random number generator. If None, attempts to inherit
the estimate_discrete behavior used for fitting from the Distribution
object or the parent Fit object, if present. Approximations only
exist for some distributions (namely the power law). If an
approximation does not exist an estimate_discrete setting of True
will not be inherited.
Returns
-------
r : array
Random numbers drawn from the distribution
"""
from numpy.random import rand
from numpy import array
r = rand(n)
if not self.discrete:
x = self._generate_random_continuous(r)
else:
if (estimate_discrete and not hasattr(self, '_generate_random_discrete_estimate') ):
raise AttributeError("This distribution does not have an "
"estimation of the discrete form for generating simulated "
"data. Try the exact form with estimate_discrete=False.")
if estimate_discrete is None:
if not hasattr(self, '_generate_random_discrete_estimate'):
estimate_discrete = False
elif hasattr(self, 'estimate_discrete'):
estimate_discrete = self.estimate_discrete
                elif hasattr(self, 'parent_Fit'):
estimate_discrete = self.parent_Fit.estimate_discrete
else:
estimate_discrete = False
if estimate_discrete:
x = self._generate_random_discrete_estimate(r)
else:
x = array([self._double_search_discrete(R) for R in r],
dtype='float')
return x
def _double_search_discrete(self, r):
#Find a range from x1 to x2 that our random probability fits between
x2 = int(self.xmin)
while self.ccdf(data=[x2]) >= (1 - r):
x1 = x2
x2 = 2*x1
#Use binary search within that range to find the exact answer, up to
#the limit of being between two integers.
x = bisect_map(x1, x2, self.ccdf, 1-r)
return x
class Power_Law(Distribution):
def __init__(self, estimate_discrete=True, **kwargs):
self.estimate_discrete = estimate_discrete
Distribution.__init__(self, **kwargs)
def parameters(self, params):
self.alpha = params[0]
self.parameter1 = self.alpha
self.parameter1_name = 'alpha'
@property
def name(self):
return "power_law"
@property
def sigma(self):
        #Only calculable after self.fit has run, when the number of data points
        #is established
from numpy import sqrt
return (self.alpha - 1) / sqrt(self.n)
def _in_standard_parameter_range(self):
return self.alpha>1
def fit(self, data=None):
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
data = trim_to_range(data, xmin=self.xmin, xmax=self.xmax)
self.n = len(data)
from numpy import log, sum
if not self.discrete and not self.xmax:
self.alpha = 1 + (self.n / sum(log(data/self.xmin)))
if not self.in_range():
Distribution.fit(self, data, suppress_output=True)
self.KS(data)
elif self.discrete and self.estimate_discrete and not self.xmax:
self.alpha = 1 + (self.n / sum(log(data / (self.xmin - .5))))
if not self.in_range():
Distribution.fit(self, data, suppress_output=True)
self.KS(data)
else:
Distribution.fit(self, data, suppress_output=True)
if not self.in_range():
self.noise_flag=True
else:
self.noise_flag=False
def _initial_parameters(self, data):
from numpy import log, sum
return 1 + len(data)/sum(log(data / (self.xmin)))
def _cdf_base_function(self, x):
if self.discrete:
from scipy.special import zeta
CDF = 1 - zeta(self.alpha, x)
else:
#Can this be reformulated to not reference xmin? Removal of the probability
#before xmin and after xmax is handled in Distribution.cdf(), so we don't
#strictly need this element. It doesn't hurt, for the moment.
CDF = 1-(x/self.xmin)**(-self.alpha+1)
return CDF
def _pdf_base_function(self, x):
return x**-self.alpha
@property
def _pdf_continuous_normalizer(self):
return (self.alpha-1) * self.xmin**(self.alpha-1)
@property
def _pdf_discrete_normalizer(self):
C = 1.0 - self._cdf_xmin
if self.xmax:
C -= 1 - self._cdf_base_function(self.xmax+1)
C = 1.0/C
return C
def _generate_random_continuous(self, r):
return self.xmin * (1 - r) ** (-1/(self.alpha - 1))
def _generate_random_discrete_estimate(self, r):
x = (self.xmin - 0.5) * (1 - r) ** (-1/(self.alpha - 1)) + 0.5
from numpy import around
return around(x)
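# Quick self-check of the analytic continuous estimator used in Power_Law.fit,
# alpha_hat = 1 + n / sum(log(x / xmin)), written as an illustrative sketch with
# made-up parameter values:
#
#   import numpy as np
#   rng = np.random.RandomState(0)
#   alpha_true, xmin = 2.5, 1.0
#   x = xmin * (1 - rng.rand(100000)) ** (-1.0 / (alpha_true - 1))  # inverse-CDF draws
#   alpha_hat = 1 + len(x) / np.sum(np.log(x / xmin))
#   # alpha_hat should land close to alpha_true; the expected standard error is
#   # roughly (alpha_true - 1) / sqrt(len(x)) ~ 0.005 here.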
class Exponential(Distribution):
def parameters(self, params):
self.Lambda = params[0]
self.parameter1 = self.Lambda
self.parameter1_name = 'lambda'
@property
def name(self):
return "exponential"
def _initial_parameters(self, data):
from numpy import mean
return 1/mean(data)
def _in_standard_parameter_range(self):
return self.Lambda>0
def _cdf_base_function(self, x):
from numpy import exp
CDF = 1 - exp(-self.Lambda*x)
return CDF
def _pdf_base_function(self, x):
from numpy import exp
return exp(-self.Lambda * x)
@property
def _pdf_continuous_normalizer(self):
from numpy import exp
return self.Lambda * exp(self.Lambda * self.xmin)
@property
def _pdf_discrete_normalizer(self):
from numpy import exp
C = (1 - exp(-self.Lambda)) * exp(self.Lambda * self.xmin)
if self.xmax:
Cxmax = (1 - exp(-self.Lambda)) * exp(self.Lambda * self.xmax)
C = 1.0/C - 1.0/Cxmax
C = 1.0/C
return C
def pdf(self, data=None):
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
if not self.discrete and self.in_range() and not self.xmax:
data = trim_to_range(data, xmin=self.xmin, xmax=self.xmax)
from numpy import exp
# likelihoods = exp(-Lambda*data)*\
# Lambda*exp(Lambda*xmin)
likelihoods = self.Lambda*exp(self.Lambda*(self.xmin-data))
#Simplified so as not to throw a nan from infs being divided by each other
from sys import float_info
likelihoods[likelihoods==0] = 10**float_info.min_10_exp
else:
likelihoods = Distribution.pdf(self, data)
return likelihoods
def loglikelihoods(self, data=None):
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
if not self.discrete and self.in_range() and not self.xmax:
data = trim_to_range(data, xmin=self.xmin, xmax=self.xmax)
from numpy import log
# likelihoods = exp(-Lambda*data)*\
# Lambda*exp(Lambda*xmin)
loglikelihoods = log(self.Lambda) + (self.Lambda*(self.xmin-data))
#Simplified so as not to throw a nan from infs being divided by each other
from sys import float_info
loglikelihoods[loglikelihoods==0] = log(10**float_info.min_10_exp)
else:
loglikelihoods = Distribution.loglikelihoods(self, data)
return loglikelihoods
def _generate_random_continuous(self, r):
from numpy import log
return self.xmin - (1/self.Lambda) * log(1-r)
class Stretched_Exponential(Distribution):
def parameters(self, params):
self.Lambda = params[0]
self.parameter1 = self.Lambda
self.parameter1_name = 'lambda'
self.beta = params[1]
self.parameter2 = self.beta
self.parameter2_name = 'beta'
@property
def name(self):
return "stretched_exponential"
def _initial_parameters(self, data):
from numpy import mean
return (1/mean(data), 1)
def _in_standard_parameter_range(self):
return self.Lambda>0 and self.beta>0
def _cdf_base_function(self, x):
from numpy import exp
CDF = 1 - exp(-(self.Lambda*x)**self.beta)
return CDF
def _pdf_base_function(self, x):
from numpy import exp
return (((x*self.Lambda)**(self.beta-1)) *
exp(-((self.Lambda*x)**self.beta)))
@property
def _pdf_continuous_normalizer(self):
from numpy import exp
C = self.beta*self.Lambda*exp((self.Lambda*self.xmin)**self.beta)
return C
@property
def _pdf_discrete_normalizer(self):
return False
def pdf(self, data=None):
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
if not self.discrete and self.in_range() and not self.xmax:
data = trim_to_range(data, xmin=self.xmin, xmax=self.xmax)
from numpy import exp
likelihoods = ((data*self.Lambda)**(self.beta-1) *
self.beta * self.Lambda *
exp((self.Lambda*self.xmin)**self.beta -
(self.Lambda*data)**self.beta))
#Simplified so as not to throw a nan from infs being divided by each other
from sys import float_info
likelihoods[likelihoods==0] = 10**float_info.min_10_exp
else:
likelihoods = Distribution.pdf(self, data)
return likelihoods
def loglikelihoods(self, data=None):
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
if not self.discrete and self.in_range() and not self.xmax:
data = trim_to_range(data, xmin=self.xmin, xmax=self.xmax)
from numpy import log
loglikelihoods = (
log((data*self.Lambda)**(self.beta-1) *
self.beta * self. Lambda) +
(self.Lambda*self.xmin)**self.beta -
(self.Lambda*data)**self.beta)
#Simplified so as not to throw a nan from infs being divided by each other
from sys import float_info
from numpy import inf
loglikelihoods[loglikelihoods==-inf] = log(10**float_info.min_10_exp)
else:
loglikelihoods = Distribution.loglikelihoods(self, data)
return loglikelihoods
def _generate_random_continuous(self, r):
from numpy import log
# return ( (self.xmin**self.beta) -
# (1/self.Lambda) * log(1-r) )**(1/self.beta)
return (1/self.Lambda)* ( (self.Lambda*self.xmin)**self.beta -
log(1-r) )**(1/self.beta)
class Truncated_Power_Law(Distribution):
def parameters(self, params):
self.alpha = params[0]
self.parameter1 = self.alpha
self.parameter1_name = 'alpha'
self.Lambda = params[1]
self.parameter2 = self.Lambda
self.parameter2_name = 'lambda'
@property
def name(self):
return "truncated_power_law"
def _initial_parameters(self, data):
from numpy import log, sum, mean
alpha = 1 + len(data)/sum( log( data / (self.xmin) ))
Lambda = 1/mean(data)
return (alpha, Lambda)
def _in_standard_parameter_range(self):
return self.Lambda>0 and self.alpha>1
def _cdf_base_function(self, x):
from mpmath import gammainc
from numpy import vectorize
gammainc = vectorize(gammainc)
CDF = ( (gammainc(1-self.alpha,self.Lambda*x)).astype('float') /
self.Lambda**(1-self.alpha)
)
CDF = 1 -CDF
return CDF
def _pdf_base_function(self, x):
from numpy import exp
return x**(-self.alpha) * exp(-self.Lambda * x)
@property
def _pdf_continuous_normalizer(self):
from mpmath import gammainc
C = ( self.Lambda**(1-self.alpha) /
float(gammainc(1-self.alpha,self.Lambda*self.xmin)))
return C
@property
def _pdf_discrete_normalizer(self):
if 0:
return False
from mpmath import lerchphi
from mpmath import exp # faster /here/ than numpy.exp
C = ( float(exp(self.xmin * self.Lambda) /
lerchphi(exp(-self.Lambda), self.alpha, self.xmin)) )
if self.xmax:
Cxmax = ( float(exp(self.xmax * self.Lambda) /
lerchphi(exp(-self.Lambda), self.alpha, self.xmax)) )
C = 1.0/C - 1.0/Cxmax
C = 1.0/C
return C
def pdf(self, data=None):
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
if not self.discrete and self.in_range() and False:
data = trim_to_range(data, xmin=self.xmin, xmax=self.xmax)
from numpy import exp
from mpmath import gammainc
# likelihoods = (data**-alpha)*exp(-Lambda*data)*\
# (Lambda**(1-alpha))/\
# float(gammainc(1-alpha,Lambda*xmin))
likelihoods = ( self.Lambda**(1-self.alpha) /
(data**self.alpha *
exp(self.Lambda*data) *
gammainc(1-self.alpha,self.Lambda*self.xmin)
).astype(float)
)
#Simplified so as not to throw a nan from infs being divided by each other
from sys import float_info
likelihoods[likelihoods==0] = 10**float_info.min_10_exp
else:
likelihoods = Distribution.pdf(self, data)
return likelihoods
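    # The next method appears to draw samples by rejection: propose x from the
    # shifted exponential tail x = xmin - log(1 - r)/Lambda and accept with
    # probability (x/xmin)**(-alpha), so accepted draws follow the target density
    # proportional to x**(-alpha) * exp(-Lambda*x) above xmin.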
def _generate_random_continuous(self, r):
def helper(r):
from numpy import log
from numpy.random import rand
while 1:
x = self.xmin - (1/self.Lambda) * log(1-r)
p = ( x/self.xmin )**-self.alpha
if rand()<p:
return x
r = rand()
from numpy import array
return array(list(map(helper, r)))
class Lognormal(Distribution):
def parameters(self, params):
self.mu = params[0]
self.parameter1 = self.mu
self.parameter1_name = 'mu'
self.sigma = params[1]
self.parameter2 = self.sigma
self.parameter2_name = 'sigma'
@property
def name(self):
return "lognormal"
def pdf(self, data=None):
"""
Returns the probability density function (normalized histogram) of the
theoretical distribution for the values in data within xmin and xmax,
if present.
Parameters
----------
data : list or array, optional
If not provided, attempts to use the data from the Fit object in
which the Distribution object is contained.
Returns
-------
probabilities : array
"""
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
data = trim_to_range(data, xmin=self.xmin, xmax=self.xmax)
n = len(data)
from sys import float_info
from numpy import tile
if not self.in_range():
return tile(10**float_info.min_10_exp, n)
if not self.discrete:
f = self._pdf_base_function(data)
C = self._pdf_continuous_normalizer
if C > 0:
likelihoods = f/C
else:
likelihoods = tile(10**float_info.min_10_exp, n)
else:
if self._pdf_discrete_normalizer:
f = self._pdf_base_function(data)
C = self._pdf_discrete_normalizer
likelihoods = f*C
elif self.discrete_approximation=='round':
likelihoods = self._round_discrete_approx(data)
else:
if self.discrete_approximation=='xmax':
upper_limit = self.xmax
else:
upper_limit = self.discrete_approximation
# from mpmath import exp
from numpy import arange
X = arange(self.xmin, upper_limit+1)
PDF = self._pdf_base_function(X)
PDF = (PDF/sum(PDF)).astype(float)
likelihoods = PDF[(data-self.xmin).astype(int)]
likelihoods[likelihoods==0] = 10**float_info.min_10_exp
return likelihoods
def _round_discrete_approx(self, data):
"""
This function reformulates the calculation to avoid underflow errors
with the erf function. As implemented, erf(x) quickly approaches 1
while erfc(x) is more accurate. Since erfc(x) = 1 - erf(x),
calculations can be written using erfc(x)
"""
import numpy as np
import scipy.special as ss
""" Temporarily expand xmin and xmax to be able to grab the extra bit of
probability mass beyond the (integer) values of xmin and xmax
Note this is a design decision. One could also say this extra
probability "off the edge" of the distribution shouldn't be included,
and that implementation is retained below, commented out. Note, however,
that such a cliff means values right at xmin and xmax have half the width to
grab probability from, and thus are lower probability than they would otherwise
be. This is particularly concerning for values at xmin, which are typically
the most likely and greatly influence the distribution's fit.
"""
lower_data = data-.5
upper_data = data+.5
self.xmin -= .5
if self.xmax:
self.xmax += .5
# revised calculation written to avoid underflow errors
arg1 = (np.log(lower_data)-self.mu) / (np.sqrt(2)*self.sigma)
arg2 = (np.log(upper_data)-self.mu) / (np.sqrt(2)*self.sigma)
likelihoods = 0.5*(ss.erfc(arg1) - ss.erfc(arg2))
if not self.xmax:
norm = 0.5*ss.erfc((np.log(self.xmin)-self.mu) / (np.sqrt(2)*self.sigma))
else:
# may still need to be fixed
norm = - self._cdf_xmin + self._cdf_base_function(self.xmax)
self.xmin +=.5
if self.xmax:
self.xmax -= .5
return likelihoods/norm
def cdf(self, data=None, survival=False):
"""
The cumulative distribution function (CDF) of the lognormal
distribution. Calculated for the values given in data within xmin and
xmax, if present. Calculation was reformulated to avoid underflow
errors
Parameters
----------
data : list or array, optional
If not provided, attempts to use the data from the Fit object in
which the Distribution object is contained.
survival : bool, optional
Whether to calculate a CDF (False) or CCDF (True).
False by default.
Returns
-------
X : array
The sorted, unique values in the data.
probabilities : array
The portion of the data that is less than or equal to X.
"""
from numpy import log, sqrt
import scipy.special as ss
if data is None and hasattr(self, 'parent_Fit'):
data = self.parent_Fit.data
data = trim_to_range(data, xmin=self.xmin, xmax=self.xmax)
n = len(data)
from sys import float_info
if not self.in_range():
from numpy import tile
return tile(10**float_info.min_10_exp, n)
val_data = (log(data)-self.mu) / (sqrt(2)*self.sigma)
val_xmin = (log(self.xmin)-self.mu) / (sqrt(2)*self.sigma)
CDF = 0.5 * (ss.erfc(val_xmin) - ss.erfc(val_data))
norm = 0.5 * ss.erfc(val_xmin)
if self.xmax:
# TO DO: Improve this line further for better numerical accuracy?
norm = norm - (1 - self._cdf_base_function(self.xmax))
CDF = CDF/norm
if survival:
CDF = 1 - CDF
possible_numerical_error = False
from numpy import isnan, min
if isnan(min(CDF)):
print("'nan' in fit cumulative distribution values.", file=sys.stderr)
possible_numerical_error = True
#if 0 in CDF or 1 in CDF:
# print("0 or 1 in fit cumulative distribution values.", file=sys.stderr)
# possible_numerical_error = True
if possible_numerical_error:
print("Likely underflow or overflow error: the optimal fit for this distribution gives values that are so extreme that we lack the numerical precision to calculate them.", file=sys.stderr)
return CDF
def _initial_parameters(self, data):
from numpy import mean, std, log
logdata = log(data)
return (mean(logdata), std(logdata))
def _in_standard_parameter_range(self):
#The standard deviation can't be negative
return self.sigma>0
def _cdf_base_function(self, x):
from numpy import sqrt, log
from scipy.special import erf
return 0.5 + ( 0.5 *
erf((log(x)-self.mu) / (sqrt(2)*self.sigma)))
def _pdf_base_function(self, x):
from numpy import exp, log
return ((1.0/x) *
exp(-( (log(x) - self.mu)**2 )/(2*self.sigma**2)))
@property
def _pdf_continuous_normalizer(self):
from mpmath import erfc
# from scipy.special import erfc
from scipy.constants import pi
from numpy import sqrt, log
C = (erfc((log(self.xmin) - self.mu) / (sqrt(2) * self.sigma)) /
sqrt(2/(pi*self.sigma**2)))
return float(C)
@property
def _pdf_discrete_normalizer(self):
return False
def _generate_random_continuous(self, r):
from numpy import exp, sqrt, log, frompyfunc
from mpmath import erf, erfinv
#This is a long, complicated function broken into parts.
#We use mpmath to maintain numerical accuracy as we run through
#erf and erfinv, until we get to more sane numbers. Thanks to
#Wolfram Alpha for producing the appropriate inverse of the CCDF
#for me, which is what we need to calculate these things.
erfinv = frompyfunc(erfinv,1,1)
Q = erf( ( log(self.xmin) - self.mu ) / (sqrt(2)*self.sigma))
Q = Q*r - r + 1.0
Q = erfinv(Q).astype('float')
return exp(self.mu + sqrt(2)*self.sigma*Q)
# def _generate_random_continuous(self, r1, r2=None):
# from numpy import log, sqrt, exp, sin, cos
# from scipy.constants import pi
# if r2==None:
# from numpy.random import rand
# r2 = rand(len(r1))
# r2_provided = False
# else:
# r2_provided = True
#
# rho = sqrt(-2.0 * self.sigma**2.0 * log(1-r1))
# theta = 2.0 * pi * r2
# x1 = exp(rho * sin(theta))
# x2 = exp(rho * cos(theta))
#
# if r2_provided:
# return x1, x2
# else:
# return x1
def nested_loglikelihood_ratio(loglikelihoods1, loglikelihoods2, **kwargs):
"""
Calculates a loglikelihood ratio and the p-value for testing which of two
probability distributions is more likely to have created a set of
observations. Assumes one of the probability distributions is a nested
version of the other.
Parameters
----------
loglikelihoods1 : list or array
The logarithms of the likelihoods of each observation, calculated from
a particular probability distribution.
loglikelihoods2 : list or array
The logarithms of the likelihoods of each observation, calculated from
a particular probability distribution.
nested : bool, optional
Whether one of the two probability distributions that generated the
likelihoods is a nested version of the other. True by default.
normalized_ratio : bool, optional
Whether to return the loglikelihood ratio, R, or the normalized
ratio R/sqrt(n*variance)
Returns
-------
R : float
The loglikelihood ratio of the two sets of likelihoods. If positive,
the first set of likelihoods is more likely (and so the probability
distribution that produced them is a better fit to the data). If
negative, the reverse is true.
p : float
        The significance of the sign of R. If below a critical value
(typically .05) the sign of R is taken to be significant. If above the
critical value the sign of R is taken to be due to statistical
fluctuations.
"""
return loglikelihood_ratio(loglikelihoods1, loglikelihoods2,
nested=True, **kwargs)
def loglikelihood_ratio(loglikelihoods1, loglikelihoods2,
nested=False, normalized_ratio=False):
"""
Calculates a loglikelihood ratio and the p-value for testing which of two
probability distributions is more likely to have created a set of
observations.
Parameters
----------
loglikelihoods1 : list or array
The logarithms of the likelihoods of each observation, calculated from
a particular probability distribution.
loglikelihoods2 : list or array
The logarithms of the likelihoods of each observation, calculated from
a particular probability distribution.
nested: bool, optional
Whether one of the two probability distributions that generated the
likelihoods is a nested version of the other. False by default.
normalized_ratio : bool, optional
Whether to return the loglikelihood ratio, R, or the normalized
ratio R/sqrt(n*variance)
Returns
-------
R : float
The loglikelihood ratio of the two sets of likelihoods. If positive,
the first set of likelihoods is more likely (and so the probability
distribution that produced them is a better fit to the data). If
negative, the reverse is true.
p : float
        The significance of the sign of R. If below a critical value
(typically .05) the sign of R is taken to be significant. If above the
critical value the sign of R is taken to be due to statistical
fluctuations.
"""
from numpy import sqrt
from scipy.special import erfc
n = float(len(loglikelihoods1))
if n==0:
R = 0
p = 1
return R, p
from numpy import asarray
loglikelihoods1 = asarray(loglikelihoods1)
loglikelihoods2 = asarray(loglikelihoods2)
#Clean for extreme values, if any
from numpy import inf, log
from sys import float_info
min_val = log(10**float_info.min_10_exp)
loglikelihoods1[loglikelihoods1==-inf] = min_val
loglikelihoods2[loglikelihoods2==-inf] = min_val
R = sum(loglikelihoods1-loglikelihoods2)
from numpy import mean
mean_diff = mean(loglikelihoods1)-mean(loglikelihoods2)
variance = sum(
( (loglikelihoods1-loglikelihoods2) - mean_diff)**2
)/n
if nested:
from scipy.stats import chi2
p = 1 - chi2.cdf(abs(2*R), 1)
else:
p = erfc( abs(R) / sqrt(2*n*variance))
if normalized_ratio:
R = R/sqrt(n*variance)
return R, p
def cdf(data, survival=False, **kwargs):
"""
The cumulative distribution function (CDF) of the data.
Parameters
----------
data : list or array, optional
survival : bool, optional
Whether to calculate a CDF (False) or CCDF (True). False by default.
Returns
-------
X : array
The sorted, unique values in the data.
probabilities : array
The portion of the data that is less than or equal to X.
"""
return cumulative_distribution_function(data, survival=survival, **kwargs)
def ccdf(data, survival=True, **kwargs):
"""
The complementary cumulative distribution function (CCDF) of the data.
Parameters
----------
data : list or array, optional
survival : bool, optional
Whether to calculate a CDF (False) or CCDF (True). True by default.
Returns
-------
X : array
The sorted, unique values in the data.
probabilities : array
        The portion of the data that is greater than or equal to X.
"""
return cumulative_distribution_function(data, survival=survival, **kwargs)
def cumulative_distribution_function(data,
xmin=None, xmax=None,
survival=False, **kwargs):
"""
The cumulative distribution function (CDF) of the data.
Parameters
----------
data : list or array, optional
survival : bool, optional
Whether to calculate a CDF (False) or CCDF (True). False by default.
xmin : int or float, optional
The minimum data size to include. Values less than xmin are excluded.
xmax : int or float, optional
        The maximum data size to include. Values greater than xmax are
excluded.
Returns
-------
X : array
The sorted, unique values in the data.
probabilities : array
The portion of the data that is less than or equal to X.
"""
from numpy import array
data = array(data)
if not data.any():
from numpy import nan
return array([nan]), array([nan])
data = trim_to_range(data, xmin=xmin, xmax=xmax)
n = float(len(data))
from numpy import sort
data = sort(data)
all_unique = not( any( data[:-1]==data[1:] ) )
if all_unique:
from numpy import arange
CDF = arange(n)/n
else:
        #This clever bit, which uses searchsorted to rapidly calculate the CDF
        #of data with repeated values, comes from Adam Ginsburg's plfit code,
        #specifically https://github.com/keflavich/plfit/commit/453edc36e4eb35f35a34b6c792a6d8c7e848d3b5#plfit/plfit.py
from numpy import searchsorted, unique
CDF = searchsorted(data, data,side='left')/n
unique_data, unique_indices = unique(data, return_index=True)
data=unique_data
CDF = CDF[unique_indices]
if survival:
CDF = 1-CDF
return data, CDF
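# Example (illustrative sketch, not part of the original module; kept commented
# out so that importing this file is unaffected):
#
#    X, probs = cumulative_distribution_function([1, 1, 2, 3, 5])
#    # X holds the sorted unique values and probs the fraction of data below each;
#    # passing survival=True returns the complementary (survival) curve instead.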
def is_discrete(data):
"""Checks if every element of the array is an integer."""
from numpy import floor
return (floor(data)==data.astype(float)).all()
def trim_to_range(data, xmin=None, xmax=None, **kwargs):
"""
Removes elements of the data that are above xmin or below xmax (if present)
"""
from numpy import asarray
data = asarray(data)
if xmin:
data = data[data>=xmin]
if xmax:
data = data[data<=xmax]
return data
def pdf(data, xmin=None, xmax=None, linear_bins=False, **kwargs):
"""
Returns the probability density function (normalized histogram) of the
data.
Parameters
----------
data : list or array
xmin : float, optional
Minimum value of the PDF. If None, uses the smallest value in the data.
xmax : float, optional
Maximum value of the PDF. If None, uses the largest value in the data.
    linear_bins : bool, optional
        Whether to use linearly spaced bins, as opposed to logarithmically
        spaced bins (which are recommended for log-log plots).
Returns
-------
bin_edges : array
The edges of the bins of the probability density function.
probabilities : array
The portion of the data that is within the bin. Length 1 less than
bin_edges, as it corresponds to the spaces between them.
"""
from numpy import logspace, histogram, floor, unique
from math import ceil, log10
if not xmax:
xmax = max(data)
if not xmin:
xmin = min(data)
if linear_bins:
bins = range(int(xmin), int(xmax))
else:
log_min_size = log10(xmin)
log_max_size = log10(xmax)
number_of_bins = ceil((log_max_size-log_min_size)*10)
bins=unique(
floor(
logspace(
log_min_size, log_max_size, num=number_of_bins)))
hist, edges = histogram(data, bins, density=True)
return edges, hist
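# Example (illustrative sketch, not part of the original module; the synthetic
# heavy-tailed data below are arbitrary, and the snippet is kept commented out
# so that importing this file is unaffected):
#
#    import numpy as np
#    import matplotlib.pyplot as plt
#    data = 1 + np.random.pareto(2.0, size=10000)
#    edges, hist = pdf(data)
#    plt.loglog((edges[1:] + edges[:-1]) / 2.0, hist)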
def checkunique(data):
"""Quickly checks if a sorted array is all unique elements."""
for i in range(len(data)-1):
if data[i]==data[i+1]:
return False
return True
#def checksort(data):
# """
# Checks if the data is sorted, in O(n) time. If it isn't sorted, it then
# sorts it in O(nlogn) time. Expectation is that the data will typically
# be sorted. Presently slower than numpy's sort, even on large arrays, and
# so is useless.
# """
#
# n = len(data)
# from numpy import arange
# if not all(data[i] <= data[i+1] for i in arange(n-1)):
# from numpy import sort
# data = sort(data)
# return data
def plot_ccdf(data, ax=None, survival=False, **kwargs):
    """
    Plots the complementary cumulative distribution function (CCDF) of the data
    to a new figure or to axis ax if provided.
    Parameters
    ----------
    data : list or array
    ax : matplotlib axis, optional
        The axis to which to plot. If None, a new figure is created.
    survival : bool, optional
        Whether to plot a CDF (False) or CCDF (True). True by default.
    Returns
    -------
    ax : matplotlib axis
        The axis to which the plot was made.
    """
    return plot_cdf(data, ax=ax, survival=True, **kwargs)
def plot_cdf(data, ax=None, survival=False, **kwargs):
"""
Plots the cumulative distribution function (CDF) of the data to a new
figure or to axis ax if provided.
Parameters
----------
data : list or array
ax : matplotlib axis, optional
The axis to which to plot. If None, a new figure is created.
survival : bool, optional
Whether to plot a CDF (False) or CCDF (True). False by default.
Returns
-------
ax : matplotlib axis
The axis to which the plot was made.
"""
bins, CDF = cdf(data, survival=survival, **kwargs)
if not ax:
import matplotlib.pyplot as plt
plt.plot(bins, CDF, **kwargs)
ax = plt.gca()
else:
ax.plot(bins, CDF, **kwargs)
ax.set_xscale("log")
ax.set_yscale("log")
return ax
def plot_pdf(data, ax=None, linear_bins=False, **kwargs):
"""
Plots the probability density function (PDF) to a new figure or to axis ax
if provided.
Parameters
----------
data : list or array
ax : matplotlib axis, optional
The axis to which to plot. If None, a new figure is created.
linear_bins : bool, optional
Whether to use linearly spaced bins (True) or logarithmically
spaced bins (False). False by default.
Returns
-------
ax : matplotlib axis
The axis to which the plot was made.
"""
edges, hist = pdf(data, linear_bins=linear_bins, **kwargs)
bin_centers = (edges[1:]+edges[:-1])/2.0
from numpy import nan
hist[hist==0] = nan
if not ax:
import matplotlib.pyplot as plt
plt.plot(bin_centers, hist, **kwargs)
ax = plt.gca()
else:
ax.plot(bin_centers, hist, **kwargs)
ax.set_xscale("log")
ax.set_yscale("log")
return ax
def bisect_map(mn, mx, function, target):
"""
Uses binary search to find the target solution to a function, searching in
a given ordered sequence of integer values.
Parameters
----------
    mn, mx : int
        The lower and upper bounds of the ordered range of integers to search.
    function : a function that takes a single integer input, which monotonically
        decreases over the range from mn to mx.
    target : the target value of the function
    Returns
    -------
    value : the input value that yields the target solution. If there is no
        exact solution in the searched range, finds the nearest value k such that
        function(k) <= target < function(k+1). This is similar to the behavior of
        bisect_left in the bisect package. If even the first, leftmost value of the
        range does not satisfy this condition, -1 is returned.
"""
if function([mn]) < target or function([mx]) > target:
return -1
while 1:
if mx==mn+1:
return mn
        m = (mn + mx) // 2  # integer midpoint; floor division keeps the search on integers
value = function([m])[0]
if value > target:
mn = m
elif value < target:
mx = m
else:
return m
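# Example (illustrative sketch, not part of the original module; kept commented
# out so that importing this file is unaffected). The function argument is
# called with a one-element sequence, so it should accept and return
# array-like values, e.g.:
#
#    import numpy as np
#    f = lambda ks: np.asarray([100 - k for k in ks])  # monotonically decreasing
#    bisect_map(0, 100, f, 60)  # -> 40, since f(40) == 60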
######################
#What follows are functional programming forms of the above code, which are more
#clunky and have somewhat less functionality. However, they are here if you
#really want them.
class Distribution_Fit(object):
def __init__(self, data, name, xmin, discrete=False, xmax=None, method='Likelihood', estimate_discrete=True):
self.data = data
self.discrete = discrete
self.xmin = xmin
self.xmax = xmax
self.method = method
self.name = name
self.estimate_discrete = estimate_discrete
return
def __getattr__(self, name):
param_names = {'lognormal': ('mu', 'sigma', None),
'exponential': ('Lambda', None, None),
'truncated_power_law': ('alpha', 'Lambda', None),
'power_law': ('alpha', None, None),
'negative_binomial': ('r', 'p', None),
'stretched_exponential': ('Lambda', 'beta', None),
'gamma': ('k', 'theta', None)}
param_names = param_names[self.name]
if name in param_names:
if name == param_names[0]:
setattr(self, name, self.parameter1)
elif name == param_names[1]:
setattr(self, name, self.parameter2)
elif name == param_names[2]:
setattr(self, name, self.parameter3)
return getattr(self, name)
elif name in ['parameters',
'parameter1_name',
'parameter1',
'parameter2_name',
'parameter2',
'parameter3_name',
'parameter3',
'loglikelihood']:
self.parameters, self.loglikelihood = distribution_fit(self.data, distribution=self.name, discrete=self.discrete,
xmin=self.xmin, xmax=self.xmax, search_method=self.method, estimate_discrete=self.estimate_discrete)
self.parameter1 = self.parameters[0]
if len(self.parameters) < 2:
self.parameter2 = None
else:
self.parameter2 = self.parameters[1]
if len(self.parameters) < 3:
self.parameter3 = None
else:
self.parameter3 = self.parameters[2]
self.parameter1_name = param_names[0]
self.parameter2_name = param_names[1]
self.parameter3_name = param_names[2]
if name == 'parameters':
return self.parameters
elif name == 'parameter1_name':
return self.parameter1_name
elif name == 'parameter2_name':
return self.parameter2_name
elif name == 'parameter3_name':
return self.parameter3_name
elif name == 'parameter1':
return self.parameter1
elif name == 'parameter2':
return self.parameter2
elif name == 'parameter3':
return self.parameter3
elif name == 'loglikelihood':
return self.loglikelihood
if name == 'D':
if self.name != 'power_law':
self.D = None
else:
self.D = power_law_ks_distance(self.data, self.parameter1, xmin=self.xmin, xmax=self.xmax, discrete=self.discrete)
return self.D
if name == 'p':
print("A p value outside of a loglihood ratio comparison to another candidate distribution is not currently supported.\n \
If your data set is particularly large and has any noise in it at all, using such statistical tools as the Monte Carlo method\n\
can lead to erroneous results anyway; the presence of the noise means the distribution will obviously not perfectly fit the\n\
candidate distribution, and the very large number of samples will make the Monte Carlo simulations very close to a perfect\n\
fit. As such, such a test will always fail, unless your candidate distribution perfectly describes all elements of the\n\
system, including the noise. A more helpful analysis is the comparison between multiple, specific candidate distributions\n\
(the loglikelihood ratio test), which tells you which is the best fit of these distributions.", file=sys.stderr)
self.p = None
return self.p
#
# elif name in ['power_law_loglikelihood_ratio',
# 'power_law_p']:
# pl_R, pl_p = distribution_compare(self.data, 'power_law', self.power_law.parameters, name, self.parameters, self.discrete, self.xmin, self.xmax)
# self.power_law_loglikelihood_ratio = pl_R
# self.power_law_p = pl_p
# if name=='power_law_loglikelihood_ratio':
# return self.power_law_loglikelihood_ratio
# if name=='power_law_p':
# return self.power_law_p
# elif name in ['truncated_power_law_loglikelihood_ratio',
# 'truncated_power_law_p']:
# tpl_R, tpl_p = distribution_compare(self.data, 'truncated_power_law', self.truncated_power_law.parameters, name, self.parameters, self.discrete, self.xmin, self.xmax)
# self.truncated_power_law_loglikelihood_ratio = tpl_R
# self.truncated_power_law_p = tpl_p
# if name=='truncated_power_law_loglikelihood_ratio':
# return self.truncated_power_law_loglikelihood_ratio
# if name=='truncated_power_law_p':
# return self.truncated_power_law_p
else:
raise AttributeError(name)
def distribution_fit(data, distribution='all', discrete=False, xmin=None, xmax=None, \
comparison_alpha=None, search_method='Likelihood', estimate_discrete=True):
from numpy import log
if distribution == 'negative_binomial' and not is_discrete(data):
print("Rounding to integer values for negative binomial fit.", file=sys.stderr)
from numpy import around
data = around(data)
discrete = True
    #If we aren't given an xmin, calculate the best possible one for a power law. This can take a while!
if xmin is None or xmin == 'find' or type(xmin) == tuple or type(xmin) == list:
print("Calculating best minimal value", file=sys.stderr)
if 0 in data:
print("Value 0 in data. Throwing out 0 values", file=sys.stderr)
data = data[data != 0]
xmin, D, alpha, loglikelihood, n_tail, noise_flag = find_xmin(data, discrete=discrete, xmax=xmax, search_method=search_method, estimate_discrete=estimate_discrete, xmin_range=xmin)
else:
alpha = None
if distribution == 'power_law' and alpha:
return [alpha], loglikelihood
xmin = float(xmin)
data = data[data >= xmin]
if xmax:
xmax = float(xmax)
data = data[data <= xmax]
#Special case where we call distribution_fit multiple times to do all comparisons
if distribution == 'all':
print("Analyzing all distributions", file=sys.stderr)
print("Calculating power law fit", file=sys.stderr)
if alpha:
pl_parameters = [alpha]
else:
pl_parameters, loglikelihood = distribution_fit(data, 'power_law', discrete, xmin, xmax, search_method=search_method, estimate_discrete=estimate_discrete)
results = {}
results['xmin'] = xmin
results['xmax'] = xmax
results['discrete'] = discrete
results['fits'] = {}
results['fits']['power_law'] = (pl_parameters, loglikelihood)
print("Calculating truncated power law fit", file=sys.stderr)
tpl_parameters, loglikelihood, R, p = distribution_fit(data, 'truncated_power_law', discrete, xmin, xmax, comparison_alpha=pl_parameters[0], search_method=search_method, estimate_discrete=estimate_discrete)
results['fits']['truncated_power_law'] = (tpl_parameters, loglikelihood)
results['power_law_comparison'] = {}
results['power_law_comparison']['truncated_power_law'] = (R, p)
results['truncated_power_law_comparison'] = {}
supported_distributions = ['exponential', 'lognormal', 'stretched_exponential', 'gamma']
for i in supported_distributions:
print("Calculating %s fit" % (i,), file=sys.stderr)
parameters, loglikelihood, R, p = distribution_fit(data, i, discrete, xmin, xmax, comparison_alpha=pl_parameters[0], search_method=search_method, estimate_discrete=estimate_discrete)
results['fits'][i] = (parameters, loglikelihood)
results['power_law_comparison'][i] = (R, p)
R, p = distribution_compare(data, 'truncated_power_law', tpl_parameters, i, parameters, discrete, xmin, xmax)
results['truncated_power_law_comparison'][i] = (R, p)
return results
#Handle edge case where we don't have enough data
no_data = False
if xmax and all((data > xmax) + (data < xmin)):
#Everything is beyond the bounds of the xmax and xmin
no_data = True
if all(data < xmin):
no_data = True
if len(data) < 2:
no_data = True
if no_data:
from numpy import array
from sys import float_info
parameters = array([0, 0, 0])
if search_method == 'Likelihood':
loglikelihood = -10 ** float_info.max_10_exp
if search_method == 'KS':
loglikelihood = 1
if comparison_alpha is None:
return parameters, loglikelihood
R = 10 ** float_info.max_10_exp
p = 1
return parameters, loglikelihood, R, p
n = float(len(data))
#Initial search parameters, estimated from the data
# print("Calculating initial parameters for search", file=sys.stderr)
if distribution == 'power_law' and not alpha:
initial_parameters = [1 + n / sum(log(data / (xmin)))]
elif distribution == 'exponential':
from numpy import mean
initial_parameters = [1 / mean(data)]
elif distribution == 'stretched_exponential':
from numpy import mean
initial_parameters = [1 / mean(data), 1]
elif distribution == 'truncated_power_law':
from numpy import mean
initial_parameters = [1 + n / sum(log(data / xmin)), 1 / mean(data)]
elif distribution == 'lognormal':
from numpy import mean, std
logdata = log(data)
initial_parameters = [mean(logdata), std(logdata)]
elif distribution == 'negative_binomial':
initial_parameters = [1, .5]
elif distribution == 'gamma':
from numpy import mean
initial_parameters = [n / sum(log(data / xmin)), mean(data)]
if search_method == 'Likelihood':
# print("Searching using maximum likelihood method", file=sys.stderr)
#If the distribution is a continuous power law without an xmax, and we're using the maximum likelihood method, we can compute the parameters and likelihood directly
if distribution == 'power_law' and not discrete and not xmax and not alpha:
            from numpy import array, isnan
alpha = 1 + n /\
sum(log(data / xmin))
loglikelihood = n * log(alpha - 1.0) - n * log(xmin) - alpha * sum(log(data / xmin))
            if isnan(loglikelihood):
loglikelihood = 0
parameters = array([alpha])
return parameters, loglikelihood
elif distribution == 'power_law' and discrete and not xmax and not alpha and estimate_discrete:
            from numpy import array, isnan
alpha = 1 + n /\
sum(log(data / (xmin - .5)))
loglikelihood = n * log(alpha - 1.0) - n * log(xmin) - alpha * sum(log(data / xmin))
            if isnan(loglikelihood):
loglikelihood = 0
parameters = array([alpha])
return parameters, loglikelihood
#Otherwise, we set up a likelihood function
likelihood_function = likelihood_function_generator(distribution, discrete=discrete, xmin=xmin, xmax=xmax)
#Search for the best fit parameters for the target distribution, on this data
from scipy.optimize import fmin
parameters, negative_loglikelihood, iter, funcalls, warnflag, = \
fmin(
lambda p: -sum(log(likelihood_function(p, data))),
initial_parameters, full_output=1, disp=False)
loglikelihood = -negative_loglikelihood
if comparison_alpha:
R, p = distribution_compare(data, 'power_law', [comparison_alpha], distribution, parameters, discrete, xmin, xmax)
return parameters, loglikelihood, R, p
else:
return parameters, loglikelihood
elif search_method == 'KS':
print("Not yet supported. Sorry.", file=sys.stderr)
return
# #Search for the best fit parameters for the target distribution, on this data
# from scipy.optimize import fmin
# parameters, KS, iter, funcalls, warnflag, = \
# fmin(\
# lambda p: -sum(log(likelihood_function(p, data))),\
# initial_parameters, full_output=1, disp=False)
# loglikelihood =-negative_loglikelihood
#
# if comparison_alpha:
# R, p = distribution_compare(data, 'power_law',[comparison_alpha], distribution, parameters, discrete, xmin, xmax)
# return parameters, loglikelihood, R, p
# else:
# return parameters, loglikelihood
def distribution_compare(data, distribution1, parameters1,
distribution2, parameters2,
discrete, xmin, xmax, nested=None, **kwargs):
no_data = False
if xmax and all((data > xmax) + (data < xmin)):
#Everything is beyond the bounds of the xmax and xmin
no_data = True
if all(data < xmin):
no_data = True
if no_data:
R = 0
p = 1
return R, p
likelihood_function1 = likelihood_function_generator(distribution1, discrete, xmin, xmax)
likelihood_function2 = likelihood_function_generator(distribution2, discrete, xmin, xmax)
likelihoods1 = likelihood_function1(parameters1, data)
likelihoods2 = likelihood_function2(parameters2, data)
    if (((distribution1 in distribution2) or
            (distribution2 in distribution1)) and
            nested is None):
print("Assuming nested distributions", file=sys.stderr)
nested = True
from numpy import log
R, p = loglikelihood_ratio(log(likelihoods1), log(likelihoods2),
nested=nested, **kwargs)
return R, p
def likelihood_function_generator(distribution_name, discrete=False, xmin=1, xmax=None):
if distribution_name == 'power_law':
likelihood_function = lambda parameters, data:\
power_law_likelihoods(
data, parameters[0], xmin, xmax, discrete)
elif distribution_name == 'exponential':
likelihood_function = lambda parameters, data:\
exponential_likelihoods(
data, parameters[0], xmin, xmax, discrete)
elif distribution_name == 'stretched_exponential':
likelihood_function = lambda parameters, data:\
stretched_exponential_likelihoods(
data, parameters[0], parameters[1], xmin, xmax, discrete)
elif distribution_name == 'truncated_power_law':
likelihood_function = lambda parameters, data:\
truncated_power_law_likelihoods(
data, parameters[0], parameters[1], xmin, xmax, discrete)
elif distribution_name == 'lognormal':
likelihood_function = lambda parameters, data:\
lognormal_likelihoods(
data, parameters[0], parameters[1], xmin, xmax, discrete)
elif distribution_name == 'negative_binomial':
likelihood_function = lambda parameters, data:\
negative_binomial_likelihoods(
data, parameters[0], parameters[1], xmin, xmax)
elif distribution_name == 'gamma':
likelihood_function = lambda parameters, data:\
gamma_likelihoods(
data, parameters[0], parameters[1], xmin, xmax)
return likelihood_function
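# Example (illustrative sketch, not part of the original module; the parameter
# values are arbitrary, and the snippet is kept commented out so that importing
# this file is unaffected):
#
#    import numpy as np
#    lf = likelihood_function_generator('power_law', discrete=False, xmin=1)
#    likelihoods = lf([2.5], np.array([1.0, 2.0, 10.0]))  # parameters first, then data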
def find_xmin(data, discrete=False, xmax=None, search_method='Likelihood', return_all=False, estimate_discrete=True, xmin_range=None):
from numpy import sort, unique, asarray, argmin, vstack, arange, sqrt
if 0 in data:
print("Value 0 in data. Throwing out 0 values", file=sys.stderr)
data = data[data != 0]
if xmax:
data = data[data <= xmax]
#Much of the rest of this function was inspired by Adam Ginsburg's plfit code, specifically around lines 131-143 of this version: http://code.google.com/p/agpy/source/browse/trunk/plfit/plfit.py?spec=svn359&r=357
if not all(data[i] <= data[i + 1] for i in range(len(data) - 1)):
data = sort(data)
if xmin_range == 'find' or xmin_range is None:
possible_xmins = data
else:
possible_xmins = data[data <= max(xmin_range)]
possible_xmins = possible_xmins[possible_xmins >= min(xmin_range)]
xmins, xmin_indices = unique(possible_xmins, return_index=True)
xmins = xmins[:-1]
if len(xmins) < 2:
from sys import float_info
xmin = 1
D = 1
alpha = 0
loglikelihood = -10 ** float_info.max_10_exp
n_tail = 1
noise_flag = True
Ds = 1
alphas = 0
sigmas = 1
if not return_all:
return xmin, D, alpha, loglikelihood, n_tail, noise_flag
else:
return xmin, D, alpha, loglikelihood, n_tail, noise_flag, xmins, Ds, alphas, sigmas
xmin_indices = xmin_indices[:-1] # Don't look at last xmin, as that's also the xmax, and we want to at least have TWO points to fit!
if search_method == 'Likelihood':
alpha_MLE_function = lambda xmin: distribution_fit(data, 'power_law', xmin=xmin, xmax=xmax, discrete=discrete, search_method='Likelihood', estimate_discrete=estimate_discrete)
fits = asarray(list(map(alpha_MLE_function, xmins)))
elif search_method == 'KS':
alpha_KS_function = lambda xmin: distribution_fit(data, 'power_law', xmin=xmin, xmax=xmax, discrete=discrete, search_method='KS', estimate_discrete=estimate_discrete)[0]
fits = asarray(list(map(alpha_KS_function, xmins)))
params = fits[:, 0]
alphas = vstack(params)[:, 0]
loglikelihoods = fits[:, 1]
ks_function = lambda index: power_law_ks_distance(data, alphas[index], xmins[index], xmax=xmax, discrete=discrete)
Ds = asarray(list(map(ks_function, arange(len(xmins)))))
sigmas = (alphas - 1) / sqrt(len(data) - xmin_indices + 1)
good_values = sigmas < .1
#Find the last good value (The first False, where sigma > .1):
xmin_max = argmin(good_values)
if good_values.all(): # If there are no fits beyond the noise threshold
min_D_index = argmin(Ds)
noise_flag = False
elif xmin_max > 0:
min_D_index = argmin(Ds[:xmin_max])
noise_flag = False
else:
min_D_index = argmin(Ds)
noise_flag = True
xmin = xmins[min_D_index]
D = Ds[min_D_index]
alpha = alphas[min_D_index]
loglikelihood = loglikelihoods[min_D_index]
n_tail = sum(data >= xmin)
if not return_all:
return xmin, D, alpha, loglikelihood, n_tail, noise_flag
else:
return xmin, D, alpha, loglikelihood, n_tail, noise_flag, xmins, Ds, alphas, sigmas
def power_law_ks_distance(data, alpha, xmin, xmax=None, discrete=False, kuiper=False):
from numpy import arange, sort, mean
data = data[data >= xmin]
if xmax:
data = data[data <= xmax]
n = float(len(data))
if n < 2:
if kuiper:
return 1, 1, 2
return 1
if not all(data[i] <= data[i + 1] for i in arange(n - 1)):
data = sort(data)
if not discrete:
Actual_CDF = arange(n) / n
Theoretical_CDF = 1 - (data / xmin) ** (-alpha + 1)
if discrete:
from scipy.special import zeta
if xmax:
bins, Actual_CDF = cumulative_distribution_function(data,xmin=xmin,xmax=xmax)
Theoretical_CDF = 1 - ((zeta(alpha, bins) - zeta(alpha, xmax+1)) /\
(zeta(alpha, xmin)-zeta(alpha,xmax+1)))
if not xmax:
bins, Actual_CDF = cumulative_distribution_function(data,xmin=xmin)
Theoretical_CDF = 1 - (zeta(alpha, bins) /\
zeta(alpha, xmin))
D_plus = max(Theoretical_CDF - Actual_CDF)
D_minus = max(Actual_CDF - Theoretical_CDF)
Kappa = 1 + mean(Theoretical_CDF - Actual_CDF)
if kuiper:
return D_plus, D_minus, Kappa
D = max(D_plus, D_minus)
return D
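# Example (illustrative sketch, not part of the original module; kept commented
# out so that importing this file is unaffected). The KS distance should
# typically be smaller for the exponent that actually generated the data:
#
#    import numpy as np
#    data = (1 - np.random.rand(5000)) ** (-1.0 / 1.5)  # continuous power law, alpha=2.5, xmin=1
#    power_law_ks_distance(data, 2.5, xmin=1) < power_law_ks_distance(data, 4.0, xmin=1)  # usually True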
def power_law_likelihoods(data, alpha, xmin, xmax=False, discrete=False):
if alpha < 0:
from numpy import tile
from sys import float_info
return tile(10 ** float_info.min_10_exp, len(data))
xmin = float(xmin)
data = data[data >= xmin]
if xmax:
data = data[data <= xmax]
if not discrete:
likelihoods = (data ** -alpha) *\
((alpha - 1) * xmin ** (alpha - 1))
if discrete:
if alpha < 1:
from numpy import tile
from sys import float_info
return tile(10 ** float_info.min_10_exp, len(data))
if not xmax:
from scipy.special import zeta
likelihoods = (data ** -alpha) /\
zeta(alpha, xmin)
if xmax:
from scipy.special import zeta
likelihoods = (data ** -alpha) /\
(zeta(alpha, xmin) - zeta(alpha, xmax + 1))
from sys import float_info
likelihoods[likelihoods == 0] = 10 ** float_info.min_10_exp
return likelihoods
def negative_binomial_likelihoods(data, r, p, xmin=0, xmax=False):
#Better to make this correction earlier on in distribution_fit, so as to not recheck for discreteness and reround every time fmin is used.
#if not is_discrete(data):
# print("Rounding to nearest integer values for negative binomial fit.", file=sys.stderr)
# from numpy import around
# data = around(data)
xmin = float(xmin)
data = data[data >= xmin]
if xmax:
data = data[data <= xmax]
from numpy import asarray
    from scipy.special import comb  # scipy.misc.comb was deprecated and later removed; scipy.special.comb is the maintained location
pmf = lambda k: comb(k + r - 1, k) * (1 - p) ** r * p ** k
likelihoods = asarray(list(map(pmf, data))).flatten()
if xmin != 0 or xmax:
xmax = max(data)
from numpy import arange
normalization_constant = sum(list(map(pmf, arange(xmin, xmax + 1))))
likelihoods = likelihoods / normalization_constant
from sys import float_info
likelihoods[likelihoods == 0] = 10 ** float_info.min_10_exp
return likelihoods
def exponential_likelihoods(data, Lambda, xmin, xmax=False, discrete=False):
if Lambda < 0:
from numpy import tile
from sys import float_info
return tile(10 ** float_info.min_10_exp, len(data))
data = data[data >= xmin]
if xmax:
data = data[data <= xmax]
from numpy import exp
if not discrete:
# likelihoods = exp(-Lambda*data)*\
# Lambda*exp(Lambda*xmin)
likelihoods = Lambda * exp(Lambda * (xmin - data)) # Simplified so as not to throw a nan from infs being divided by each other
if discrete:
if not xmax:
likelihoods = exp(-Lambda * data) *\
(1 - exp(-Lambda)) * exp(Lambda * xmin)
if xmax:
likelihoods = exp(-Lambda * data) * (1 - exp(-Lambda))\
/ (exp(-Lambda * xmin) - exp(-Lambda * (xmax + 1)))
from sys import float_info
likelihoods[likelihoods == 0] = 10 ** float_info.min_10_exp
return likelihoods
def stretched_exponential_likelihoods(data, Lambda, beta, xmin, xmax=False, discrete=False):
if Lambda < 0:
from numpy import tile
from sys import float_info
return tile(10 ** float_info.min_10_exp, len(data))
data = data[data >= xmin]
if xmax:
data = data[data <= xmax]
from numpy import exp
if not discrete:
# likelihoods = (data**(beta-1) * exp(-Lambda*(data**beta)))*\
# (beta*Lambda*exp(Lambda*(xmin**beta)))
likelihoods = data ** (beta - 1) * beta * Lambda * exp(Lambda * (xmin ** beta - data ** beta)) # Simplified so as not to throw a nan from infs being divided by each other
if discrete:
if not xmax:
xmax = max(data)
if xmax:
from numpy import arange
X = arange(xmin, xmax + 1)
PDF = X ** (beta - 1) * beta * Lambda * exp(Lambda * (xmin ** beta - X ** beta)) # Simplified so as not to throw a nan from infs being divided by each other
PDF = PDF / sum(PDF)
likelihoods = PDF[(data - xmin).astype(int)]
from sys import float_info
likelihoods[likelihoods == 0] = 10 ** float_info.min_10_exp
return likelihoods
def gamma_likelihoods(data, k, theta, xmin, xmax=False, discrete=False):
if k <= 0 or theta <= 0:
from numpy import tile
from sys import float_info
return tile(10 ** float_info.min_10_exp, len(data))
data = data[data >= xmin]
if xmax:
data = data[data <= xmax]
from numpy import exp
from mpmath import gammainc
# from scipy.special import gamma, gammainc #Not NEARLY numerically accurate enough for the job
if not discrete:
likelihoods = (data ** (k - 1)) / (exp(data / theta) * (theta ** k) * float(gammainc(k)))
#Calculate how much probability mass is beyond xmin, and normalize by it
normalization_constant = 1 - float(gammainc(k, 0, xmin / theta, regularized=True)) # Mpmath's regularized option divides by gamma(k)
likelihoods = likelihoods / normalization_constant
if discrete:
if not xmax:
xmax = max(data)
if xmax:
from numpy import arange
X = arange(xmin, xmax + 1)
PDF = (X ** (k - 1)) / (exp(X / theta) * (theta ** k) * float(gammainc(k)))
PDF = PDF / sum(PDF)
likelihoods = PDF[(data - xmin).astype(int)]
from sys import float_info
likelihoods[likelihoods == 0] = 10 ** float_info.min_10_exp
return likelihoods
def truncated_power_law_likelihoods(data, alpha, Lambda, xmin, xmax=False, discrete=False):
if alpha < 0 or Lambda < 0:
from numpy import tile
from sys import float_info
return tile(10 ** float_info.min_10_exp, len(data))
data = data[data >= xmin]
if xmax:
data = data[data <= xmax]
from numpy import exp
if not discrete:
from mpmath import gammainc
# from scipy.special import gamma, gammaincc #Not NEARLY accurate enough to do the job
# likelihoods = (data**-alpha)*exp(-Lambda*data)*\
# (Lambda**(1-alpha))/\
# float(gammaincc(1-alpha,Lambda*xmin))
#Simplified so as not to throw a nan from infs being divided by each other
likelihoods = (Lambda ** (1 - alpha)) /\
((data ** alpha) * exp(Lambda * data) * gammainc(1 - alpha, Lambda * xmin)).astype(float)
if discrete:
if not xmax:
xmax = max(data)
if xmax:
from numpy import arange
X = arange(xmin, xmax + 1)
PDF = (X ** -alpha) * exp(-Lambda * X)
PDF = PDF / sum(PDF)
likelihoods = PDF[(data - xmin).astype(int)]
from sys import float_info
likelihoods[likelihoods == 0] = 10 ** float_info.min_10_exp
return likelihoods
def lognormal_likelihoods(data, mu, sigma, xmin, xmax=False, discrete=False):
from numpy import log
if sigma <= 0 or mu < log(xmin):
#The standard deviation can't be negative, and the mean of the logarithm of the distribution can't be smaller than the log of the smallest member of the distribution!
from numpy import tile
from sys import float_info
return tile(10 ** float_info.min_10_exp, len(data))
data = data[data >= xmin]
if xmax:
data = data[data <= xmax]
if not discrete:
from numpy import sqrt, exp
# from mpmath import erfc
from scipy.special import erfc
from scipy.constants import pi
likelihoods = (1.0 / data) * exp(-((log(data) - mu) ** 2) / (2 * sigma ** 2)) *\
sqrt(2 / (pi * sigma ** 2)) / erfc((log(xmin) - mu) / (sqrt(2) * sigma))
# likelihoods = likelihoods.astype(float)
if discrete:
if not xmax:
xmax = max(data)
if xmax:
from numpy import arange, exp
# from mpmath import exp
X = arange(xmin, xmax + 1)
# PDF_function = lambda x: (1.0/x)*exp(-( (log(x) - mu)**2 ) / 2*sigma**2)
# PDF = asarray(list(map(PDF_function,X)))
PDF = (1.0 / X) * exp(-((log(X) - mu) ** 2) / (2 * (sigma ** 2)))
PDF = (PDF / sum(PDF)).astype(float)
likelihoods = PDF[(data - xmin).astype(int)]
from sys import float_info
likelihoods[likelihoods == 0] = 10 ** float_info.min_10_exp
return likelihoods
| gpl-3.0 |
kdheepak89/RTDT | RTDT/transit.py | 1 | 13035 | from __future__ import print_function
from google.transit import gtfs_realtime_pb2
import requests
import pandas as pd
from io import BytesIO
import zipfile
import os
import os.path
import datetime
import math
UTC_OFFSET = int(os.getenv('OFFSET', 7))
DEFAULT_LOCATION = {u'lat': 39.7433814, u'lng': -104.98910989999999}
def get_gtfs_data(force=False):
url = 'http://www.rtd-denver.com/GoogleFeeder/google_transit_Apr16_Runboard.zip'
headers_file = 'google_feeder_headers.txt'
last_modified = requests.head(url).headers['Date']
rerequest = False
if not os.path.isfile(headers_file):
rerequest = True
else:
with open(headers_file) as f:
f_line = f.read()
if last_modified not in f_line:
print("File are not the same, submit rerequest")
if force:
rerequest=True
else:
print("File unchanged!")
if not os.path.isfile('stops.txt') or not os.path.isfile('trips.txt'):
rerequest = True
print("Files missing")
if rerequest:
print("Re-requesting data")
request = requests.get(url)
z = zipfile.ZipFile(BytesIO(request.content))
z.extractall()
with open(headers_file, 'w') as f:
print(last_modified, file=f)
return z
def convert_df_to_list(df):
# bus20_east_list = [str(i) for i in bus20_east_df['trip_id'].tolist()]
# bus20_west_list = [str(i) for i in bus20_west_df['trip_id'].tolist()]
return([str(i) for i in df['trip_id'].tolist()])
def get_real_time_data_request_response(header=False):
if header:
r = requests.head('http://www.rtd-denver.com/google_sync/TripUpdate.pb', auth=(os.getenv('RTD_USERNAME'), os.getenv('RTD_PASSWORD')))
return(r.headers)
else:
r = requests.get('http://www.rtd-denver.com/google_sync/TripUpdate.pb', auth=(os.getenv('RTD_USERNAME'), os.getenv('RTD_PASSWORD')))
if r.ok:
return(r.content)
else:
return None
def get_entities(bus_list):
feed = gtfs_realtime_pb2.FeedMessage()
content = get_real_time_data_request_response()
feed.ParseFromString(content)
list_entities = []
for entity in feed.entity:
if entity.trip_update.trip.trip_id in bus_list:
list_entities.append(entity)
print("Getting entities")
print(list_entities)
return(list_entities)
def get_markers_for_list_entities(list_entities, stops_df, current_location=DEFAULT_LOCATION, trips_df=None):
if trips_df is None:
trips_df = pd.read_csv('trips.txt')
marker = []
for entity in list_entities:
stop_time_update = entity.trip_update.stop_time_update[0]
delay = stop_time_update.departure.delay
uncertainty = stop_time_update.departure.uncertainty
dt = datetime.datetime.fromtimestamp(stop_time_update.departure.time)
dt = dt - datetime.timedelta(hours=UTC_OFFSET)
departure_time = dt.strftime('%H:%M')
closest_stop_time = 0
closest_stop_name = ''
trip_id = entity.trip_update.trip.trip_id
route_id, route_name = get_bus_name(trip_id, trips_df)
lat, lon, stop_name = get_location_of_stop_time_update(stop_time_update)
marker.append((lat, lon, stop_name, departure_time, delay, uncertainty, closest_stop_time, closest_stop_name, route_id, route_name, trip_id))
return marker
def get_location_of_stop_time_update(stop_time_update):
stops_df = pd.read_csv('stops.txt')
lat = stops_df[stops_df['stop_id']==int(stop_time_update.stop_id)]['stop_lat'].iloc[0]
lon = stops_df[stops_df['stop_id']==int(stop_time_update.stop_id)]['stop_lon'].iloc[0]
stop_name = stops_df[stops_df['stop_id']==int(stop_time_update.stop_id)]['stop_name'].iloc[0].replace('[ X Stop ]', '')
return lat, lon, stop_name
def get_stop_location_list(stop_time_update):
list_stop_location = []
for stop_time in stop_time_update:
lat, lon, stop_name = get_location_of_stop_time_update(stop_time)
arrival_time_dt = datetime.datetime.fromtimestamp(stop_time.arrival.time) - datetime.timedelta(hours=UTC_OFFSET)
arrival_time = arrival_time_dt.strftime('%H:%M')
        if stop_time.arrival.delay != 0 or stop_time.arrival.uncertainty != 0:
            arrival_time = arrival_time + '\nwith a delay of {} with uncertainty {}'.format(
                stop_time.arrival.delay, stop_time.arrival.uncertainty)
departure_time_dt = datetime.datetime.fromtimestamp(stop_time.departure.time) - datetime.timedelta(hours=UTC_OFFSET)
departure_time = departure_time_dt.strftime('%H:%M')
        if stop_time.departure.delay != 0 or stop_time.departure.uncertainty != 0:
            departure_time = departure_time + '\nwith a delay of {} with uncertainty {}'.format(
                stop_time.departure.delay, stop_time.departure.uncertainty)
list_stop_location.append({'lat': lat, 'lng': lon, 'stop_name': stop_name, 'departure_time': departure_time, 'arrival_time': arrival_time})
return list_stop_location
def get_closest_stop_time(closest_stop_id, entity):
for stop_time_update in entity.trip_update.stop_time_update:
if int(stop_time_update.stop_id) == int(closest_stop_id):
dt = datetime.datetime.fromtimestamp(stop_time_update.departure.time)
dt = dt - datetime.timedelta(hours=UTC_OFFSET)
departure_time = dt.strftime('%H:%M')
return(departure_time)
def get_stop_name(stop_id, stops_df):
    return(stops_df.loc[stops_df['stop_id'] == int(stop_id)]['stop_name'].values[0])
def find_closest_stop(stops_df, latlon, stop_id_list):
lat = latlon[0]
lon = latlon[1]
stops_df = stops_df[stops_df['stop_id'].isin(stop_id_list)]
stops_df['minimum_distance'] = (stops_df['stop_lat'] - lat)**2 + (stops_df['stop_lon'] - lon)**2
closest_stop_id = stops_df.loc[stops_df['minimum_distance'].argmin()]['stop_id']
return closest_stop_id
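# Example (illustrative sketch, not part of the original module; the toy stop
# table and coordinates are made up, and the snippet is kept commented out so
# that importing this file is unaffected):
#
#    import pandas as pd
#    toy_stops = pd.DataFrame({'stop_id': [1, 2, 3],
#                              'stop_lat': [39.74, 39.75, 39.80],
#                              'stop_lon': [-104.99, -104.98, -104.90]})
#    find_closest_stop(toy_stops, (39.749, -104.981), [1, 2, 3])  # -> 2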
def get_bus_name(trip_id, trips_df):
return(trips_df[trips_df['trip_id'] == int(trip_id)]['route_id'].values[0],
trips_df[trips_df['trip_id'] == int(trip_id)]['trip_headsign'].values[0])
def get_stop_id_list(entity):
stop_id_list = []
for sq in entity.trip_update.stop_time_update:
stop_id_list.append(int(sq.stop_id))
return stop_id_list
def get_bus_list(trips_df):
trips_df['unique_route_id'] = trips_df['route_id']+': '+trips_df['trip_headsign']
bl = trips_df['unique_route_id'].unique()
return(bl.tolist())
def get_location_of_routes(l):
routePaths = {}
for entity in l:
trip_id = entity.trip_update.trip.trip_id
routePaths[trip_id] = get_stop_location_list(entity.trip_update.stop_time_update)
return routePaths
def get_all_current_position_markers(route, current_location=DEFAULT_LOCATION):
stops_df = pd.read_csv('stops.txt')
l = get_currently_active_trips(route)
print("Inside get all current position markers")
print(l)
markers = {route: get_markers_for_list_entities(l, stops_df, current_location)}
routePaths = get_location_of_routes(l)
data = {'markers': markers,
'routePaths': routePaths }
return(data)
def parse_route_name(route):
route_id = route.split(':')[0].replace('Route ', '').strip(' ')
trip_headsign = route.split(':')[1].strip(' ')
return route_id, trip_headsign
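# Example (illustrative sketch, not part of the original module; the route name
# is made up, and the call is kept commented out so that importing this file is
# unaffected):
#
#    parse_route_name('Route 20: East 20th Avenue')  # -> ('20', 'East 20th Avenue')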
def get_trip_id(route, trips_df):
dt = datetime.datetime.now()
dt = dt - datetime.timedelta(hours=4)
dt.isoweekday()
saturday = dt.isoweekday() == 6
sunday = dt.isoweekday() == 7
if saturday:
trips_df = trips_df[trips_df['service_id'] == 'SA']
elif sunday:
trips_df = trips_df[trips_df['service_id'] == 'SU']
else: # weekday:
trips_df = trips_df[trips_df['service_id'] == 'WK']
trips_df['unique_route_id'] = 'Route ' + trips_df['route_id']+': '+trips_df['trip_headsign']
route_id, trip_headsign = parse_route_name(route)
trips_df = trips_df[trips_df['route_id'] == route_id]
trips_df = trips_df[trips_df['trip_headsign'] == trip_headsign]
return trips_df['trip_id'].tolist()
def get_currently_active_trips(route):
trips_df = pd.read_csv('trips.txt')
total_trip_list = [str(item) for item in get_trip_id(route, trips_df)]
print("total trip list is ")
print(total_trip_list)
return get_entities(total_trip_list)
def get_route_name(trip_id):
df = pd.read_csv('./trips.txt')
route_df = df[df['trip_id'] == int(trip_id)]
return(str(route_df['route_id'].values[0]) + ': ' + route_df['trip_headsign'].values[0])
def get_route_data(trip_id):
return({
'stop_time_update': get_stop_time_update(trip_id),
'route_name': get_route_name(trip_id),
})
def get_stop_time_update(trip_id):
feed = gtfs_realtime_pb2.FeedMessage()
content = get_real_time_data_request_response()
feed.ParseFromString(content)
list_entities = []
realtime_entity_list = feed.entity
for trip in realtime_entity_list:
if trip.trip_update.trip.trip_id == str(trip_id):
return([stop_time_update_to_dict(stu) for stu in trip.trip_update.stop_time_update])
def stop_time_update_to_dict(stu):
lat, lon, stop_name = get_location_of_stop_time_update(stu)
return({
'location': [lat, lon],
'stop_name': stop_name,
'stop_sequence': stu.stop_sequence,
'arrival': {
'time': time_convert(stu.arrival.time),
'uncertainty': stu.arrival.uncertainty,
'delay': stu.arrival.delay
},
'departure': {
'time': time_convert(stu.departure.time),
'uncertainty': stu.departure.uncertainty,
'delay': stu.departure.delay
},
'schedule_relationship': 'SCHEDULED' if stu.schedule_relationship==0 else 'UNKNOWN'
})
def time_convert(t):
dt = datetime.datetime.fromtimestamp(t)
dt = dt - datetime.timedelta(hours=UTC_OFFSET)
departure_time = dt.strftime('%H:%M')
return(departure_time)
def get_trip_ids(route_id, trip_headsign):
trips_df = pd.read_csv("./trips.txt")
route_df = trips_df[(trips_df['route_id'] == str(route_id)) & (trips_df['trip_headsign'] == trip_headsign)]
feed = gtfs_realtime_pb2.FeedMessage()
content = get_real_time_data_request_response()
feed.ParseFromString(content)
list_entities = []
realtime_entity_list = feed.entity
current_routes = []
for trip in realtime_entity_list:
if any(route_df['trip_id'] == int(trip.trip_update.trip.trip_id)):
lat, lon, stop_name = get_location_of_stop_time_update(trip.trip_update.stop_time_update[0])
current_routes.append({
'trip_name': route_id + ": " + trip_headsign,
'trip_id': int(trip.trip_update.trip.trip_id),
'location': [lat, lon],
'current_location': stop_name,
'expected_departure': time_convert(trip.trip_update.stop_time_update[0].departure.time)
})
return(current_routes)
def distance_on_unit_sphere(x, lat2, long2):
lat1 = x['stop_lat']
long1 = x['stop_lon']
# Convert latitude and longitude to
# spherical coordinates in radians.
degrees_to_radians = math.pi/180.0
# phi = 90 - latitude
phi1 = (90.0 - lat1)*degrees_to_radians
phi2 = (90.0 - lat2)*degrees_to_radians
# theta = longitude
theta1 = long1*degrees_to_radians
theta2 = long2*degrees_to_radians
# Compute spherical distance from spherical coordinates.
# For two locations in spherical coordinates
# (1, theta, phi) and (1, theta', phi')
# cosine( arc length ) =
# sin phi sin phi' cos(theta-theta') + cos phi cos phi'
# distance = rho * arc length
cos = (math.sin(phi1)*math.sin(phi2)*math.cos(theta1 - theta2) +
math.cos(phi1)*math.cos(phi2))
arc = math.acos( cos )
# Remember to multiply arc by the radius of the earth
# in your favorite set of units to get length.
return 3959 * arc
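# Example (illustrative sketch, not part of the original module; the second
# point is an arbitrary location near downtown Denver, and the call is kept
# commented out so that importing this file is unaffected). The first argument
# is a row-like mapping with 'stop_lat'/'stop_lon' keys, matching how this
# function is used with DataFrame.apply in list_of_closest_buses below:
#
#    downtown = {'stop_lat': 39.7433814, 'stop_lon': -104.9891099}
#    distance_on_unit_sphere(downtown, 39.7508, -104.9997)  # great-circle distance in miles (roughly 0.75)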
def list_of_closest_buses(lat, lng):
trips_df = pd.read_csv('./trips.txt')
stop_df = pd.read_csv('./stops.txt')
stop_df['proximity'] = stop_df.apply(distance_on_unit_sphere, args=(lat,lng), axis=1)
stop_df = stop_df.sort_values(by='proximity')
stop_times_df = pd.read_csv('./stop_times.txt')
unique_bus_names = []
i = 0
while len(unique_bus_names) < 5:
build_bus_name_list(unique_bus_names, stop_df, stop_times_df, trips_df, i)
i = i+1
return(unique_bus_names)
def build_bus_name_list(unique_bus_names, stop_df, stop_times_df, trips_df, i):
stop_id = stop_df.iloc[i]['stop_id']
def match_bus_name(x):
route_id, route_name = get_bus_name(x['trip_id'], trips_df)
return(route_id + ": " + route_name)
for item in stop_times_df[stop_times_df['stop_id'] == stop_id].apply(match_bus_name, axis=1).unique():
if item not in unique_bus_names:
unique_bus_names.append(item)
| bsd-3-clause |
yyjiang/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
MobleyLab/SAMPL6 | host_guest/Analysis/Scripts/analyze_sampling.py | 1 | 116143 | #!/usr/bin/env python
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
import collections
import copy
import itertools
import json
import math
import os
import numpy as np
import pandas as pd
import scipy as sp
import seaborn as sns
from matplotlib import pyplot as plt
from pkganalysis.stats import mean_confidence_interval
from pkganalysis.sampling import (SamplingSubmission, YankSamplingAnalysis,
YANK_N_ITERATIONS, DG_KEY, DDG_KEY, export_dictionary)
from pkganalysis.submission import (load_submissions)
# =============================================================================
# CONSTANTS
# =============================================================================
YANK_METHOD_PAPER_NAME = 'OpenMM/HREX'
# Paths to input data.
SAMPLING_SUBMISSIONS_DIR_PATH = '../SubmissionsDoNotUpload/975/'
YANK_ANALYSIS_DIR_PATH = 'YankAnalysis/Sampling/'
SAMPLING_ANALYSIS_DIR_PATH = '../SAMPLing/'
SAMPLING_DATA_DIR_PATH = os.path.join(SAMPLING_ANALYSIS_DIR_PATH, 'Data')
SAMPLING_PLOT_DIR_PATH = os.path.join(SAMPLING_ANALYSIS_DIR_PATH, 'Plots')
SAMPLING_PAPER_DIR_PATH = os.path.join(SAMPLING_ANALYSIS_DIR_PATH, 'PaperImages')
# All system ids.
SYSTEM_IDS = [
'CB8-G3-0', 'CB8-G3-1', 'CB8-G3-2', 'CB8-G3-3', 'CB8-G3-4',
'OA-G3-0', 'OA-G3-1', 'OA-G3-2', 'OA-G3-3', 'OA-G3-4',
'OA-G6-0', 'OA-G6-1', 'OA-G6-2', 'OA-G6-3', 'OA-G6-4'
]
# Kelly's colors for maximum contrast.
# "gray95", "gray13", "gold2", "plum4", "darkorange1", "lightskyblue2", "firebrick", "burlywood3", "gray51", "springgreen4", "lightpink2", "deepskyblue4", "lightsalmon2", "mediumpurple4", "orange", "maroon", "yellow3", "brown4", "yellow4", "sienna4", "chocolate", "gray19"
KELLY_COLORS = ['#F2F3F4', '#222222', '#F3C300', '#875692', '#F38400', '#A1CAF1', '#BE0032', '#C2B280', '#848482', '#008856', '#E68FAC', '#0067A5', '#F99379', '#604E97', '#F6A600', '#B3446C', '#DCD300', '#882D17', '#8DB600', '#654522', '#E25822', '#2B3D26']
TAB10_COLORS = sns.color_palette('tab10')
# Index of Kelly's colors associated to each submission.
SUBMISSION_COLORS = {
'AMBER/APR': 'dodgerblue',#KELLY_COLORS[11],
'OpenMM/REVO': 'gold', #KELLY_COLORS[7],
'OpenMM/SOMD': KELLY_COLORS[4],
'GROMACS/EE': 'darkviolet', #KELLY_COLORS[3],
'GROMACS/EE-fullequil': 'hotpink', #KELLY_COLORS[10],
YANK_METHOD_PAPER_NAME: '#4ECC41', #'limegreen', #KELLY_COLORS[9],
'GROMACS/NS-DS/SB-long': KELLY_COLORS[6],
'GROMACS/NS-DS/SB': KELLY_COLORS[1],
'GROMACS/NS-Jarz-F': TAB10_COLORS[0],
'GROMACS/NS-Jarz-R': TAB10_COLORS[1],
'GROMACS/NS-Gauss-F': TAB10_COLORS[2],
'GROMACS/NS-Gauss-R': TAB10_COLORS[4],
'NAMD/BAR': 'saddlebrown'
}
SUBMISSION_LINE_STYLES = {
'AMBER/APR': '--',
'OpenMM/REVO': '-',
'OpenMM/SOMD': '-',
'GROMACS/EE': '-',
'GROMACS/EE-fullequil': '-',
YANK_METHOD_PAPER_NAME: '-',
'GROMACS/NS-DS/SB-long': '-',
'GROMACS/NS-DS/SB': '-',
'GROMACS/NS-Jarz-F': '-',
'GROMACS/NS-Jarz-R': '-',
'GROMACS/NS-Gauss-F': '-',
'GROMACS/NS-Gauss-R': '-',
'NAMD/BAR': '--',
}
N_ENERGY_EVALUATIONS_SCALE = 1e6
# =============================================================================
# UTILITY FUNCTIONS
# =============================================================================
def reduce_to_first_significant_digit(quantity, uncertainty):
"""Truncate a quantity to the first significant digit of its uncertainty."""
first_significant_digit = math.floor(math.log10(abs(uncertainty)))
quantity = round(quantity, -first_significant_digit)
uncertainty = round(uncertainty, -first_significant_digit)
return quantity, uncertainty
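# Example (illustrative sketch, not part of the original script; kept commented
# out so that importing this module is unaffected):
#
#    reduce_to_first_significant_digit(1.234, 0.056)  # -> (1.23, 0.06)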
def load_yank_analysis():
"""Load the YANK analysis in a single dataframe."""
yank_free_energies = {}
for system_id in SYSTEM_IDS:
file_path = os.path.join(YANK_ANALYSIS_DIR_PATH, 'yank-{}.json'.format(system_id))
with open(file_path, 'r') as f:
yank_free_energies[system_id] = json.load(f)
return yank_free_energies
def fit_efficiency(mean_data, find_best_fit=True):
"""Compute the efficiency by fitting the model and using only the asymptotic data.
We fit using the simulation percentage as the independent value
because it is less prone to overflowing during fitting. We then
return the efficiency in units of (kcal/mol)**2/n_energy_evaluations.
"""
from scipy.optimize import curve_fit
def model(x, log_efficiency):
return np.exp(log_efficiency) / x
vars = mean_data['std'].values**2
cost = mean_data['Simulation percentage'].values
# cost = mean_data['N energy evaluations'].values / 1e7
if find_best_fit:
        # Find fit with best error up to discarding 50% of the calculation.
max_discarded = math.floor(0.5*len(cost))
else:
# Use all the data.
max_discarded = 1
# Fit.
fits = []
for n_discarded in range(max_discarded):
cost_fit = cost[n_discarded:]
vars_fit = vars[n_discarded:]
fit = curve_fit(model, cost_fit, vars_fit, p0=[0.0])
fits.append((np.exp(fit[0]), fit[1]))
# Find the fit with the minimum error.
n_discarded = fits.index(min(fits, key=lambda x: x[1]))
# Convert efficiency / simulation_percentage to efficiency / n_energy_evaluations
efficiency = fits[n_discarded][0][0] / 100 * mean_data['N energy evaluations'].values[-1]
# efficiency = fits[n_discarded][0][0] * 1e7
return efficiency, n_discarded
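# Example (illustrative sketch, not part of the original script; the synthetic
# DataFrame below is made up and simply obeys var = efficiency/cost exactly,
# and the snippet is kept commented out so that importing this module is
# unaffected):
#
#    import numpy as np
#    import pandas as pd
#    percentage = np.linspace(1, 100, 100)
#    mean_data = pd.DataFrame({
#        'Simulation percentage': percentage,
#        'N energy evaluations': percentage * 1e6,
#        'std': np.sqrt(2.0 / percentage),
#    })
#    efficiency, n_discarded = fit_efficiency(mean_data)
#    # Here the fitted efficiency comes out to roughly 2/100 * 1e8 = 2e6.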
def export_submissions(submissions, reference_free_energies):
"""Export the submission data to CSV and JSON format."""
for submission in submissions:
exported_data = {}
# Export data of the 5 independent replicates.
for system_id in sorted(submission.data['System ID'].unique()):
system_id_data = submission.data[submission.data['System ID'] == system_id]
exported_data[system_id] = collections.OrderedDict([
('DG', system_id_data[DG_KEY].values.tolist()),
('dDG', system_id_data[DDG_KEY].values.tolist()),
('cpu_times', system_id_data['CPU time [s]'].values.tolist()),
('n_energy_evaluations', system_id_data['N energy evaluations'].values.tolist()),
])
# Export data of mean trajectory and confidence intervals.
mean_free_energies = submission.mean_free_energies()
for system_name in mean_free_energies['System name'].unique():
system_name_data = mean_free_energies[mean_free_energies['System name'] == system_name]
# Obtain free energies and bias.
free_energies = system_name_data[DG_KEY].values
free_energies_ci = system_name_data['$\Delta$G CI'].values
reference_diff = free_energies - reference_free_energies.loc[system_name, '$\Delta$G [kcal/mol]']
exported_data[system_name + '-mean'] = collections.OrderedDict([
('DG', free_energies.tolist()),
('DG_CI', free_energies_ci.tolist()),
('reference_difference', reference_diff.tolist()),
('n_energy_evaluations', system_name_data['N energy evaluations'].values.tolist()),
])
# Export.
file_base_path = os.path.join(SAMPLING_DATA_DIR_PATH, submission.receipt_id)
export_dictionary(exported_data, file_base_path)
# =============================================================================
# PLOTTING FUNCTIONS
# =============================================================================
def plot_mean_free_energy(mean_data, ax, x='Simulation percentage',
color_mean=None, color_ci=None, zorder=None,
start=None, stride=1, scale_n_energy_evaluations=True,
plot_ci=True, **plot_kwargs):
"""Plot mean trajectory with confidence intervals."""
ci_key = '$\Delta$G CI'
if start is None:
# Discard the first datapoint which are 0.0 (i.e. no estimate).
start = np.nonzero(mean_data[DG_KEY].values)[0][0]
if x == 'N energy evaluations' and scale_n_energy_evaluations:
# Plot in millions of energy evaluations.
scale = N_ENERGY_EVALUATIONS_SCALE
else:
scale = 1
x = mean_data[x].values[start::stride] / scale
mean_dg = mean_data[DG_KEY].values[start::stride]
sem_dg = mean_data[ci_key].values[start::stride]
# Plot mean trajectory confidence intervals.
if plot_ci:
ax.fill_between(x, mean_dg + sem_dg, mean_dg - sem_dg, alpha=0.15, color=color_ci, zorder=zorder)
# Plot the mean free energy trajectory.
if zorder is not None:
# Push the CI shaded area in the background so that the trajectories are always visible.
zorder += 20
ax.plot(x, mean_dg, color=color_mean, alpha=1.0, zorder=zorder, **plot_kwargs)
return ax
def plot_mean_data(mean_data, axes, color=None, ls=None, label=None, x='N energy evaluations',
zorder=None, plot_std=True, plot_bias=True, plot_ci=True):
"""Plot free energy, variance and bias as a function of the cost in three different axes."""
# Do not plot the part of data without index.
first_nonzero_idx = np.nonzero(mean_data[DG_KEY].values)[0][0]
# If the x-axis is the number of energy/force evaluations, plot it in units of millions.
if x == 'N energy evaluations':
scale = N_ENERGY_EVALUATIONS_SCALE
else:
scale = 1
# Plot the submission mean trajectory with CI.
plot_mean_free_energy(mean_data, x=x, ax=axes[0],
color_mean=color, color_ci=color, ls=ls, zorder=zorder,
start=first_nonzero_idx, label=label, plot_ci=plot_ci)
# Plot standard deviation of the trajectories.
if plot_std:
axes[1].plot(mean_data[x].values[first_nonzero_idx:] / scale,
mean_data['std'].values[first_nonzero_idx:], color=color, alpha=0.8,
ls=ls, zorder=zorder, label=label)
if plot_bias:
axes[2].plot(mean_data[x].values[first_nonzero_idx:] / scale,
mean_data['bias'].values[first_nonzero_idx:], color=color, alpha=0.8,
ls=ls, zorder=zorder, label=label)
def align_yaxis(ax1, v1, ax2, v2):
"""Adjust ax2 ylimit so that v2 in in the twin ax2 is aligned to v1 in ax1.
From https://stackoverflow.com/questions/10481990/matplotlib-axis-with-two-scales-shared-origin .
"""
_, y1 = ax1.transData.transform((0, v1))
_, y2 = ax2.transData.transform((0, v2))
inv = ax2.transData.inverted()
_, dy = inv.transform((0, 0)) - inv.transform((0, y1-y2))
miny, maxy = ax2.get_ylim()
ax2.set_ylim(miny+dy, maxy+dy)
# =============================================================================
# FIGURE 1 - SAMPLING CHALLENGE OVERVIEW
# =============================================================================
def plot_example_bias_variance(yank_analysis, type='mixed', cost='generic',
max_n_eval_percentage=1.0,
mixed_proportion=0.5,
model_free_energy=None,
plot_experimental_value=False):
"""Free energy trajectories used to visualize bias and variance on the plots.
    This is used to illustrate how bias and uncertainty are meant to be interpreted in the paper.
Parameters
----------
type : str, optional
Can be 'single' (plot only CB8-G3-1), 'all' (plot all system IDs of CB8-G3),
        'mean' (plot mean trajectory and uncertainties), and 'mixed' (first part is
all system IDs and second part is mean trajectory and uncertainties).
cost : str, optional
Can be 'generic' (no label on x-axis) or 'neval' (x-axis in number of
energy evaluations).
mixed_proportion : float, optional
The proportion of all System IDs and mean trajectories in mixed-type plots.
"""
# sns.set_context('paper', font_scale=1.6)
sns.set_style('white')
sns.set_context('paper', font_scale=1.0)
# Load the data
n_iterations = 40000
cb8_data = yank_analysis.get_free_energies_from_iteration(n_iterations, system_name='CB8-G3', mean_trajectory=False)
cb8_data_mean = yank_analysis.get_free_energies_from_iteration(n_iterations, system_name='CB8-G3', mean_trajectory=True)
max_n_eval = max(cb8_data_mean['N energy evaluations'])
max_n_eval_scaled = int(max_n_eval / N_ENERGY_EVALUATIONS_SCALE)
max_displayed_n_eval = next(x for x in cb8_data_mean['N energy evaluations'] if x >= max_n_eval * max_n_eval_percentage)
max_displayed_n_eval_scaled = int(max_displayed_n_eval / N_ENERGY_EVALUATIONS_SCALE)
# Determine the asymptotic free energy if not given.
if model_free_energy is None:
model_free_energy = cb8_data_mean[DG_KEY].values[-1]
# Scale the number of energy evaluations.
cb8_data.loc[:,'N energy evaluations'] /= N_ENERGY_EVALUATIONS_SCALE
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(2.5, 1.8))
if type == 'single':
# Plot only CB8-G3-1.
cb8_data_1 = cb8_data[cb8_data['System ID'] == 'CB8-G3-1']
sns.lineplot(data=cb8_data_1, x='N energy evaluations', y=DG_KEY,
hue='System ID', palette='bright', ax=ax, alpha=0.6)
elif type == 'all':
# Plot the 5 replicates individual trajectories.
sns.lineplot(data=cb8_data, x='N energy evaluations', y=DG_KEY,
hue='System ID', palette='bright', ax=ax, alpha=0.6)
elif type == 'mean':
# Plot the submission mean trajectory with CI.
plot_mean_free_energy(cb8_data_mean, x='N energy evaluations', ax=ax,
color_mean='black', plot_ci=True,
color_ci='black',
scale_n_energy_evaluations=True)
elif type == 'mixed':
# Plot all System IDs for the first half and mean/uncertainty in second half.
half_n_eval = max_displayed_n_eval_scaled * mixed_proportion
cb8_data_first_half = cb8_data[cb8_data['N energy evaluations'] <= half_n_eval + max_n_eval_scaled / 100]
sns.lineplot(data=cb8_data_first_half, x='N energy evaluations', y=DG_KEY,
hue='System ID', palette='bright', ax=ax, alpha=0.6)
cb8_data_second_half = cb8_data_mean[cb8_data_mean['N energy evaluations'] >= half_n_eval * N_ENERGY_EVALUATIONS_SCALE]
plot_mean_free_energy(cb8_data_second_half, x='N energy evaluations', ax=ax,
color_mean='black', plot_ci=True,
color_ci=(0.3, 0.3, 0.3), scale_n_energy_evaluations=True,
ls='--')
try:
ax.get_legend().remove()
except AttributeError:
pass
# Set limits
x_lim = (0, max_displayed_n_eval_scaled)
ax.set_xlim(x_lim)
y_lim = (-12.5, -10.5)
ax.set_ylim(y_lim)
# Plot model and experiment indications. Neither value is real data; they are just illustrative.
model_free_energy = -10.75
final_prediction = cb8_data_mean[cb8_data_mean['N energy evaluations'] == max_displayed_n_eval][DG_KEY].values[0]
ax.plot(x_lim, [model_free_energy]*2, color='gray', ls='--')
ax.text(x_lim[-1]+(max_n_eval_scaled*max_n_eval_percentage)/100, model_free_energy, r'$\Delta$G$_{\theta}$')
ax.text(x_lim[-1]+(max_n_eval_scaled*max_n_eval_percentage)/100, final_prediction - 0.13, r'$\overline{\Delta G}$')
# Plot experimental value horizontal line only for generic plot.
if plot_experimental_value:
experiment_dg = -11.75
plt.plot(x_lim, [experiment_dg]*2, color='black')
if cost == 'neval':
ax.set_xlabel('N force/energy evaluations')
else:
ax.set_xlabel('Computational cost', labelpad=-5)
ax.set_ylabel('$\Delta$G', labelpad=-5)
ax.set_yticklabels([])
ax.set_xticklabels([])
plt.tight_layout(pad=0.1, rect=[0.0, 0.0, 0.90, 1.0])
# Save file.
figure_dir_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'Figure 1 - host-guest')
os.makedirs(figure_dir_path, exist_ok=True)
output_base_path = os.path.join(figure_dir_path, 'example_trajectories')
plt.savefig(output_base_path + '.pdf')
# =============================================================================
# FIGURE 2 - MEAN ERROR AND RELATIVE EFFICIENCY CARTOON
# =============================================================================
def plot_mean_error_cartoon():
"""Plot the cartoon used to explain mean error and relative efficiency.
This is used as an example to clarify some subtleties in the definition
of efficiency.
"""
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
sns.set_context('paper')
sns.set_style('white')
def err_decay_func_square(decay_coeff, c):
return decay_coeff / np.sqrt(c)
def mean_error_square(decay_coeff, c_min, c_max):
return 2 * decay_coeff * (np.sqrt(c_max) - np.sqrt(c_min)) / (c_max - c_min)
def err_decay_func_B(decay_coeff, c):
return decay_coeff / c**(5/6)
def mean_error_B(decay_coeff, c_min, c_max):
return 6 * decay_coeff * (c_max**(1/6) - c_min**(1/6)) / (c_max - c_min)
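# The mean_error_* closed forms above are the cost-averaged decay curves, e.g.
#   E[err] = 1/(c_max - c_min) * integral_{c_min}^{c_max} A/sqrt(c) dc
#          = 2*A*(sqrt(c_max) - sqrt(c_min)) / (c_max - c_min)
# for the A/sqrt(c) decay, and analogously
#   E[err] = 6*A*(c_max**(1/6) - c_min**(1/6)) / (c_max - c_min)
# for the A/c**(5/6) decay used for method B.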
decay_coeffs = {
'A': 1.0,
'B': 2.5,
'Z': 1.5,
}
c_ranges = collections.OrderedDict([
("A'", np.arange(1, 4.5, 0.1)),
("A''", np.arange(3, 6, 0.1)),
("B", np.arange(2, 6.5, 0.1)),
("Z", np.arange(1, 6.5, 0.1)),
])
# Determine colors.
colors = {m: 'C'+str(i) for i, m in enumerate(sorted(c_ranges))}
# Plot the error trajectories.
fig, ax = plt.subplots(figsize=(3.5, 2.6))
# method_names = ["B", "Z", "A'", "A''"]
method_names = ["Z", "A'", "A''"]
for method_name in method_names:
color = colors[method_name]
c_range = c_ranges[method_name]
decay_coeff = decay_coeffs[method_name[0]]
if method_name == 'B':
err_decay_func = err_decay_func_B
else:
err_decay_func = err_decay_func_square
err = err_decay_func(decay_coeff, c_range)
# Plot error area.
ax.plot(c_range, err, color=color, label=method_name, zorder=1)
ax.fill_between(c_range, err, 0, color=color, alpha=0.5, zorder=0)
# Add method label.
c_method_label_idx = int(len(c_range) / 8)
ax.text(c_range[c_method_label_idx], err[c_method_label_idx]+0.01, method_name, fontsize=12)
if method_name[0] == 'A':
# Plot mean error.
c_min, c_max = min(c_range), max(c_range)
mean_err = mean_error_square(decay_coeff, c_min, c_max)
# Start the mean-error horizontal line where the error curve equals the
# mean error: mean_err = A/sqrt(c)  =>  c_mean = (A/mean_err)**2.
c_mean = (decay_coeff / mean_err)**2
ax.plot([0, c_mean], [mean_err, mean_err], color='black', ls='--', alpha=0.8, zorder=1)
# Add label mean error.
# ax.text(1.05, mean_err+0.025, '$\mathbb{E}[RMSE_{' + method_name + '}]$', fontsize=9)
ax.text(-0.3, mean_err+0.025, '$\mathbb{E}[RMSE_{' + method_name + '}]$', fontsize=9)
# Add c_min/max labels.
ax.text(c_min-0.4, -0.1, 'c$_{min,' + method_name + '}$', fontsize=9)
ax.text(c_max-0.4, -0.1, 'c$_{max,' + method_name + '}$', fontsize=9)
# Configure axes.
ax.set_xlim(1, 6.4)
ax.set_ylim(0, 2)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_ylabel('$RMSE(\Delta G)$')
ax.set_xlabel('computational cost')
# Pull axes labels closest to axes.
ax.tick_params(axis='x', which='major', pad=2.0)
ax.yaxis.set_label_coords(0.0, 0.65)
# Plot the relative efficiencies in an inset plot.
ax_ins = inset_axes(ax, width='100%', height='100%', bbox_to_anchor=[145, 115, 90, 50])
# Compute relative efficiencies with respect to Z.
relative_efficiencies = collections.OrderedDict()
for method_name in [name for name in method_names if name != 'Z']:
c_min, c_max = min(c_ranges[method_name]), max(c_ranges[method_name])
if method_name == 'B':
mean_error_func = mean_error_B
else:
mean_error_func = mean_error_square
mean_err_method = mean_error_func(decay_coeffs[method_name[0]], c_min, c_max)
mean_err_Z = mean_error_square(decay_coeffs['Z'], c_min, c_max)
relative_efficiencies[method_name] = -np.log(mean_err_method/mean_err_Z)
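# With this definition the relative efficiency is positive when the method
# has a smaller mean error than the reference Z:
#   e_{method/Z} = -ln(E[err_method]/E[err_Z]) > 0  iff  E[err_method] < E[err_Z].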
# Plot horizontal bar plot with all efficiencies.
labels, rel_effs = zip(*relative_efficiencies.items())
bar_colors = [colors[m] for m in labels]
labels = [l + '/Z' for l in labels]
# labels = ['$e_{err,' + str(l) + '/Z}$' for l in labels]
ax_ins.barh(y=labels, width=rel_effs, color=bar_colors, alpha=0.85)
ax_ins.set_title('relative efficiency', pad=2.5)
# plt.tight_layout(rect=[0.0, 0.0, 1.0, 1.0])
plt.tight_layout(rect=[0.1, 0.0, 1.0, 1.0])
# Pull axes labels closest to axes.
ax_ins.set_xticks([0.0])
ax_ins.grid(axis='x')
ax_ins.tick_params(axis='x', which='major', pad=0.0)
ax_ins.tick_params(axis='y', which='major', pad=0.0)
output_dir_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'Figure2-efficiency_cartoon')
os.makedirs(output_dir_path, exist_ok=True)
plt.savefig(os.path.join(output_dir_path, 'error_trajectories.pdf'))
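# Optional sanity check (illustrative sketch, not part of the original figure code
# and not called anywhere): numerically verify the closed-form cost-averaged errors
# used in plot_mean_error_cartoon() by trapezoidal integration.
def _check_mean_error_closed_forms(decay_coeff=1.0, c_min=1.0, c_max=4.0):
    """Verify the A/sqrt(c) and A/c**(5/6) mean-error formulas numerically."""
    import numpy as np
    c = np.linspace(c_min, c_max, 200001)
    def cost_average(err):
        # Trapezoidal integral of err(c) over [c_min, c_max] divided by the interval length.
        return float(((err[1:] + err[:-1]) / 2 * np.diff(c)).sum() / (c_max - c_min))
    closed_sqrt = 2 * decay_coeff * (np.sqrt(c_max) - np.sqrt(c_min)) / (c_max - c_min)
    closed_b = 6 * decay_coeff * (c_max**(1/6) - c_min**(1/6)) / (c_max - c_min)
    assert abs(cost_average(decay_coeff / np.sqrt(c)) - closed_sqrt) < 1e-6
    assert abs(cost_average(decay_coeff / c**(5/6)) - closed_b) < 1e-6
    return closed_sqrt, closed_b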
# =============================================================================
# FIGURE 3 - FREE ENERGY TRAJECTORIES
# =============================================================================
def plot_submissions_trajectory(submissions, yank_analysis, axes, y_limits=None,
plot_std=True, plot_bias=True, plot_bias_to_reference=False,
system_names=None):
"""Plot free energy trajectories, std, and bias of the given submissions."""
if system_names is None:
system_names = ['CB8-G3', 'OA-G3', 'OA-G6']
n_systems = len(system_names)
max_n_energy_evaluations = {system_name: 0 for system_name in system_names}
min_n_energy_evaluations = {system_name: np.inf for system_name in system_names}
# Handle default arguments.
if y_limits is None:
# 3 by 3 matrix of y limits for the plots.
y_limits = [[None for _ in range(n_systems)] for _ in range(n_systems)]
# We need a 2D array of axes for the code to work even if we're not plotting std or bias.
try:
axes_shape = len(axes.shape)
except AttributeError:
axes = np.array([[axes]])
else:
if axes_shape == 1:
axes = np.array([axes])
# Build a dictionary mapping submissions and system names to their mean data.
all_mean_data = {}
for submission in submissions:
# We always want to print in order
all_mean_data[submission.paper_name] = {}
mean_free_energies = submission.mean_free_energies()
for system_name in system_names:
# CB8-G3 calculations for GROMACS/EE did not converge.
if submission.name == 'Expanded-ensemble/MBAR' and system_name == 'CB8-G3':
continue
# Add mean free energies for this system.
system_mean_data = mean_free_energies[mean_free_energies['System name'] == system_name]
n_energy_evaluations = system_mean_data['N energy evaluations'].values[-1]
all_mean_data[submission.paper_name][system_name] = system_mean_data
# Keep track of the maximum and minimum number of energy evaluations,
# which will be used to determine how to truncate the plotted reference
# data and determine the zorder of the trajectories respectively.
max_n_energy_evaluations[system_name] = max(max_n_energy_evaluations[system_name],
n_energy_evaluations)
min_n_energy_evaluations[system_name] = min(min_n_energy_evaluations[system_name],
n_energy_evaluations)
# Add also reference YANK calculations if provided.
if yank_analysis is not None:
all_mean_data[YANK_METHOD_PAPER_NAME] = {}
for system_name in system_names:
system_mean_data = yank_analysis.get_free_energies_from_energy_evaluations(
max_n_energy_evaluations[system_name], system_name=system_name, mean_trajectory=True)
all_mean_data[YANK_METHOD_PAPER_NAME][system_name] = system_mean_data
# Create a table mapping submissions and system name to the zorder used
# to plot the free energy trajectory so that smaller shaded areas are on
# top of bigger ones.
# First find the average CI for all methods up to min_n_energy_evaluations.
methods_cis = {name: {} for name in system_names}
for method_name, method_mean_data in all_mean_data.items():
for system_name, system_mean_data in method_mean_data.items():
# Find index of all energy evaluations < min_n_energy_evaluations.
n_energy_evaluations = system_mean_data['N energy evaluations'].values
last_idx = np.searchsorted(n_energy_evaluations, min_n_energy_evaluations[system_name], side='right')
cis = system_mean_data['$\Delta$G CI'].values[:last_idx]
methods_cis[system_name][method_name] = np.mean(cis)
# For each system, order methods from smallest CI (plot on top) to greatest CI (background).
zorders = {name: {} for name in system_names}
for system_name, system_cis in methods_cis.items():
ordered_methods = sorted(system_cis.keys(), key=lambda method_name: system_cis[method_name])
for zorder, method_name in enumerate(ordered_methods):
zorders[system_name][method_name] = zorder
# The columns are in order CB8-G3, OA-G3, and OA-G6.
system_columns = {'CB8-G3': 0, 'OA-G3': 1, 'OA-G6': 2}
# Plot submissions in alphabetical order to order the legend labels.
for method_name in sorted(all_mean_data.keys()):
submission_mean_data = all_mean_data[method_name]
submission_color = SUBMISSION_COLORS[method_name]
submission_ls = SUBMISSION_LINE_STYLES[method_name]
# Plot free energy trajectories.
for system_name, mean_data in submission_mean_data.items():
ax_idx = system_columns[system_name]
# The OA predictions of the NS short protocol are the same as those in the long-protocol submission file.
if method_name == 'GROMACS/NS-DS/SB-long' and system_name != 'CB8-G3':
# Just add the label.
axes[0][ax_idx].plot([], color=submission_color, ls=submission_ls, label=method_name)
continue
# Update maximum number of energy evaluations.
n_energy_evaluations = mean_data['N energy evaluations'].values[-1]
max_n_energy_evaluations[system_name] = max(max_n_energy_evaluations[system_name],
n_energy_evaluations)
# Determine zorder and plot.
zorder = zorders[system_name][method_name]
plot_mean_data(mean_data, axes[:,ax_idx], color=submission_color,
ls=submission_ls, zorder=zorder, label=method_name,
plot_std=plot_std, plot_bias=plot_bias)
# Fix labels.
axes[0][0].set_ylabel('$\Delta$G [kcal/mol]')
if plot_std:
axes[1][0].set_ylabel('std($\Delta$G) [kcal/mol]')
if plot_bias:
axes[2][0].set_ylabel('bias [kcal/mol]')
central_column_idx = int(len(axes[0])/2)
axes[-1][central_column_idx].set_xlabel('number of energy/force evaluations [10$^6$]')
# Fix axes limits.
for ax_idx, system_name in enumerate(system_names):
for row_idx in range(len(axes)):
ax = axes[row_idx][ax_idx]
# Set the x-axis limits.
ax.set_xlim((0, max_n_energy_evaluations[system_name]/N_ENERGY_EVALUATIONS_SCALE))
# Keep the x-axis label only at the bottom row.
if row_idx != len(axes)-1:
ax.xaxis.set_ticklabels([])
y_lim = y_limits[row_idx][ax_idx]
if y_lim is not None:
ax.set_ylim(y_lim)
# Set the system name in the title.
axes[0][ax_idx].set_title(system_name)
# Create a bias axis AFTER the ylim has been set.
if yank_analysis is not None and plot_bias_to_reference:
for ax_idx, (system_name, ax) in enumerate(zip(system_names, axes[0])):
yank_full_mean_data = yank_analysis.get_system_free_energies(system_name, mean_trajectory=True)
ref_free_energy = yank_full_mean_data[DG_KEY].values[-1]
with sns.axes_style('white'):
ax2 = ax.twinx()
# Plot a vertical line to fix the scale.
vertical_line = np.linspace(*ax.get_ylim()) - ref_free_energy
ax2.plot([50] * len(vertical_line), vertical_line, alpha=0.0001)
ax2.grid(alpha=0.5, linestyle='dashed', zorder=0)
# We add the bias y-label only on the rightmost Axis.
if ax_idx == n_systems - 1:
ax2.set_ylabel('Bias to reference [kcal/mol]')
# Set the 0 of the twin axis to the YANK reference free energy.
align_yaxis(ax, ref_free_energy, ax2, 0.0)
def plot_all_entries_trajectory(submissions, yank_analysis, zoomed=False):
"""Plot free energy trajectories, std, and bias of the challenge entries."""
sns.set_style('whitegrid')
sns.set_context('paper')
# Create a figure with 3 columns (one for each system) and 3 rows.
# The first row contains the free energy trajectory and CI, the second
# the standard deviation of the estimator, and the third the bias to the
# asymptotic value.
if zoomed:
figsize = (7.25, 7.0) # Without REVO
else:
figsize = (7.25, 7.0) # With REVO
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=figsize)
# Optionally, remove REVO.
if zoomed:
submissions = [s for s in submissions if s.name not in ['WExploreRateRatio']]
if zoomed:
# Y-axis limits when REVO calculations are excluded.
y_limits = [
[(-15, -10), (-9, -4), (-9, -4)],
[(0, 2), (0, 0.8), (0, 0.8)],
[(-3, 1), (-0.6, 0.6), (-0.6, 0.6)],
]
else:
# Y-axis limits when REVO calculations are included.
y_limits = [
[(-17, -9), (-13, -5), (-13, -5)],
[(0, 2), (0, 1.75), (0, 1.75)],
[(-4, 4), (-0.6, 0.6), (-0.6, 0.6)],
]
plot_submissions_trajectory(submissions, yank_analysis, axes, y_limits=y_limits)
# Show/save figure.
if zoomed:
plt.tight_layout(h_pad=0.2, rect=[0.0, 0.00, 1.0, 0.92], w_pad=0.0) # Without REVO
else:
plt.tight_layout(h_pad=0.2, rect=[0.0, 0.00, 1.0, 0.92]) # With REVO
# Plot legend.
if zoomed:
# bbox_to_anchor = (2.52, 1.55) # Without REVO.
bbox_to_anchor = (2.4, 1.48)
else:
bbox_to_anchor = (2.4, 1.48) # With REVO.
axes[0][1].legend(loc='upper right', bbox_to_anchor=bbox_to_anchor,
fancybox=True, ncol=4)
plt.subplots_adjust(wspace=0.35)
# plt.show()
if zoomed:
file_name = 'Figure3-free_energy_trajectories_zoomed'
else:
file_name = 'Figure3-free_energy_trajectories'
figure_dir_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'Figure3-free_energy_trajectories')
os.makedirs(figure_dir_path, exist_ok=True)
output_base_path = os.path.join(figure_dir_path, file_name)
plt.savefig(output_base_path + '.pdf')
# plt.savefig(output_base_path + '.png', dpi=500)
# =============================================================================
# FIGURE 4 - NONEQUILIBRIUM SWITCHING ESTIMATOR COMPARISON
# =============================================================================
def plot_all_nonequilibrium_switching(submissions):
"""Plot free energy trajectories, std, and bias of the nonequilibrium-switching calculations."""
# Create a figure with 3 columns (one for each system) and a single row
# containing the free energy trajectories and CIs.
figsize = (7.25, 3.5)
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=figsize)
# Select nonequilibrium-switching calculations with estimators.
submissions = [s for s in submissions if 'NS' in s.paper_name]
# Y-axis limits.
y_limits = [
[(-20, 5), (-40, 0), (-40, 0)]
]
plot_submissions_trajectory(submissions, yank_analysis=None, axes=axes,
y_limits=y_limits, plot_std=False, plot_bias=False)
# Show/save figure.
plt.tight_layout(pad=0.0, rect=[0.0, 0.00, 1.0, 0.85])
# Plot legend.
legend = axes[0].legend(loc='upper left', bbox_to_anchor=(0.6, 1.3),
fancybox=True, ncol=3)
# Change legend labels to refer to estimator used rather than overall method ID.
legend_labels_map = {
'GROMACS/NS-DS/SB-long': 'BAR-long',
'GROMACS/NS-DS/SB': 'BAR',
'GROMACS/NS-Jarz-F': 'Jarzynski-Forward',
'GROMACS/NS-Jarz-R': 'Jarzynski-Reverse',
'GROMACS/NS-Gauss-F': 'Gaussian-Forward',
'GROMACS/NS-Gauss-R': 'Gaussian-Reverse',
}
for text in legend.get_texts():
text.set_text(legend_labels_map[text.get_text()])
plt.subplots_adjust(wspace=0.35)
# plt.show()
figure_dir_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'Figure4-nonequilibrium_comparison')
os.makedirs(figure_dir_path, exist_ok=True)
output_base_path = os.path.join(figure_dir_path, 'Figure4-nonequilibrium_comparison')
plt.savefig(output_base_path + '.pdf')
# plt.savefig(output_base_path + '.png', dpi=500)
# =============================================================================
# FIGURE 5 - BAROSTAT AND RESTRAINT
# =============================================================================
# Directories containing the volume information of YANK and GROMACS/EE.
BAROSTAT_DATA_DIR_PATH = os.path.join('..', 'SAMPLing', 'Data', 'BarostatData')
YANK_VOLUMES_DIR_PATH = os.path.join(BAROSTAT_DATA_DIR_PATH, 'YankVolumes')
EE_VOLUMES_DIR_PATH = os.path.join(BAROSTAT_DATA_DIR_PATH, 'EEVolumes')
def plot_volume_distributions(axes, plot_predicted=False):
"""Plot the volume distributions obtained with Monte Carlo and Berendsen barostat."""
import scipy.stats
import scipy.integrate
from simtk import unit
# Load data.
mc_volumes = collections.OrderedDict([
(1, np.load(os.path.join(YANK_VOLUMES_DIR_PATH, 'volumes_pressure100.npy'))),
(100, np.load(os.path.join(YANK_VOLUMES_DIR_PATH, 'volumes_pressure10000.npy'))),
])
mc_volumes_hrex = collections.OrderedDict([
(1, np.load(os.path.join(YANK_VOLUMES_DIR_PATH, 'hrex_state_volumes_state0.npy'))),
(58, np.load(os.path.join(YANK_VOLUMES_DIR_PATH, 'hrex_state_volumes_state58.npy'))),
])
b_volumes = collections.OrderedDict([
(1, np.load(os.path.join(EE_VOLUMES_DIR_PATH, '1atm_vanilla.npy'))),
(100, np.load(os.path.join(EE_VOLUMES_DIR_PATH, '100atm_vanilla.npy'))),
])
b_volumes_ee = collections.OrderedDict([
(1, np.load(os.path.join(EE_VOLUMES_DIR_PATH, '1atm_expanded.npy'))),
(100, np.load(os.path.join(EE_VOLUMES_DIR_PATH, '100atm_expanded.npy'))),
])
# Print some statistics for each distribution.
for volume_trajectories, label in [(mc_volumes, 'MC-MD '),
(mc_volumes_hrex, 'MC-HREX'),
(b_volumes, 'BB-MD '),
(b_volumes_ee, 'BB-EE ')]:
for pressure, trajectory in volume_trajectories.items():
n = len(trajectory)
t_stat = 2.326 # 98% CI
mean = np.mean(trajectory)
sem = scipy.stats.sem(trajectory)
mean_ci = t_stat * sem
var = np.var(trajectory, ddof=1)
# Standard error of the variance, assuming normally distributed volumes
sev = var * np.sqrt(2 / (n-1))
var_ci = t_stat * sev
skew = scipy.stats.skew(trajectory)
# Standard error of the skewness, assuming normally distributed volumes
ses = np.sqrt( 6*n*(n-1) / ((n-2)*(n+1)*(n+3)) )
skew_ci = t_stat * ses
print('{}-{} (n={}): mean={:.3f} +- {:.3f}nm^3\t\tvar={:.3f} +- {:.3f}\tskew={:.3f} +- {:.3f}'.format(
pressure, label, n, mean, mean_ci, var, var_ci, skew, skew_ci))
# Plot the 1atm vs 100atm comparison.
barostats = ['B', 'MC']
for ax, volume_trajectories, barostat in zip(axes, [b_volumes, mc_volumes], barostats):
barostat += ',MD'
barostat = 'MD'
for pressure, trajectory in volume_trajectories.items():
label = '$\\rho_{{\mathrm{{{}}}}}$(V|{}atm)'.format(barostat, pressure)
ax = sns.distplot(trajectory, label=label, hist=False, ax=ax)
if plot_predicted:
# Plot predicted distribution.
beta = 1.0 / (unit.BOLTZMANN_CONSTANT_kB * 298.15*unit.kelvin)
p1 = 1.0 * unit.atmosphere
p2 = 100.0 * unit.atmosphere
volumes = np.linspace(78.0, 82.0, num=200)
fit = scipy.stats.norm
# Fit the original distribution.
original_pressure, new_pressure = list(volume_trajectories.keys())
original_trajectory = list(volume_trajectories.values())[0]
fit_parameters = fit.fit(original_trajectory)
# Find normalizing constant predicted distribution.
predicted_distribution = lambda v: np.exp(-beta*(p2 - p1)*v*unit.nanometer**3) * fit.pdf([v], *fit_parameters)
normalizing_factor = scipy.integrate.quad(predicted_distribution, volumes[0], volumes[-1])[0]
predicted = np.array([predicted_distribution(v) / normalizing_factor for v in volumes])
# Set the scale.
label = '$\\rho_{{\mathrm{{{}}}}}$(V|{}atm)$\cdot e^{{\\beta ({}atm - {}atm) V}}$'.format(barostat, original_pressure, new_pressure, original_pressure)
ax.plot(volumes, predicted, ls='--', label=label)
# ax.plot(volumes, [fit.pdf([v], *fit_parameters) for v in volumes], label='original')
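# The predicted curve follows from reweighting the isothermal-isobaric volume
# distribution between pressures:
#   rho(V|p2) = rho(V|p1) * exp(-beta*(p2 - p1)*V) / Z,
# where Z is the normalizing factor computed by numerical integration above.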
# Plot comparison MD vs expanded ensemble and HREX volumes.
for ax_idx, (trajectory, label) in enumerate([
(b_volumes_ee[1], 'B,EE'), (mc_volumes_hrex[1], 'MC,HREX')
]):
label = 'E'
ax = axes[ax_idx]
label = '$\\rho_{{\mathrm{{{}}}}}$(V|1atm)'.format(label)
sns.distplot(trajectory, label=label, hist=False, ax=ax)
# Set titles and configure axes.
axes[0].set_title('Berendsen barostat volume distribution', pad=2.0)
axes[1].set_title('Monte Carlo barostat volume distribution', pad=2.0)
for ax_idx in range(len(axes)):
axes[ax_idx].set_xlim((78.8, 81.2))
axes[ax_idx].set_ylim((0.0, 6.0))
axes[ax_idx].set_ylabel('density')
axes[0].set_xlabel('', labelpad=0.3)
axes[1].set_xlabel('Volume [nm^3]', labelpad=0.3)
# Create single legend for both MC and B barostat axes.
bbox_to_anchor = (-0.1, -0.15)
axes[0].legend(fontsize='xx-small', loc='upper left', bbox_to_anchor=bbox_to_anchor, ncol=4,
fancybox=True, labelspacing=0.7, handletextpad=0.4, columnspacing=1.1,)
# axes[0].get_legend().remove()
axes[1].get_legend().remove()
plt.tight_layout(pad=0, rect=[0.0, 0.0, 1.0, 1.0])
# Directory with the restraint information.
RESTRAINT_DATA_DIR_PATH = os.path.join('YankAnalysis', 'RestraintAnalysis')
# The state index of the discharged state with LJ interactions intact.
DISCHARGED_STATE = {
'CB8-G3': 25,
'OA-G3': 32,
'OA-G6': 29
}
# The final free energy predictions without restraint unbiasing.
BIASED_FREE_ENERGIES = {
'CB8-G3-0': -10.643,
'CB8-G3-1': -10.533,
'CB8-G3-2': -10.463,
'CB8-G3-3': None, # TODO: Run the biased analysis
'CB8-G3-4': -10.324,
'OA-G3-0': -5.476,
'OA-G3-1': -5.588,
'OA-G3-2': -5.486,
'OA-G3-3': -5.510,
'OA-G3-4': -5.497,
'OA-G6-0': -5.669,
'OA-G6-1': -5.665,
'OA-G6-2': -5.767,
'OA-G6-3': -5.737,
'OA-G6-4': -5.788,
}
def plot_restraint_distance_distribution(system_id, ax, kde=True, iteration_set=None):
"""Plot the distribution of restraint distances at bound, discharged, and decoupled states.
Return the 99.99-percentile restraint radius that was used as a cutoff during analysis.
"""
n_iterations = YANK_N_ITERATIONS + 1 # Count also iteration 0.
system_name = system_id[:-2]
discharged_state_idx = DISCHARGED_STATE[system_name]
# Load all distances cached during the analysis.
cache_dir_path = os.path.join('pkganalysis', 'cache', system_id.replace('-', ''))
cached_distances_file_path = os.path.join(cache_dir_path, 'restraint_distances_cache.npz')
distances_kn = np.load(cached_distances_file_path)['arr_0']
# Distances are in nm but we plot in Angstrom.
distances_kn *= 10
n_states = int(len(distances_kn) / n_iterations)
# Use the same colors that are used in the water analysis figures.
color_palette = sns.color_palette('viridis', n_colors=n_states)
color_palette = [color_palette[i] for i in (0, discharged_state_idx, -1)]
# Isolate distances in the bound, discharged (only LJ), and decoupled state.
distances_kn_bound = distances_kn[:n_iterations]
distances_kn_discharged = distances_kn[(discharged_state_idx-1)*n_iterations:discharged_state_idx*n_iterations]
distances_kn_decoupled = distances_kn[(n_states-1)*n_iterations:]
# Filter iterations.
if iteration_set is not None:
distances_kn_bound = distances_kn_bound[iteration_set]
distances_kn_discharged = distances_kn_discharged[iteration_set]
distances_kn_decoupled = distances_kn_decoupled[iteration_set]
assert len(distances_kn_bound) == len(distances_kn_decoupled)
# Plot the distributions.
# sns.distplot(distances_kn, ax=ax, kde=True, label='all states')
sns.distplot(distances_kn_bound, ax=ax, kde=kde, label='bound', color=color_palette[0])
sns.distplot(distances_kn_discharged, ax=ax, kde=kde, label='discharged', color=color_palette[1])
sns.distplot(distances_kn_decoupled, ax=ax, kde=kde, label='decoupled', color=color_palette[2])
# Plot the threshold used for analysis, computed as the
# 99.99-percentile of all distances in the bound state.
distance_cutoff = np.percentile(a=distances_kn_bound, q=99.99)
limits = ax.get_ylim()
ax.plot([distance_cutoff for _ in range(100)],
np.linspace(limits[0], limits[1]/2, num=100), color='black')
return distance_cutoff
def plot_restraint_profile(system_id, ax, restraint_cutoff):
"""Plot the free energy as a function of the restraint cutoff."""
# Load the free energy profile for this system.
restraint_profile_file_path = os.path.join(RESTRAINT_DATA_DIR_PATH,
system_id.replace('-', '') + '.json')
with open(restraint_profile_file_path, 'r') as f:
free_energies_profile = json.load(f)
# Reorder the free energies by increasing cutoff and convert str keys to floats.
free_energies_profile = [(float(d), f) for d, f in free_energies_profile.items()]
free_energies_profile = sorted(free_energies_profile, key=lambda x: x[0])
distance_cutoffs, free_energies = list(zip(*free_energies_profile))
f, df = list(zip(*free_energies))
# Convert string to floats.
distance_cutoffs = [float(c) for c in distance_cutoffs]
# Plot profile.
ax.errorbar(x=distance_cutoffs, y=f, yerr=df, label='after reweighting')
# Plot biased free energy
biased_f = BIASED_FREE_ENERGIES[system_id]
x = np.linspace(*ax.get_xlim())
ax.plot(x, [biased_f for _ in x], label='before reweighting')
# Plot restraint distance cutoff.
limits = ax.get_ylim()
x = [restraint_cutoff for _ in range(100)]
y = np.linspace(limits[0], limits[1], num=100)
ax.plot(x, y, color='black')
def plot_restraint_analysis(system_id, axes):
"""Plot distribution of restraint distances and free energy profile on two axes."""
# Histograms of restraint distances/energies.
ax = axes[0]
kde = True
restraint_cutoff = plot_restraint_distance_distribution(system_id, ax, kde=kde)
# Set restraint distance distribution labels and titles.
ax.set_title('Restrained ligand-receptor distance', pad=2.0)
if kde is False:
ax.set_ylabel('Number of samples')
else:
ax.set_ylabel('density')
ax.legend(loc='upper right', fontsize='x-small')
ax.set_xlabel('Restrained distance [$\mathrm{\AA}$]', labelpad=0.3)
# Free energy as a function of restraint distance.
ax = axes[1]
ax.set_title('$\Delta G$ as a function of restraint radius cutoff', pad=2.0 )
plot_restraint_profile(system_id, ax, restraint_cutoff)
# Labels and legend.
ax.set_xlabel('Restraint radius cutoff [$\mathrm{\AA}$]', labelpad=0.3)
ax.set_ylabel('$\Delta G$ [kcal/mol]')
ax.legend(fontsize='x-small')
def plot_restraint_and_barostat_analysis():
"""Plot the Figure showing info for the restraint and barostat analysis."""
import seaborn as sns
from matplotlib import pyplot as plt
sns.set_style('whitegrid')
sns.set_context('paper', font_scale=1.0)
# Create two columns, each of them share the x-axis.
fig = plt.figure(figsize=(7.25, 4))
# Restraint distribution axes.
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(223, sharex=ax1)
barostat_axes = [ax1, ax2]
# Volume distribution axes.
ax3 = fig.add_subplot(222)
ax4 = fig.add_subplot(224, sharex=ax3)
restraint_axes = [ax3, ax4]
# Plot barostat analysis.
plot_volume_distributions(barostat_axes, plot_predicted=True)
# Plot restraint analysis.
system_id = 'OA-G3-0'
plot_restraint_analysis(system_id, restraint_axes)
# Configure axes.
restraint_axes[0].set_xlim((0, 10.045))
restraint_axes[1].set_ylim((-7, -3.9))
for ax in restraint_axes + barostat_axes:
ax.tick_params(axis='x', which='major', pad=0.1)
ax.tick_params(axis='y', which='major', pad=0.1)
plt.tight_layout(pad=0.3)
# plt.show()
output_file_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'Figure5-restraint_barostat',
'restraint_barostat.pdf')
os.makedirs(os.path.dirname(output_file_path), exist_ok=True)
plt.savefig(output_file_path)
# =============================================================================
# FIGURE 6 - HREX INITIAL BIAS
# =============================================================================
def plot_yank_system_bias(system_name, data_dir_paths, axes, shift_to_origin=True, plot_std=True):
"""Plot the YANK free energy trajectoies when discarding initial samples for a single system."""
color_palette = sns.color_palette('viridis', n_colors=len(data_dir_paths)+1)
# Plot trajectories with truncated data.
all_iterations = set()
for data_idx, data_dir_path in enumerate(data_dir_paths):
yank_analysis = YankSamplingAnalysis(data_dir_path)
# In the YankAnalysis folder, each analysis starting from
# iteration N is in the folder "iterN/".
last_dir_name = os.path.basename(os.path.normpath(data_dir_path))
label = last_dir_name[4:]
# First color is for the full data.
color = color_palette[data_idx+1]
# Collect all iterations that we'll plot for the full data.
mean_data = yank_analysis.get_system_free_energies(system_name, mean_trajectory=True)
all_iterations.update(mean_data['HREX iteration'].values.tolist())
# Simulate plotting starting from the origin.
if shift_to_origin:
mean_data['HREX iteration'] -= mean_data['HREX iteration'].values[0]
plot_mean_data(mean_data, axes, x='HREX iteration', color=color,
label=label, plot_std=plot_std, plot_bias=False, plot_ci=False)
# Plot trajectory with full data.
color = color_palette[0]
# Plot an early iteration and all the iterations analyzed for the bias.
yank_analysis = YankSamplingAnalysis(YANK_ANALYSIS_DIR_PATH)
system_ids = [system_name + '-' + str(i) for i in range(5)]
first_iteration = yank_analysis.get_system_iterations(system_ids[0])[2]
iterations = [first_iteration] + sorted(all_iterations)
mean_data = yank_analysis._get_free_energies_from_iterations(
iterations, system_ids, mean_trajectory=True)
# Simulate plotting starting from the origin.
if shift_to_origin:
mean_data['HREX iteration'] -= mean_data['HREX iteration'].values[0]
plot_mean_data(mean_data, axes, x='HREX iteration', color=color,
label='0', plot_std=plot_std, plot_bias=False, plot_ci=False)
axes[0].set_title(system_name)
def plot_yank_bias(plot_std=True, figure_dir_path=None):
"""Plot YANK free energy trajectories when discarding initial samples."""
# For each system in what_to_plot, plot the free energy trajectories
# obtained when discarding different numbers of initial iterations,
# optionally shifting all sub-trajectories to the origin.
what_to_plot = [
('CB8-G3', False),
# ('CB8-G3', True),
('OA-G3', False),
# ('OA-G3', False),
('OA-G6', False),
]
if plot_std:
n_rows = 2
else:
n_rows = 1
n_cols = len(what_to_plot)
fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=(7.25, 4.0))
# The loops are based on a two dimensional array of axes.
if n_rows == 1:
axes = np.array([axes])
# Sort paths by how many samples they have.
data_dir_paths = ['YankAnalysis/BiasAnalysis/iter{}/'.format(i) for i in [1000, 2000, 4000, 8000, 16000, 24000]]
for column_idx, (system_name, shift_to_origin) in enumerate(what_to_plot):
plot_yank_system_bias(system_name, data_dir_paths, axes[:,column_idx],
shift_to_origin=shift_to_origin, plot_std=plot_std)
title = system_name + ' (shifted)' if shift_to_origin else system_name
axes[0,column_idx].set_title(title)
# Fix axes limits and labels.
ylimits = {
'CB8-G3': (-12.5, -10.5),
'OA-G3': (-8, -6),
'OA-G6': (-8, -6)
}
for column_idx, (system_name, _) in enumerate(what_to_plot):
axes[0][column_idx].set_ylim(ylimits[system_name])
if plot_std:
axes[1][column_idx].set_ylim((0, 0.6))
for row_idx, ax_idx in itertools.product(range(n_rows), range(n_cols)):
# Control the number of ticks for the x axis.
axes[row_idx][ax_idx].locator_params(axis='x', nbins=4)
# Set x limits for number of iterations.
axes[row_idx][ax_idx].set_xlim((0, YANK_N_ITERATIONS))
# Remove ticks labels that are shared with the last row.
for row_idx, ax_idx in itertools.product(range(n_rows-1), range(n_cols)):
axes[row_idx][ax_idx].set_xticklabels([])
# Set axes labels.
axes[0][0].set_ylabel('$\Delta$G [kcal/mol]')
if plot_std:
axes[1][0].set_ylabel('std($\Delta$G) [kcal/mol]')
# If there is an odd number of columns print x label only on the central one.
if n_cols % 2 == 1:
axes[-1][1].set_xlabel('HREX iteration')
else:
for ax in axes[-1]:
ax.set_xlabel('HREX iteration')
plt.tight_layout(h_pad=0.1, rect=[0.0, 0.00, 1.0, 0.91])
handles, labels = axes[0][0].get_legend_handles_labels()
handles = [handles[-1]] + handles[:-1]
labels = [labels[-1]] + labels[:-1]
bbox_to_anchor = (0.4, 1.53)
axes[0][0].legend(handles, labels, loc='upper left', bbox_to_anchor=bbox_to_anchor,
title='number of discarded initial iterations', ncol=len(data_dir_paths)+1,
fancybox=True, labelspacing=0.8, handletextpad=0.5, columnspacing=1.2,
fontsize='small')
# plt.show()
if figure_dir_path is None:
figure_dir_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'Figure6-bias_hrex')
os.makedirs(figure_dir_path, exist_ok=True)
output_file_path = os.path.join(figure_dir_path, 'Figure6-bias_hrex')
plt.savefig(output_file_path + '.pdf')
# plt.savefig(output_file_path + '.png', dpi=600)
# =============================================================================
# SUPPORTING INFORMATION - EXAMPLE OF HREX BIAS
# =============================================================================
def simulate_correlation_samples():
"""Simulation of bias from same initial configuration.
There are 3 states modeled as different harmonic oscillators, but all
or almost all of the samples come from the first (bound) state to
simulate what happens when the replicas don't decorrelate fast enough.
The hypothesis is that starting all replicas from the bound state causes
the initial free energy estimate to be artificially negative when the
correlation times are long.
The second (discharged) state is just a shifted harmonic oscillator
(same free energy as bound state). The third (unbound) is shifted
and has much higher entropy.
"""
from numpy.random import normal
from pymbar import MBAR
def harmonic_oscillator_free_energy(sigma):
"""Analytical expression for the free energy of a harmonic oscillator."""
#return - np.log(2 * np.pi * sigma**2) * 3.0 / 2.0 # 3D oscillator
return - np.log(np.sqrt(2 * np.pi) * sigma)
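# In units of kT, the configurational partition function of a 1D harmonic
# oscillator whose Boltzmann distribution has width sigma is
#   Z = integral exp(-x**2 / (2*sigma**2)) dx = sqrt(2*pi)*sigma,
# so the free energy returned above is F = -ln(Z) = -ln(sqrt(2*pi)*sigma).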
def harmonic_oscillator_potential(x, loc, std):
"""Compute potential of the given positions given location
and standard deviation of the Gaussian distribution.
Potentials are returned in units of kT.
"""
spring_constant = 1 / std**2
return spring_constant / 2.0 * (x - loc)**2
def print_free_energies(Deltaf_ij, dDeltaf_ij):
mbar_str = ', '.join(['{:.4f} +- {:.4f}'.format(f, df) for f, df in zip(Deltaf_ij[:,0], dDeltaf_ij[:,0])])
print('MBAR :', mbar_str)
analytical_str = ', '.join(['{:.4f} '.format(f) for f in analytical_Deltaf])
print('Analytical:', analytical_str)
def compute_mbar_free_energy(all_samples, shifts, stds, analytical_f):
n_states = len(all_samples)
# u_kn[k,n] is the reduced potential energy n-th sample evaluated at state k.
u_kn = np.empty(shape=(n_states, n_states*n_samples))
# Convert samples to potentials.
for k in range(n_states):
for sampled_k, samples in enumerate(all_samples):
start = sampled_k * n_samples
end = (sampled_k + 1) * n_samples
u_kn[k,start:end] = harmonic_oscillator_potential(samples, loc=shifts[k], std=stds[k])
# Compute MBAR free energy.
N_k = np.array([n_samples] * n_states)
mbar = MBAR(u_kn, N_k=N_k, initial_f_k=analytical_f)
Deltaf_ij, dDeltaf_ij, _ = mbar.getFreeEnergyDifferences()
return Deltaf_ij, dDeltaf_ij
# Determine standard deviation and shift of the harmonic distributions.
n_samples = 5000000
stds = np.array([2.0, 2.0, 5.0])
shifts = np.array([0.0, 2.0, 2.0])
print('\nspring constants:', 1 / stds**2)
# Compute analytical free energy.
analytical_f = np.array([harmonic_oscillator_free_energy(s) for s in stds])
analytical_Deltaf = np.array([analytical_f[0] - analytical_f[i] for i in range(len(stds))])
# FIRST TEST.
# Sample from all states and verify that MBAR free energy is correct.
# -------------------------------------------------------------------
all_samples = [normal(loc=l, scale=s, size=n_samples) for l, s in zip(shifts, stds)]
Deltaf_ij, dDeltaf_ij = compute_mbar_free_energy(all_samples, shifts, stds, analytical_f)
print()
print_free_energies(Deltaf_ij, dDeltaf_ij)
# SECOND TEST.
# Check if the bias is not due to lack of overlap. If we sample only the end states the estimate should be correct.
# -----------------------------------------------------------------------------------------------------------------
for i in range(1, len(all_samples)):
all_samples_bar = [all_samples[0], all_samples[i]]
shifts_bar = [shifts[0], shifts[i]]
stds_bar = [stds[0], stds[i]]
analytical_f_bar = [analytical_f[0], analytical_f[i]]
Deltaf_ij, dDeltaf_ij = compute_mbar_free_energy(all_samples_bar, shifts_bar, stds_bar, analytical_f_bar)
print('\nBAR_{}0'.format(i))
print_free_energies(Deltaf_ij, dDeltaf_ij)
# THIRD TEST.
# Now sample from only the bound state to see how the free energy changes.
# ------------------------------------------------------------------------
all_samples[1:] = [normal(loc=shifts[0], scale=stds[0], size=n_samples) for _ in range(len(stds)-1)]
Deltaf_ij, dDeltaf_ij = compute_mbar_free_energy(all_samples, shifts, stds, analytical_f)
print()
print_free_energies(Deltaf_ij, dDeltaf_ij)
# FOURTH TEST.
# Now let the unbound state decorrelate fast (i.e. sample from its own distribution).
# -----------------------------------------------------------------------------------
all_samples[-1] = normal(loc=shifts[-1], scale=stds[-1], size=n_samples)
Deltaf_ij, dDeltaf_ij = compute_mbar_free_energy(all_samples, shifts, stds, analytical_f)
print()
print_free_energies(Deltaf_ij, dDeltaf_ij)
# RESULT: SUCCESS!!!
# =============================================================================
# SUPPORTING INFORMATION - COMPLEX/SOLVENT and ENTROPY/ENTHALPY DECOMPOSITION
# =============================================================================
def _mean_data_decomposition(data):
# Convert into a numpy array to take the mean.
# Convert None (not supported by numpy) into nans.
try:
# This may fail if different numbers of iterations were computed for each replicate.
data = np.array(data, dtype=float)
except ValueError:
data_lengths = [len(x) for x in data]
print('Warning: Truncating data of shape {}'.format(data_lengths))
min_length = min(data_lengths)
data = [x[:min_length] for x in data]
data = np.array(data, dtype=float)
# Compute std and mean along the trajectory ignoring NaNs.
return np.nanmean(data, axis=0), np.nanstd(data, axis=0)
def _plot_phase_decomposition(ax, phase_free_energies):
# Shortcuts.
data = phase_free_energies
label = '$\Delta$G'
# Plot each phase's data on a separate axis to make the comparison across different orders of magnitude easier.
# Recipe with three axes: https://matplotlib.org/3.1.0/gallery/ticks_and_spines/multiple_yaxis_with_spines.html
phase_axes = {
'complex': ax.twinx(),
'solvent': ax.twinx()
}
phase_colors = {
'complex': 'C1',
'solvent': 'C0',
}
for ax_name in sorted(phase_axes):
phase_axes[ax_name].set_ylabel(label + ' ' + ax_name + ' [kcal/mol]',
color=phase_colors[ax_name])
phase_axes[ax_name].spines["right"].set_position(("axes", 1.2))
# Compute total free energy summing complex and solvent for all replicates.
total_mean = [np.array(data['solvent'][i]) + np.array(data['complex'][i]) for i in range(5)]
total_mean, total_std = _mean_data_decomposition(total_mean)
# Compute and plot the phase free energy.
for phase_name in ['complex', 'solvent']:
color = phase_colors[phase_name]
# Convert into a numpy array to take the mean.
# Convert None (not supported by numpy) into nans.
data[phase_name], std = _mean_data_decomposition(data[phase_name])
# Plot each phase data on a separate axis to make the comparison easier.
phase_axes[phase_name].plot(data[phase_name], ls='-', color=color,
label=label + ' ' + phase_name)
# Plot uncertainties.
phase_axes[phase_name].fill_between(x=list(range(len(std))), y1=data[phase_name]-std,
y2=data[phase_name]+std, color=color, alpha=0.7)
# Plot total free energy.
# total = data['solvent'] + data['complex']
# ax.plot(total, color='black', label=label+' total')
ax.plot(total_mean, color='black', label=label+' total')
ax.fill_between(x=list(range(len(total_std))), y1=total_mean-total_std,
y2=total_mean+total_std, color='black', alpha=0.7)
ax.set_ylabel(label + ' total [kcal/mol]')
ax.set_xlabel('simulation percentage')
# Make the range of all y axes the same.
ax.set_ylim((-21, -18))
phase_axes['complex'].set_ylim((-151.0, -148.0))
phase_axes['solvent'].set_ylim((129.0, 132.0))
def _plot_entropy_enthalpy_decomposition(ax, phase_free_energies, phase_enthalpy):
# Analyze only the complex.
phase_name = 'complex'
# Plot each phase's data on a separate axis to make the comparison across different orders of magnitude easier.
# Recipe with three axes: https://matplotlib.org/3.1.0/gallery/ticks_and_spines/multiple_yaxis_with_spines.html
axes = {
'$\Delta$G': ax,
'$\Delta$H': ax.twinx(),
'-T$\Delta$S': ax.twinx(),
}
colors = {
'$\Delta$G': 'black',
'$\Delta$H': 'C1',
'-T$\Delta$S': 'C0',
}
for ax_name in sorted(axes):
axes[ax_name].set_ylabel(ax_name + ' ' + phase_name + ' [kcal/mol]', color=colors[ax_name])
axes[ax_name].spines["right"].set_position(("axes", 1.2))
# Variable used to propagate the uncertainty of the entropy decomposition.
entropy_std = []
# Plot the total average free energy and enthalpy and for each phase.
for data, label in [(phase_free_energies, '$\Delta$G'),
(phase_enthalpy, '$\Delta$H')]:
color = colors[label]
# Convert into a numpy array to take the mean.
# Convert None (not supported by numpy) into nans.
data[phase_name], std = _mean_data_decomposition(data[phase_name])
ns_replica = np.arange(0.0, 40.0, 40/len(std))
# Plot each phase data on a separate axis to make the comparison easier.
axes[label].plot(ns_replica, data[phase_name], ls='-', color=color, label=label+' '+phase_name)
# Plot uncertainties.
axes[label].fill_between(x=ns_replica, y1=data[phase_name]-std,
y2=data[phase_name]+std, color=color, alpha=0.7)
# Propagate uncertainty.
if len(entropy_std) == 0:
entropy_std = std**2
else:
entropy_std += std**2
entropy_std = np.sqrt(entropy_std)
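# The entropic term is computed below as -T*dS = dG - dH, and its uncertainty
# is propagated as if the dG and dH errors were uncorrelated:
#   std(-T*dS) = sqrt(std(dG)**2 + std(dH)**2),
# which is what the accumulation of squared stds above computes.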
# Plot also entropies.
label = '-T$\Delta$S'
color = colors[label]
entropy = phase_free_energies[phase_name] - phase_enthalpy[phase_name]
axes[label].plot(ns_replica, entropy, ls='-', color=color, label=label+' '+phase_name)
# Plot uncertainties.
axes[label].fill_between(x=ns_replica, y1=entropy-entropy_std,
y2=entropy+entropy_std, color=color, alpha=0.7)
ax.set_xlabel('ns/replica')
def plot_decomposition(system_name, starting_iteration, type, output_file_path):
"""
Plot the decomposition of the free energy trajectory into complex/solvent phases or entropy/enthalpy.
Parameters
----------
type : str
Can be 'entropy-enthalpy' or 'phase'.
"""
data_file_pattern = 'YankAnalysis/BiasAnalysis/iter{}/fe-decomposition-{}-{{}}.json'.format(
starting_iteration, system_name)
n_replicates = 5
phase_free_energies = {'complex': [[] for _ in range(n_replicates)],
'solvent': [[] for _ in range(n_replicates)]}
phase_enthalpy = copy.deepcopy(phase_free_energies)
for replicate_idx in range(n_replicates):
# Read decomposition data.
decomposition_data_file_path = data_file_pattern.format(replicate_idx)
with open(decomposition_data_file_path, 'r') as f:
decomposition_data = json.load(f)
# Read free energy and enthalpy at each iteration.
sorted_decomposition_data = sorted(decomposition_data, key=lambda x: int(x.split('-')[1]))
for phase_iter in sorted_decomposition_data:
decomposition = decomposition_data[phase_iter]
phase_name, iteration = phase_iter.split('-')
# Correct sign consistent with thermodynamic cycle.
if phase_name == 'complex':
sign = -1
else:
sign = 1
corrected_free_energy = sign * (decomposition['DeltaF'] + decomposition['DeltaF_standard_state_correction'])
phase_free_energies[phase_name][replicate_idx].append(corrected_free_energy)
# Multiplication works only if enthalpy is not None.
if decomposition['DeltaH'] is not None:
decomposition['DeltaH'] *= sign
phase_enthalpy[phase_name][replicate_idx].append(decomposition['DeltaH'])
# Create figure.
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(7.25, 4.6))
if type == 'entropy-enthalpy':
_plot_entropy_enthalpy_decomposition(ax, phase_free_energies, phase_enthalpy)
else:
_plot_phase_decomposition(ax, phase_free_energies)
# # Plot total free energy.
# total = data['solvent'] + data['complex']
# ax.plot(total, color=color, label=label)
# totals.append(total)
# Plot also entropies.
# ax.plot(totals[0] - totals[1], color='blue', label='-T$\Delta$S')
# ax.set_ylim((-20, -18))
# phase_axes['complex'].set_ylim((-153, -148))
# phase_axes['solvent'].set_ylim((128, 133))
# ax.set_ylim((-23, -18))
# phase_axes['complex'].set_ylim((30, 45))
# phase_axes['solvent'].set_ylim((-55, -40))
# ax.legend()
plt.tight_layout()
if output_file_path is not None:
os.makedirs(os.path.dirname(output_file_path), exist_ok=True)
plt.savefig(output_file_path)
else:
plt.show()
# =============================================================================
# RELATIVE EFFICIENCY ANALYSIS
# =============================================================================
def get_relative_efficiency_input(submission, yank_analysis, system_name):
"""Prepare the data to compute the mean relative efficiencies for this system."""
# For GROMACS/EE-fullequil we need to account for the extra equilibration
# cost and shift all energy evaluations to the right.
if submission.paper_name == 'GROMACS/EE-fullequil':
mean_free_energies = submission.mean_free_energies()
mean_data = mean_free_energies[mean_free_energies['System name'] == system_name]
first_shifted = mean_data['N energy evaluations'].values[0]
last_shifted = mean_data['N energy evaluations'].values[-1]
calibration_cost = first_shifted*100/99 - last_shifted/99
else:
calibration_cost = 0
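# The calibration_cost formula above assumes the submission reports 100 equally
# spaced costs, i.e. first = cost_eq + delta and last = cost_eq + 100*delta,
# which gives cost_eq = (100*first - last)/99 = first*100/99 - last/99.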
# Isolate the data for the system.
data_sub = submission.data[submission.data['System name'] == system_name]
n_energy_evaluations = max(data_sub['N energy evaluations'])
data_ref = yank_analysis.get_free_energies_from_energy_evaluations(
n_energy_evaluations, system_name=system_name, mean_trajectory=False,
start=calibration_cost)
# Obtain the free energies for the submission.
n_replicates = 5
free_energy_sub = np.empty(shape=(n_replicates, 100))
free_energy_ref = np.empty(shape=(n_replicates, 100))
for data, free_energy in [
(data_sub, free_energy_sub),
(data_ref, free_energy_ref),
]:
for i in range(n_replicates):
system_id = system_name + '-' + str(i)
system_id_data = data[data['System ID'] == system_id]
free_energy[i] = system_id_data[DG_KEY].values
# Discard the initial frames of REVO and GROMACS/EE that don't have predictions.
from pkganalysis.efficiency import discard_initial_zeros
free_energy_ref, free_energy_sub = discard_initial_zeros(free_energy_ref, free_energy_sub)
# Determine the actual asymptotic free energy of YANK.
asymptotic_free_energy_ref = yank_analysis.get_reference_free_energies()[system_name]
return free_energy_ref, free_energy_sub, asymptotic_free_energy_ref
def compute_all_relative_efficiencies(
free_energy_A, free_energy_B, ci, n_bootstrap_samples,
asymptotic_free_energy_A=None, asymptotic_free_energy_B=None
):
from pkganalysis.efficiency import EfficiencyAnalysis
analysis = EfficiencyAnalysis(free_energy_A, free_energy_B,
asymptotic_free_energy_A,
asymptotic_free_energy_B)
std_rel_eff = analysis.compute_std_relative_efficiency(
confidence_interval=ci, n_bootstrap_samples=n_bootstrap_samples)
abs_bias_rel_eff = analysis.compute_abs_bias_relative_efficiency(
confidence_interval=ci, n_bootstrap_samples=n_bootstrap_samples)
rmse_rel_eff = analysis.compute_rmse_relative_efficiency(
confidence_interval=ci, n_bootstrap_samples=n_bootstrap_samples)
if ci is None:
rel_eff = [std_rel_eff, abs_bias_rel_eff, rmse_rel_eff]
return rel_eff
else:
rel_eff = [std_rel_eff[0], abs_bias_rel_eff[0], rmse_rel_eff[0]]
cis = [std_rel_eff[1], abs_bias_rel_eff[1], rmse_rel_eff[1]]
return rel_eff, cis
def plot_relative_efficiencies(submissions, yank_analysis, ci=0.95, n_bootstrap_samples=1000,
same_plot=False, step_cumulative=2):
sns.set_style('whitegrid')
sns.set_context('paper')
statistic_names = ['std', 'absolute bias', 'RMSE']
# Create output directory.
figure_dir_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'SI_Figure-efficiencies')
os.makedirs(figure_dir_path, exist_ok=True)
# Check if we need all the efficiencies in the same plot or not.
if same_plot:
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(7.25, 8))
# Keep track of data range by statistic.
statistic_ranges = {name: [np.inf, 0] for name in statistic_names}
# Keep track of n_energy_evaluations by column.
max_n_energy_evaluations = [0 for _ in range(3)]
for submission in submissions:
if submission.paper_name in {'OpenMM/REVO'}:
continue
# if submission.paper_name in {'AMBER/APR', 'GROMACS/NS-DS/SB', 'GROMACS/NS-DS/SB-long',
# 'NAMD/BAR', 'GROMACS/EE', 'GROMACS/EE-fullequil', 'OpenMM/SOMD'}:
# continue
print(submission.paper_name)
system_names = submission.data['System name'].unique()
# Create figure.
if not same_plot:
# For GROMACS/EE, there are no submissions for CB8-G3.
if 'GROMACS/EE' in submission.paper_name:
system_names = system_names[~(system_names == 'CB8-G3')]
fig, axes = plt.subplots(nrows=3, ncols=len(system_names),
figsize=(7.25, 8))
statistic_ranges = {name: [np.inf, 0] for name in statistic_names}
for col_idx, system_name in enumerate(system_names):
color = SUBMISSION_COLORS[submission.paper_name]
# For GROMACS/EE, there are no submissions for CB8-G3.
if 'GROMACS/EE' in submission.paper_name and system_name == 'CB8-G3':
continue
# For GROMACS/NS-DS/SB-long there are no new submissions for OAs.
if 'GROMACS/NS-DS/SB-long' in submission.paper_name and system_name != 'CB8-G3':
# Just add the label.
axes[0][col_idx].plot([], color=color, label=submission.paper_name)
continue
# Get input for EfficiencyAnalysis.
free_energy_ref, free_energy_sub, asymptotic_free_energy_ref = get_relative_efficiency_input(
submission, yank_analysis, system_name)
# Get the relative efficiencies.
rel_eff = compute_all_relative_efficiencies(
free_energy_ref, free_energy_sub, ci, n_bootstrap_samples,
asymptotic_free_energy_A=asymptotic_free_energy_ref
)
if ci is not None:
rel_eff, cis = rel_eff # Unpack confidence intervals.
# Use the same asymptotic free energies to compute the absolute bias
# relative efficiency as a function of the simulation length.
asymptotic_free_energy_sub = free_energy_sub.mean(axis=0)[-1]
# # Print relative efficiencies.
# print(system_name, ci)
# if ci is not None:
# for rel_eff, bounds in zip(rel_eff, cis):
# print('\t', rel_eff, bounds.tolist())
# else:
# for rel_eff in rel_eff:
# print('\t', rel_eff)
# Compute mean efficiencies as a function of the length of the simulation.
n_costs = free_energy_ref.shape[1]
n_rel_eff = int(n_costs / step_cumulative)
relative_efficiencies = np.empty(shape=(3, n_rel_eff))
low_bounds = np.empty(shape=(3, n_rel_eff))
high_bounds = np.empty(shape=(3, n_rel_eff))
for i, c in enumerate(range(step_cumulative-1, n_costs, step_cumulative)):
c1 = c + 1
rel_eff = compute_all_relative_efficiencies(
free_energy_ref[:,:c1], free_energy_sub[:,:c1],
ci, n_bootstrap_samples,
asymptotic_free_energy_A=asymptotic_free_energy_ref,
asymptotic_free_energy_B=asymptotic_free_energy_sub
)
if ci is not None:
rel_eff, cis = rel_eff # Unpack confidence intervals.
# Update CI lower and upper bound.
relative_efficiencies[:,i] = rel_eff
if ci is not None:
low_bounds[:,i] = [x[0] for x in cis]
high_bounds[:,i] = [x[1] for x in cis]
# Get number of energy evaluations.
mean_data = submission.mean_free_energies(system_name=system_name)
# Check how many initial iterations have been discarded.
discarded_iterations = 100 - n_costs
n_energy_evaluations = mean_data['N energy evaluations'].values[
discarded_iterations+1::step_cumulative] / 1e6
for row_idx, rel_eff in enumerate(relative_efficiencies):
ax = axes[row_idx][col_idx]
ax.plot(n_energy_evaluations, rel_eff, color=color, label=submission.paper_name)
# Plot a black dashed reference line at 0.
ax.plot(n_energy_evaluations, [0 for _ in n_energy_evaluations], color='black', ls='--')
# Update data range.
statistic_range = statistic_ranges[statistic_names[row_idx]]
# if ci is None:
# min_rel_eff = min(rel_eff)
# max_rel_eff = max(rel_eff)
# else:
# min_rel_eff = min(*rel_eff, *low_bounds[row_idx])
# max_rel_eff = max(*rel_eff, *high_bounds[row_idx])
statistic_range[0] = min(statistic_range[0], min(rel_eff))
statistic_range[1] = max(statistic_range[1], max(rel_eff))
# Update x-axis range.
if same_plot:
max_n_energy_evaluations[col_idx] = max(max_n_energy_evaluations[col_idx],
n_energy_evaluations[-1])
else:
for row_idx in range(len(statistic_names)):
axes[row_idx][col_idx].set_xlim((0, n_energy_evaluations[-1]))
if ci is not None:
# Plot confidence intervals.
for row_idx, (low_bound_c, high_bound_c) in enumerate(zip(low_bounds, high_bounds)):
ax = axes[row_idx][col_idx]
ax.fill_between(n_energy_evaluations, low_bound_c, high_bound_c,
alpha=0.35, color='gray')
# We do this multiple times unnecessarily if same_plot is True, but the code is simpler.
for col_idx, system_name in enumerate(system_names):
axes[0][col_idx].set_title(system_name)
for row_idx, statistic_name in enumerate(statistic_names):
axes[row_idx][0].set_ylabel(statistic_name + ' rel eff')
for col_idx in range(len(system_names)):
if same_plot:
extra_space = 0.1
else:
# Make space for confidence intervals.
extra_space = 1
ylimits = (statistic_ranges[statistic_name][0] - extra_space,
statistic_ranges[statistic_name][1] + extra_space)
axes[row_idx][col_idx].set_ylim(ylimits)
axes[row_idx][col_idx].tick_params(axis='y', which='major', pad=0.1)
axes[-1][1].set_xlabel('Number of force/energy evaluations [10$^6$]')
# Set labels and axes limits.
if not same_plot:
fig.suptitle(submission.paper_name)
output_file_base_name = 'releff-{}-{}'.format(submission.file_name, submission.receipt_id)
output_file_base_path = os.path.join(figure_dir_path, output_file_base_name)
plt.savefig(output_file_base_path + '.pdf')
# plt.savefig(output_file_base_path + '.png', dpi=600)
# plt.show()
if same_plot:
for row_idx in range(len(statistic_names)):
for col_idx in range(len(system_names)):
axes[row_idx][col_idx].set_xlim((0, max_n_energy_evaluations[col_idx]))
axes[0][1].legend(loc='upper right', bbox_to_anchor=(2.0, 1.48),
fancybox=True, ncol=3)
output_file_base_path = os.path.join(figure_dir_path, 'relative-efficiencies')
plt.savefig(output_file_base_path + '.pdf')
# plt.savefig(output_file_base_path + '.png', dpi=600)
# plt.show()
def plot_absolute_efficiencies(submissions, yank_analysis, ci=0.95, n_bootstrap_samples=1000):
sns.set_style('whitegrid')
sns.set_context('paper')
# Keep track of data range by statistic.
statistic_names = ['std', 'absolute bias', 'RMSE']
# Keep track of maximum number of energy evaluations
# to determine plotting range for YANK.
system_names = ['CB8-G3', 'OA-G3', 'OA-G6']
max_n_energy_eval = {name: 0 for name in system_names}
# Create figure.
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(7.25, 8))
for submission in submissions + [yank_analysis]:
if 'REVO' in submission.paper_name:
continue
print(submission.paper_name)
# Obtain std, bias, and RMSE of the 5 trajectories.
# If this is a YANK analysis, we get it later specifically for the system.
if not isinstance(submission, YankSamplingAnalysis):
mean_free_energies = submission.mean_free_energies()
color = SUBMISSION_COLORS[submission.paper_name]
for col_idx, system_name in enumerate(system_names):
# GROMACS/EE doesn't have submissions for CB8-G3.
if 'GROMACS/EE' in submission.paper_name and system_name == 'CB8-G3':
continue
# For GROMACS/NS-DS/SB-long there are no new submissions for OAs.
if 'GROMACS/NS-DS/SB-long' in submission.paper_name and 'OA' in system_name:
# Just add the label.
axes[0][col_idx].plot([], color=color, label=submission.paper_name)
continue
# Select the submission data for only this host-guest system.
if isinstance(submission, YankSamplingAnalysis):
line_style = '--'
mean_data = submission.get_free_energies_from_energy_evaluations(
max_n_energy_eval[system_name], system_name=system_name, mean_trajectory=True)
else:
line_style = '-'
mean_data = mean_free_energies[mean_free_energies['System name'] == system_name]
# Update maximum number of energy evaluations.
n_energy_evaluations = mean_data['N energy evaluations'].values
max_n_energy_eval[system_name] = max(max_n_energy_eval[system_name], n_energy_evaluations[-1])
# Discard initial computational costs for which there's no data.
first_nonzero_idx = np.nonzero(mean_data[DG_KEY])[0][0]
n_energy_evaluations = n_energy_evaluations[first_nonzero_idx:]
# Compute cumulative total std, abs_bias, and RMSE.
scale_energy_evaluations = 1e6
norm_factor = (n_energy_evaluations - n_energy_evaluations[0])[1:] / scale_energy_evaluations
avg_std = sp.integrate.cumtrapz(mean_data['std'].values[first_nonzero_idx:]) / norm_factor
avg_abs_bias = sp.integrate.cumtrapz(np.abs(mean_data['bias'].values[first_nonzero_idx:])) / norm_factor
avg_rmse = sp.integrate.cumtrapz(mean_data['RMSE'].values[first_nonzero_idx:]) / norm_factor
# Plot total statistics as a function of the energy evaluations.
# Discard first energy evaluation as cumtrapz doesn't return a result for it.
for row_idx, avg_stats in enumerate([avg_std, avg_abs_bias, avg_rmse]):
ax = axes[row_idx, col_idx]
ax.plot(n_energy_evaluations[1:] / scale_energy_evaluations, avg_stats,
color=color, label=submission.paper_name, ls=line_style)
# Set x axis.
ax.set_xlim((0, n_energy_evaluations[-1] / scale_energy_evaluations))
# Set labels and axes limits.
y_limits = {
'std': (0, 0.4),
'absolute bias': (0, 0.3),
'RMSE': (0, 0.4)
}
for col_idx, system_name in enumerate(system_names):
axes[0][col_idx].set_title(system_name)
# Set y limits (shared for each row).
for row_idx, statistic_name in enumerate(statistic_names):
axes[row_idx][col_idx].set_ylim(y_limits[statistic_name])
axes[row_idx][col_idx].tick_params(axis='y', which='major', pad=0.1)
# # Remove shared ticks.
# for row_idx in range(len(statistic_names)):
# for col_idx in range(len(system_names)):
# if col_idx > 0:
# axes[row_idx][col_idx].set_yticklabels([])
# if row_idx < len(statistic_names)-1:
# axes[row_idx][col_idx].set_xticklabels([])
for row_idx, statistic_name in enumerate(statistic_names):
axes[row_idx][0].set_ylabel('mean ' + statistic_name + ' [kcal/mol]')
axes[-1][1].set_xlabel('N energy evaluations [M]')
axes[0][1].legend(loc='upper right', bbox_to_anchor=(2.0, 1.48),
fancybox=True, ncol=3)
figure_dir_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'SI_Figure-efficiencies')
os.makedirs(figure_dir_path, exist_ok=True)
output_file_base_path = os.path.join(figure_dir_path, 'absolute-efficiencies')
plt.savefig(output_file_base_path + '.pdf')
# plt.savefig(output_file_base_path + '.png', dpi=600)
# plt.show()
def print_relative_efficiency_table(
submissions, yank_analysis, ci=0.95,
n_bootstrap_samples=100,
print_bias_corrected=False
):
"""Create a table with standard deviation, absolute bias, and RMSE relative efficiency."""
methods = []
# Initialize the table to be converted into a Pandas dataframe.
system_names = ['CB8-G3', 'OA-G3', 'OA-G6']
statistic_names = [r'$e_{\mathrm{std}}$', r'$e_{|\mathrm{bias}|}$', r'$e_{\mathrm{RMSD}}$']
column_names = ['\\makecell{$\Delta$ G \\\\ $[$kcal/mol$]$}', '\\makecell{n eval \\\\ $[$M$]$}'] + statistic_names
# Add columns.
efficiency_table = collections.OrderedDict()
for system_name, column_name in itertools.product(system_names, column_names):
efficiency_table[(system_name, column_name)] = []
for submission in submissions:
# Collect method's names in the given order.
methods.append(submission.paper_name)
mean_free_energies = submission.mean_free_energies()
for system_name in system_names:
# CB8-G3 calculations for GROMACS/EE did not converge yet, and the
# long NS-DS/SB protocol has been run only on CB8-G3.
if ((submission.name == 'Expanded-ensemble/MBAR' and system_name == 'CB8-G3') or
(submission.paper_name == 'GROMACS/NS-DS/SB-long' and system_name != 'CB8-G3')):
relative_efficiencies, relative_efficiencies_corrected = np.full((2, 3), fill_value=np.nan)
dg = ''
n_force_eval = ''
else:
# Get input for EfficiencyAnalysis.
free_energy_ref, free_energy_sub, asymptotic_free_energy_ref = get_relative_efficiency_input(
submission, yank_analysis, system_name)
# Get the relative efficiencies.
relative_efficiencies, cis = compute_all_relative_efficiencies(
free_energy_ref, free_energy_sub, ci, n_bootstrap_samples,
asymptotic_free_energy_A=asymptotic_free_energy_ref
)
# Recompute relative efficiencies assuming that YANK converged.
if print_bias_corrected:
relative_efficiencies_corrected, cis_corrected = compute_all_relative_efficiencies(
free_energy_ref, free_energy_sub, ci, n_bootstrap_samples)
# Select the data for only this host-guest system.
mean_data_sub = mean_free_energies[mean_free_energies['System name'] == system_name]
# Get the final free energy and number of energy/force evaluations.
dg = mean_data_sub[DG_KEY].values[-1]
dg_CI = mean_data_sub['$\Delta$G CI'].values[-1] # Confidence interval.
dg, dg_CI = reduce_to_first_significant_digit(dg, dg_CI)
n_force_eval = mean_data_sub['N energy evaluations'].values[-1]
# Convert to string format.
dg = '{} $\\pm$ {}'.format(dg, dg_CI)
n_force_eval = str(int(round(n_force_eval / 1e6)))
# Add free energy and cost entries.
efficiency_table[(system_name, column_names[0])].append(dg)
efficiency_table[(system_name, column_names[1])].append(n_force_eval)
# Add efficiency entries for the table.
for statistic_idx, statistic_name in enumerate(statistic_names):
# Gather the format arguments.
rel_effs = [relative_efficiencies[statistic_idx], cis[statistic_idx][0], cis[statistic_idx][1]]
if print_bias_corrected:
rel_effs.append(relative_efficiencies_corrected[statistic_idx])
# Comment this if we don't want to print CIs for the corrected estimate.
rel_effs.extend([cis_corrected[statistic_idx][0], cis_corrected[statistic_idx][1]])
# Print significant digits.
efficiencies_format = []
for e_idx in range(0, len(rel_effs), 3):
rel_eff, low_bound, high_bound = rel_effs[e_idx:e_idx+3]
if high_bound - rel_eff < 0.1 or rel_eff - low_bound < 0.1:
fmt = '{:2.2f}'
else:
fmt = '{:2.1f}'
# Print lower and higher bound as sub and superscripts of the estimate.
efficiencies_format.append(fmt + '$_{{\raisem{{2pt}}{{' + fmt + '}}}}^{{\mathstrut ' + fmt + '}}$')
if np.isnan(rel_effs[0]):
data_entry = ''
# Standard deviation efficiency is not affected by the bias.
elif print_bias_corrected and ('std' not in statistic_name):
data_entry = efficiencies_format[0] + ' (' + efficiencies_format[1] + ')'
data_entry = data_entry.format(*rel_effs)
else:
data_entry = efficiencies_format[0].format(*rel_effs[:3])
# Remove the minus sign from "-0".
data_entry = data_entry.replace('-0.0', '0.0')
data_entry = data_entry.replace('-0.00', '0.00')
efficiency_table[(system_name, statistic_name)].append(data_entry)
# Add row for reference calculation.
methods.append(YANK_METHOD_PAPER_NAME)
# Add free energy and cost entries.
for system_name in system_names:
yank_mean_data = yank_analysis.get_free_energies_from_iteration(
YANK_N_ITERATIONS, system_name=system_name, mean_trajectory=True)
dg = yank_mean_data[DG_KEY].values[-1]
dg_CI = yank_mean_data['$\Delta$G CI'].values[-1] # Confidence interval.
dg, dg_CI = reduce_to_first_significant_digit(dg, dg_CI)
n_force_eval = yank_mean_data['N energy evaluations'].values[-1]
n_force_eval = str(int(round(n_force_eval / 1e6)))
efficiency_table[(system_name, column_names[0])].append('{} $\\pm$ {}'.format(dg, dg_CI))
efficiency_table[(system_name, column_names[1])].append(n_force_eval)
# All efficiencies are relative to YANK, so the reference entries are all 0.0.
for system_name, statistic_name in itertools.product(system_names, statistic_names):
efficiency_table[(system_name, statistic_name)].append('0.0')
# Convert to Pandas Dataframe.
efficiency_table = pd.DataFrame(efficiency_table)
# Set the method's names as index column.
efficiency_table = efficiency_table.assign(Method=methods)
efficiency_table.set_index(keys='Method', inplace=True)
# Print table.
column_format = 'lccccc|ccccc|ccccc'
efficiency_table_latex = efficiency_table.to_latex(column_format=column_format, multicolumn_format='c',
escape=False)
# Make header and reference method bold.
textbf = lambda s: '\\textbf{' + s + '}'
efficiency_table_latex = efficiency_table_latex.replace(YANK_METHOD_PAPER_NAME, textbf(YANK_METHOD_PAPER_NAME))
efficiency_table_latex = efficiency_table_latex.replace('Method', textbf('Method'))
for system_name in system_names:
efficiency_table_latex = efficiency_table_latex.replace(system_name, textbf(system_name))
for column_name in column_names:
efficiency_table_latex = efficiency_table_latex.replace(column_name, textbf(column_name))
print(efficiency_table_latex)
def print_nonequilibrium_relative_efficiencies(nonequilibrium_submissions):
"""Print relative efficiencies w.r.t. for the nonequilibrium estimators table."""
system_names = ['CB8-G3', 'OA-G3', 'OA-G6']
def _get_free_energy_array(submission, system_name, step=1, max_c=100, get_asymptotic=False):
n_replicates = 5
system_data = submission.data[submission.data['System name'] == system_name]
free_energy_array = np.empty(shape=(n_replicates, int(max_c/step)))
for i in range(n_replicates):
system_id = system_name + '-' + str(i)
system_id_data = system_data[system_data['System ID'] == system_id]
free_energy_array[i] = system_id_data[DG_KEY].values[:max_c:step]
if get_asymptotic:
mean_free_energies = submission.mean_free_energies()
asymptotic = mean_free_energies[mean_free_energies['System name'] == system_name][DG_KEY].values[-1]
return free_energy_array, asymptotic
return free_energy_array
# Use GROMACS/NS-DS/SB-long as reference method.
reference_submission = [s for s in nonequilibrium_submissions if s.paper_name == 'GROMACS/NS-DS/SB-long'][0]
# Also remove the other BAR submission.
nonequilibrium_submissions = [s for s in nonequilibrium_submissions if 'GROMACS/NS-DS/SB' not in s.paper_name]
# Get only the first 50 as the 1-directional estimators only have half the cost.
free_energy_ref = {}
asymptotic_ref = {}
for system_name in system_names:
DG, asympt = _get_free_energy_array(reference_submission, system_name, max_c=50, get_asymptotic=True)
free_energy_ref[system_name] = DG
asymptotic_ref[system_name] = asympt
for submission in nonequilibrium_submissions:
print(submission.paper_name, end='')
for system_name in system_names:
free_energy_sub = _get_free_energy_array(submission, system_name, step=2)
rel_eff, cis = compute_all_relative_efficiencies(
free_energy_ref[system_name], free_energy_sub, ci=0.95, n_bootstrap_samples=1000,
asymptotic_free_energy_A=asymptotic_ref[system_name],
asymptotic_free_energy_B=asymptotic_ref[system_name]
)
for i, stat_name in enumerate(['std', 'bias', 'RMSE']):
print(r' & {:.1f}$_{{\raisem{{2pt}}{{{:.1f}}}}}^{{\mathstrut {:.1f}}}$'.format(rel_eff[i], cis[i][0], cis[i][1]), end='')
print(r' \\')
def print_final_prediction_table(submissions, yank_analysis):
"""Plot the table containing the fina binding free energy predictions for all replicates."""
for submission in submissions + [yank_analysis]:
# GROMACS/EE-fullequil predictions are identical to GROMACS/EE
if submission.paper_name == 'GROMACS/EE-fullequil':
continue
if isinstance(submission, YankSamplingAnalysis):
submission_data = yank_analysis.get_free_energies_from_iteration(final_iteration=YANK_N_ITERATIONS)
else:
submission_data = submission.data
submission_data = submission_data[submission_data['Simulation percentage'] == 100]
row_str = submission.paper_name + ' & '
submission_final_DGs = []
for system_id in submission_data['System ID'].unique():
# GROMACS/EE doesn't have predictions for CB8-G3, and the
# GROMACS/NS-DS/SB-long protocol was applied only to CB8-G3.
if (('GROMACS/EE' in submission.paper_name and 'CB8-G3' in system_id) or
(submission.paper_name == 'GROMACS/NS-DS/SB-long' and 'OA' in system_id)):
submission_final_DGs.append('')
continue
dg = submission_data.loc[submission_data['System ID'] == system_id, DG_KEY].values[0]
ddg = submission_data.loc[submission_data['System ID'] == system_id, DDG_KEY].values[0]
dg, ddg = reduce_to_first_significant_digit(dg, ddg)
submission_final_DGs.append(r'{} $\pm$ {}'.format(dg, ddg))
row_str += ' & '.join(submission_final_DGs) + r' \\'
print(row_str)
# =============================================================================
# SUPPORTING INFORMATION - SINGLE TRAJECTORIES
# =============================================================================
def plot_single_trajectories_figures(axes, system_data, system_mean_data,
reference_system_mean_data=None,
plot_errors=True, plot_methods_uncertainties=True):
"""Plot individual free energy trajectories and standard deviations for a single method and system."""
system_name = system_data['System name'].unique()[0]
palette_mean = sns.color_palette('pastel')
submission_mean_color = 'black'
reference_mean_color = palette_mean[9]
# Plot the method uncertainties of the single replicate trajectories.
# First scale the number of energy evaluations.
system_data.loc[:,'N energy evaluations'] /= N_ENERGY_EVALUATIONS_SCALE
# Plot the 5 replicates individual trajectories.
# First remove the initial predictions that are 0.0 (i.e. there is no estimate).
ax = axes[0]
system_data = system_data[system_data[DG_KEY] != 0.0]
sns.lineplot(data=system_data, x='N energy evaluations', y=DG_KEY,
hue='System ID', palette='bright', ax=ax, alpha=0.6)
# Plot the submission mean trajectory with CI.
plot_mean_free_energy(system_mean_data, x='N energy evaluations', ax=ax,
color_mean=submission_mean_color, plot_ci=False,
color_ci=submission_mean_color, label='Best estimate',
scale_n_energy_evaluations=True)
# Plot YANK mean trajectory with CI.
if reference_system_mean_data is not None:
plot_mean_free_energy(reference_system_mean_data, x='N energy evaluations', ax=ax,
color_mean=reference_mean_color, plot_ci=False,
color_ci=reference_mean_color, label='Reference estimate',
scale_n_energy_evaluations=True)
ax.set_title(system_name)
# Add the y-label only on the leftmost Axis.
if system_name != 'CB8-G3':
ax.set_ylabel('')
# Remove the legend for now; it will be added at the end after tightening up the plot.
ax.get_legend().remove()
# Create a bias axis.
if reference_system_mean_data is not None:
ref_free_energy = reference_free_energies.loc[system_name, DG_KEY]
with sns.axes_style('white'):
ax2 = ax.twinx()
# Plot a vertical line to make the scale.
vertical_line = np.linspace(*ax.get_ylim()) - ref_free_energy
ax2.plot([50] * len(vertical_line), vertical_line, alpha=0.0001)
ax2.grid(alpha=0.5, linestyle='dashed', zorder=0)
# We add the bias y-label only on the rightmost Axis.
if system_name == 'OA-G6':
ax2.set_ylabel('Bias to reference [kcal/mol]')
# Set the 0 of the twin axis to the YANK reference free energy.
align_yaxis(ax, ref_free_energy, ax2, 0.0)
if plot_errors:
# The x-axis is shared between the 2 rows so we can plot the ticks only in the bottom one.
ax.xaxis.set_ticklabels([])
ax.set_xlabel('')
ax = axes[1]
# REVO uses the mean of the 5 replicates to estimate the
# uncertainty so it doesn't add information.
if plot_methods_uncertainties:
sns.lineplot(data=system_data, x='N energy evaluations', y=DDG_KEY,
hue='System ID', palette='bright', ax=ax, alpha=0.6)
# The legend is added later at the top.
ax.get_legend().remove()
# Plot the standard deviation of the free energy trajectories.
# submission_std = system_mean_data['std']
submission_std = system_mean_data['unbiased_std']
# cost = system_mean_data['Simulation percentage'].values
cost = system_mean_data['N energy evaluations'].values / N_ENERGY_EVALUATIONS_SCALE
ax.plot(cost, submission_std, color=submission_mean_color)
# Plot confidence interval around standard deviation.
submission_std_low_ci = system_mean_data['unbiased_std_low_CI'].values
submission_std_up_ci = system_mean_data['unbiased_std_up_CI'].values
ax.fill_between(cost, submission_std_low_ci, submission_std_up_ci, alpha=0.35, color='gray')
if reference_system_mean_data is not None:
# reference_std = reference_system_mean_data['std']
reference_std = reference_system_mean_data['unbiased_std']
ax.plot(cost, reference_std, color=reference_mean_color)
# Only the central plot shows the x-label.
ax.set_xlabel('')
# Add the y-label only on the leftmost Axis.
if system_name != 'CB8-G3':
ax.set_ylabel('')
else:
ax.set_ylabel('std($\Delta$G) [kcal/mol]')
# Set x limits.
for ax in axes:
ax.set_xlim((0, max(system_data['N energy evaluations'])))
def plot_all_single_trajectories_figures(submissions, yank_analysis, plot_errors=True, output_path_dir=None):
"""Individual plots for each method with the 5 individual free energy and uncertainty trajectories."""
sns.set_style('whitegrid')
sns.set_context('paper')
if output_path_dir is None:
output_path_dir = os.path.join(SAMPLING_PAPER_DIR_PATH, 'SI_Figure-individual-trajectories/')
os.makedirs(output_path_dir, exist_ok=True)
# -------------------- #
# Plot submission data #
# -------------------- #
# Remove nonequilibrium-switching calculations with single-direction estimators.
submissions = [s for s in submissions if ('Jarz' not in s.paper_name and 'Gauss' not in s.paper_name)]
for submission in submissions + [yank_analysis]:
# CB8-G3 calculations for GROMACS/EE did not converge yet.
if submission.name == 'Expanded-ensemble/MBAR':
submission.data = submission.data[submission.data['System name'] != 'CB8-G3']
# REVO uses the mean of the 5 replicates to estimate the
# uncertainty so it doesn't add information.
if 'REVO' in submission.paper_name:
plot_methods_uncertainties = False
else:
plot_methods_uncertainties = True
if not isinstance(submission, YankSamplingAnalysis):
mean_free_energies = submission.mean_free_energies()
unique_system_names = submission.data['System name'].unique()
else:
unique_system_names = sorted(submission.system_names)
# Create a figure with 3 axes (one for each system).
n_systems = len(unique_system_names)
if plot_errors:
# The second row will plot the errors.
fig, axes = plt.subplots(nrows=2, ncols=n_systems, figsize=(7.25, 4.8))
trajectory_axes = axes[0]
else:
fig, axes = plt.subplots(nrows=1, ncols=n_systems, figsize=(7.25, 2.4))
trajectory_axes = axes
# Set figure title.
fig.suptitle(submission.paper_name)
# Determine range of data across systems.
min_DG = np.inf
max_DG = -np.inf
min_dDG = np.inf
max_dDG = -np.inf
# for system_name in unique_system_names:
for ax_idx, system_name in enumerate(unique_system_names):
if isinstance(submission, YankSamplingAnalysis):
data = submission.get_free_energies_from_iteration(final_iteration=YANK_N_ITERATIONS,
system_name=system_name)
mean_data = submission.get_free_energies_from_iteration(final_iteration=YANK_N_ITERATIONS,
system_name=system_name,
mean_trajectory=True)
else:
# Select the data for only this host-guest system.
data = submission.data[submission.data['System name'] == system_name]
mean_data = mean_free_energies[mean_free_energies['System name'] == system_name]
plot_single_trajectories_figures(axes[:,ax_idx], data, mean_data, plot_errors=plot_errors,
reference_system_mean_data=None,
plot_methods_uncertainties=plot_methods_uncertainties)
# Collect max and min data to determine axes range.
min_DG = min(min_DG, min(data[DG_KEY]), min(mean_data[DG_KEY]))
max_DG = max(max_DG, max(data[DG_KEY]), max(mean_data[DG_KEY]))
min_dDG = min(min_dDG, min(data[DDG_KEY]), min(mean_data['std']))
max_dDG = max(max_dDG, max(data[DDG_KEY]), max(mean_data['std']))
# Set limits.
for i in range(len(unique_system_names)):
axes[0][i].set_ylim((min_DG, max_DG))
axes[1][i].set_ylim((min_dDG, max_dDG))
# Keep ticks only in external plots.
axes[0][i].set_xticklabels([])
for i in range(1, len(unique_system_names)):
axes[0][i].set_yticklabels([])
axes[1][i].set_yticklabels([])
# The x-label is shown only in the central plot.
axes[-1][1].set_xlabel('N energy evaluations [10$^6$]')
plt.tight_layout(pad=0.2, rect=[0.0, 0.0, 1.0, 0.85])
# Create legend.
# The first handle/label is the legend title "System ID" so we get rid of it.
handles, labels = trajectory_axes[0].get_legend_handles_labels()
labels = ['replicate ' + str(i) for i in range(5)] + labels[6:]
bbox_to_anchor = (-0.1, 1.35)
trajectory_axes[0].legend(handles=handles[1:], labels=labels, loc='upper left',
bbox_to_anchor=bbox_to_anchor, ncol=6, fancybox=True,
labelspacing=0.8, handletextpad=0.5, columnspacing=1.2)
# Save figure.
output_file_name = 'replicates-{}-{}'.format(submission.file_name, submission.receipt_id)
plt.savefig(os.path.join(output_path_dir, output_file_name + '.pdf'))
# plt.savefig(os.path.join(output_path_dir, output_file_name + '.png'), dpi=300)
# plt.show()
# =============================================================================
# SUPPORTING INFORMATION - HREX/MBAR STATISTICAL INEFFICIENCY ANALYSIS
# =============================================================================
def plot_hrex_stat_ineff_trajectories():
"""Individual plots for HREX with the 5 individual free energy and uncertainty trajectories
as a function of the statistical inefficiency."""
sns.set_context('paper')
# Limits of y-axis (free energies, uncertainties) by system.
y_limits = {
'CB8-G3': [(-14, -10), (0, 2)],
'OA-G3': [(-9, -5), (0, 1.5)],
'OA-G6': [(-9, -5), (0, 1.5)],
}
# Create output dir.
output_path_dir = os.path.join(SAMPLING_PAPER_DIR_PATH, 'SI_Figure-statistical-inefficiency')
os.makedirs(output_path_dir, exist_ok=True)
# Read the data, which is organized by statistical inefficiency.
# We'll then plot by system.
yank_analysis_by_statineff = collections.OrderedDict()
for stat_ineff in ['5', '10', '20', '50', '100', '200']:
data_dir_path = os.path.join('YankAnalysis', 'CorrelationAnalysis', 'statineff-{}'.format(stat_ineff))
yank_analysis = YankSamplingAnalysis(data_dir_path)
yank_analysis_by_statineff[stat_ineff] = yank_analysis
# Plot by system.
for system_name in ['CB8-G3', 'OA-G3', 'OA-G6']:
fig, axes = plt.subplots(nrows=4, ncols=3, figsize=(7.25, 9.8))
# Set figure title.
fig.suptitle('HREX uncertainty predictions as a function of\n'
'statistical inefficiency for {}'.format(system_name))
# for system_name in unique_system_names:
for stat_ineff_idx, stat_ineff in enumerate(yank_analysis_by_statineff):
yank_analysis = yank_analysis_by_statineff[stat_ineff]
data = yank_analysis.get_free_energies_from_iteration(final_iteration=YANK_N_ITERATIONS,
system_name=system_name)
mean_data = yank_analysis.get_free_energies_from_iteration(final_iteration=YANK_N_ITERATIONS,
system_name=system_name,
mean_trajectory=True)
# Plot on the correct axis.
DG_row = 2*int(stat_ineff_idx / 3)
col = stat_ineff_idx % 3
stat_ineff_axes = axes[DG_row:DG_row+2, col]
plot_single_trajectories_figures(stat_ineff_axes, data, mean_data, plot_errors=True,
reference_system_mean_data=None,
plot_methods_uncertainties=True)
# Set titles and limits.
title = 'Statistical inefficiency: {} ps'.format(stat_ineff)
if DG_row > 0:
title = '\n' + title
stat_ineff_axes[0].set_title(title, fontweight='bold')
stat_ineff_axes[0].set_ylim(y_limits[system_name][0])
stat_ineff_axes[1].set_ylim(y_limits[system_name][1])
stat_ineff_axes[0].set_ylabel('$\Delta$G [kcal/mol]')
stat_ineff_axes[1].set_ylabel('std($\Delta$G) [kcal/mol]')
# Keep ticks only in external plots.
for row_idx in range(axes.shape[0]):
for col_idx in range(axes.shape[1]):
if row_idx != axes.shape[0] - 1:
axes[row_idx][col_idx].set_xticklabels([])
if col_idx != 0:
axes[row_idx][col_idx].set_ylabel('')
axes[row_idx][col_idx].set_yticklabels([])
# Set x label.
axes[-1][1].set_xlabel('N energy evaluations [10$^6$]')
plt.tight_layout(pad=0.0, rect=[0.0, 0.0, 1.0, 0.88])
# Create legend.
# The first handle/label is the legend title "System ID" so we get rid of it.
handles, labels = axes[0][0].get_legend_handles_labels()
labels = ['replicate ' + str(i) for i in range(5)] + labels[6:]
bbox_to_anchor = (0.05, 1.35)
axes[0][0].legend(handles=handles[1:], labels=labels, loc='upper left',
bbox_to_anchor=bbox_to_anchor, ncol=6, fancybox=True,
labelspacing=0.8, handletextpad=0.5, columnspacing=1.2)
# Save figure.
output_file_name = 'statineff-{}'.format(system_name)
plt.savefig(os.path.join(output_path_dir, output_file_name + '.pdf'))
# plt.savefig(os.path.join(output_path_dir, output_file_name + '.png'), dpi=300)
# plt.show()
# =============================================================================
# MAIN
# =============================================================================
if __name__ == '__main__':
sns.set_style('whitegrid')
sns.set_context('paper')
# Read reference values.
yank_analysis = YankSamplingAnalysis(YANK_ANALYSIS_DIR_PATH)
# Obtain free energies and final reference values.
mean_reference_free_energies = yank_analysis.get_free_energies_from_iteration(YANK_N_ITERATIONS, mean_trajectory=True)
reference_free_energies = mean_reference_free_energies[mean_reference_free_energies['Simulation percentage'] == 100]
reference_free_energies.set_index('System name', inplace=True)
# Compute efficiency of reference.
reference_efficiencies = {}
for system_name in mean_reference_free_energies['System name'].unique():
mean_data = mean_reference_free_energies[mean_reference_free_energies['System name'] == system_name]
reference_efficiencies[system_name], n_discarded = fit_efficiency(mean_data)
# Import user map.
with open('../SubmissionsDoNotUpload/SAMPL6_user_map.csv', 'r') as f:
user_map = pd.read_csv(f)
# Load submissions data. We do OA and TEMOA together.
all_submissions = load_submissions(SamplingSubmission, SAMPLING_SUBMISSIONS_DIR_PATH, user_map)
# Remove AMBER/TI.
all_submissions = [s for s in all_submissions if s.name not in ['Langevin/Virtual Bond/TI']]
# Create an extra submission for GROMACS/EE where the full cost of equilibration has been taken into account.
gromacs_ee_submission = copy.deepcopy([s for s in all_submissions if s.paper_name == 'GROMACS/EE'][0])
gromacs_ee_submission.paper_name = 'GROMACS/EE-fullequil'
gromacs_ee_submission.file_name = 'EENVT-fullequil'
data = gromacs_ee_submission.data # Shortcut.
mean_free_energies = gromacs_ee_submission.mean_free_energies()
for system_name in ['OA-G3', 'OA-G6']:
mean_data = mean_free_energies[mean_free_energies['System name'] == system_name]
first_nonzero_idx = np.nonzero(mean_data[DG_KEY].values)[0][0]
full_equilibration_cost = mean_data['N energy evaluations'].values[first_nonzero_idx] * 4
for i in data[data['System name'] == system_name].index:
data.at[i, 'N energy evaluations'] += full_equilibration_cost
all_submissions.append(gromacs_ee_submission)
# Sort the submissions to have all plots and tables in the same order.
all_submissions = sorted(all_submissions, key=lambda s: s.paper_name)
# Separate the main submissions from the data about nonequilibrium estimators.
main_submissions = [s for s in all_submissions if not ('Jarz' in s.paper_name or 'Gauss' in s.paper_name)]
noneq_submissions = [s for s in all_submissions if 'NS' in s.paper_name]
# Export YANK analysis and submissions to CSV/JSON tables.
yank_analysis.export(os.path.join(SAMPLING_DATA_DIR_PATH, 'reference_free_energies'))
for s in main_submissions:
file_base_path = os.path.join(SAMPLING_DATA_DIR_PATH, s.receipt_id + '-reference')
yank_analysis.export_by_submission(file_base_path, s)
export_submissions(all_submissions, reference_free_energies)
# Create example trajectory for the figure describing the challenge process.
plot_example_bias_variance(yank_analysis, max_n_eval_percentage=0.4, mixed_proportion=0.3)
# Cartoon explaining mean error and relative efficiency.
plot_mean_error_cartoon()
# Create figure with free energy, standard deviation, and bias as a function of computational cost.
plot_all_entries_trajectory(main_submissions, yank_analysis, zoomed=False)
plot_all_entries_trajectory(main_submissions, yank_analysis, zoomed=True)
# Create results and efficiency table.
print_relative_efficiency_table(main_submissions, yank_analysis, print_bias_corrected=False)
# Plot nonequilibrium-switching single-direction estimator.
plot_all_nonequilibrium_switching(noneq_submissions)
# Plot sensitivity analysis figure.
plot_restraint_and_barostat_analysis()
# Plot figure for HREX bias analysis.
plot_yank_bias()
# Supporting information
# ----------------------
# Absolute/relative efficiency as a function of the computational cost.
plot_relative_efficiencies(main_submissions, yank_analysis)
plot_relative_efficiencies(main_submissions, yank_analysis, ci=None, same_plot=True)
plot_absolute_efficiencies(main_submissions, yank_analysis)
# Relative efficiency for uni/bi-directional estimators.
print_nonequilibrium_relative_efficiencies(noneq_submissions)
# Plot replicate predictions table.
print_final_prediction_table(all_submissions, yank_analysis)
# Plot individual trajectories.
plot_all_single_trajectories_figures(all_submissions, yank_analysis)
# Plot statistical inefficiency analysis.
plot_hrex_stat_ineff_trajectories()
# Supporting information for bias section.
output_dir_path = os.path.join(SAMPLING_PAPER_DIR_PATH, 'SI_Figure-bias_hrex')
plot_decomposition('CB8-G3', starting_iteration=5, type='phase',
output_file_path=output_dir_path + '/free-energy-phase-decomposition.pdf')
plot_decomposition('CB8-G3', starting_iteration=5, type='entropy-enthalpy',
output_file_path=output_dir_path + '/free-energy-entropy-decomposition.pdf')
| mit |
ClimbsRocks/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 112 | 1819 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
hainm/statsmodels | examples/python/robust_models_1.py | 25 | 8588 |
## M-Estimators for Robust Linear Modeling
from __future__ import print_function
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import statsmodels.api as sm
# * An M-estimator minimizes the function
#
# $$Q(e_i, \rho) = \sum_i~\rho \left (\frac{e_i}{s}\right )$$
#
# where $\rho$ is a symmetric function of the residuals
#
# * The effect of $\rho$ is to reduce the influence of outliers
# * $s$ is an estimate of scale.
# * The robust estimates $\hat{\beta}$ are computed by the iteratively re-weighted least squares algorithm
# * We have several choices available for the weighting functions to be used
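# --- Illustrative sketch (not part of the original notebook) ---
# A hand-rolled IRLS pass with Huber weights, to make the algorithm described above
# concrete. `x_irls`/`y_irls` are made-up data; sm.RLM performs these steps internally.
x_irls = np.column_stack((np.ones(50), np.linspace(0, 10, 50)))
y_irls = np.dot(x_irls, [1.0, 2.0]) + np.random.standard_normal(50)
y_irls[::10] += 15  # contaminate a few responses with gross outliers
beta_irls = np.linalg.lstsq(x_irls, y_irls)[0]  # ordinary least-squares start
huber_norm = sm.robust.norms.HuberT()
for _ in range(20):
    resid_irls = y_irls - np.dot(x_irls, beta_irls)
    scale_irls = sm.robust.scale.mad(resid_irls)  # robust estimate of the scale s
    w_irls = huber_norm.weights(resid_irls / scale_irls)  # down-weight large residuals
    xw_irls = x_irls * w_irls[:, None]
    beta_irls = np.linalg.solve(np.dot(xw_irls.T, x_irls), np.dot(xw_irls.T, y_irls))
print(beta_irls)  # should end up close to the true coefficients [1.0, 2.0]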
norms = sm.robust.norms
def plot_weights(support, weights_func, xlabels, xticks):
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(support, weights_func(support))
ax.set_xticks(xticks)
ax.set_xticklabels(xlabels, fontsize=16)
ax.set_ylim(-.1, 1.1)
return ax
#### Andrew's Wave
help(norms.AndrewWave.weights)
a = 1.339
support = np.linspace(-np.pi*a, np.pi*a, 100)
andrew = norms.AndrewWave(a=a)
plot_weights(support, andrew.weights, ['$-\pi*a$', '0', '$\pi*a$'], [-np.pi*a, 0, np.pi*a]);
#### Hampel's 17A
help(norms.Hampel.weights)
c = 8
support = np.linspace(-3*c, 3*c, 1000)
hampel = norms.Hampel(a=2., b=4., c=c)
plot_weights(support, hampel.weights, ['-3*c', '0', '3*c'], [-3*c, 0, 3*c]);
#### Huber's t
help(norms.HuberT.weights)
t = 1.345
support = np.linspace(-3*t, 3*t, 1000)
huber = norms.HuberT(t=t)
plot_weights(support, huber.weights, ['-3*t', '0', '3*t'], [-3*t, 0, 3*t]);
#### Least Squares
help(norms.LeastSquares.weights)
support = np.linspace(-3, 3, 1000)
lst_sq = norms.LeastSquares()
plot_weights(support, lst_sq.weights, ['-3', '0', '3'], [-3, 0, 3]);
#### Ramsay's Ea
help(norms.RamsayE.weights)
a = .3
support = np.linspace(-3*a, 3*a, 1000)
ramsay = norms.RamsayE(a=a)
plot_weights(support, ramsay.weights, ['-3*a', '0', '3*a'], [-3*a, 0, 3*a]);
#### Trimmed Mean
help(norms.TrimmedMean.weights)
c = 2
support = np.linspace(-3*c, 3*c, 1000)
trimmed = norms.TrimmedMean(c=c)
plot_weights(support, trimmed.weights, ['-3*c', '0', '3*c'], [-3*c, 0, 3*c]);
#### Tukey's Biweight
help(norms.TukeyBiweight.weights)
c = 4.685
support = np.linspace(-3*c, 3*c, 1000)
tukey = norms.TukeyBiweight(c=c)
plot_weights(support, tukey.weights, ['-3*c', '0', '3*c'], [-3*c, 0, 3*c]);
#### Scale Estimators
# * Robust estimates of the location
x = np.array([1, 2, 3, 4, 500])
# * The mean is not a robust estimator of location
x.mean()
# * The median, on the other hand, is a robust estimator with a breakdown point of 50%
np.median(x)
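# Illustrative aside (not in the original notebook): even with two of five observations
# grossly contaminated, the median barely moves while the mean is dragged far away.
print(np.median([1, 2, 3, 400, 500]), np.mean([1, 2, 3, 400, 500]))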
# * Analogously for the scale
# * The standard deviation is not robust
x.std()
# Median Absolute Deviation
#
# $$ median_i |X_i - median_j(X_j)| $$
# Standardized Median Absolute Deviation is a consistent estimator for $\hat{\sigma}$
#
# $$\hat{\sigma}=K \cdot MAD$$
#
# where $K$ depends on the distribution. For the normal distribution for example,
#
# $$K = 1/\Phi^{-1}(.75) \approx 1.4826$$
stats.norm.ppf(.75)
print(x)
sm.robust.scale.stand_mad(x)
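# Illustrative check (not part of the original notebook): the standardized MAD written
# out by hand from the formula above; it should match the stand_mad value just computed.
print(np.median(np.abs(x - np.median(x))) / stats.norm.ppf(.75))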
np.array([1,2,3,4,5.]).std()
# * The default for Robust Linear Models is MAD
# * another popular choice is Huber's proposal 2
np.random.seed(12345)
fat_tails = stats.t(6).rvs(40)
kde = sm.nonparametric.KDE(fat_tails)
kde.fit()
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(kde.support, kde.density);
print(fat_tails.mean(), fat_tails.std())
print(stats.norm.fit(fat_tails))
print(stats.t.fit(fat_tails, f0=6))
huber = sm.robust.scale.Huber()
loc, scale = huber(fat_tails)
print(loc, scale)
sm.robust.stand_mad(fat_tails)
sm.robust.stand_mad(fat_tails, c=stats.t(6).ppf(.75))
sm.robust.scale.mad(fat_tails)
#### Duncan's Occupational Prestige data - M-estimation for outliers
from statsmodels.graphics.api import abline_plot
from statsmodels.formula.api import ols, rlm
prestige = sm.datasets.get_rdataset("Duncan", "car", cache=True).data
print(prestige.head(10))
fig = plt.figure(figsize=(12,12))
ax1 = fig.add_subplot(211, xlabel='Income', ylabel='Prestige')
ax1.scatter(prestige.income, prestige.prestige)
xy_outlier = prestige.ix['minister'][['income','prestige']]
ax1.annotate('Minister', xy_outlier, xy_outlier+1, fontsize=16)
ax2 = fig.add_subplot(212, xlabel='Education',
ylabel='Prestige')
ax2.scatter(prestige.education, prestige.prestige);
ols_model = ols('prestige ~ income + education', prestige).fit()
print(ols_model.summary())
infl = ols_model.get_influence()
student = infl.summary_frame()['student_resid']
print(student)
print(student.ix[np.abs(student) > 2])
print(infl.summary_frame().ix['minister'])
sidak = ols_model.outlier_test('sidak')
sidak.sort('unadj_p', inplace=True)
print(sidak)
fdr = ols_model.outlier_test('fdr_bh')
fdr.sort('unadj_p', inplace=True)
print(fdr)
rlm_model = rlm('prestige ~ income + education', prestige).fit()
print(rlm_model.summary())
print(rlm_model.weights)
#### Hertzsprung-Russell data for Star Cluster CYG OB1 - Leverage Points
# * Data is on the luminosity and temperature of 47 stars in the direction of Cygnus.
dta = sm.datasets.get_rdataset("starsCYG", "robustbase", cache=True).data
from matplotlib.patches import Ellipse
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111, xlabel='log(Temp)', ylabel='log(Light)', title='Hertzsprung-Russell Diagram of Star Cluster CYG OB1')
ax.scatter(*dta.values.T)
# highlight outliers
e = Ellipse((3.5, 6), .2, 1, alpha=.25, color='r')
ax.add_patch(e);
ax.annotate('Red giants', xy=(3.6, 6), xytext=(3.8, 6),
arrowprops=dict(facecolor='black', shrink=0.05, width=2),
horizontalalignment='left', verticalalignment='bottom',
clip_on=True, # clip to the axes bounding box
fontsize=16,
)
# annotate these with their index
for i,row in dta.ix[dta['log.Te'] < 3.8].iterrows():
ax.annotate(i, row, row + .01, fontsize=14)
xlim, ylim = ax.get_xlim(), ax.get_ylim()
from IPython.display import Image
Image(filename='star_diagram.png')
y = dta['log.light']
X = sm.add_constant(dta['log.Te'], prepend=True)
ols_model = sm.OLS(y, X).fit()
abline_plot(model_results=ols_model, ax=ax)
rlm_mod = sm.RLM(y, X, sm.robust.norms.TrimmedMean(.5)).fit()
abline_plot(model_results=rlm_mod, ax=ax, color='red')
# * Why? Because M-estimators are not robust to leverage points.
infl = ols_model.get_influence()
h_bar = 2*(ols_model.df_model + 1 )/ols_model.nobs
hat_diag = infl.summary_frame()['hat_diag']
hat_diag.ix[hat_diag > h_bar]
sidak2 = ols_model.outlier_test('sidak')
sidak2.sort('unadj_p', inplace=True)
print(sidak2)
fdr2 = ols_model.outlier_test('fdr_bh')
fdr2.sort('unadj_p', inplace=True)
print(fdr2)
# * Let's delete that line
del ax.lines[-1]
weights = np.ones(len(X))
weights[X[X['log.Te'] < 3.8].index.values - 1] = 0
wls_model = sm.WLS(y, X, weights=weights).fit()
abline_plot(model_results=wls_model, ax=ax, color='green')
# * MM estimators are good for this type of problem; unfortunately, we don't have these yet.
# * It's being worked on, but it gives a good excuse to look at the R cell magics in the notebook.
yy = y.values[:,None]
xx = X['log.Te'].values[:,None]
get_ipython().magic(u'load_ext rmagic')
get_ipython().magic(u'R library(robustbase)')
get_ipython().magic(u'Rpush yy xx')
get_ipython().magic(u'R mod <- lmrob(yy ~ xx);')
get_ipython().magic(u'R params <- mod$coefficients;')
get_ipython().magic(u'Rpull params')
get_ipython().magic(u'R print(mod)')
print(params)
abline_plot(intercept=params[0], slope=params[1], ax=ax, color='green')
#### Exercise: Breakdown points of M-estimator
np.random.seed(12345)
nobs = 200
beta_true = np.array([3, 1, 2.5, 3, -4])
X = np.random.uniform(-20,20, size=(nobs, len(beta_true)-1))
# stack a constant in front
X = sm.add_constant(X, prepend=True) # np.c_[np.ones(nobs), X]
mc_iter = 500
contaminate = .25 # fraction of response variables to contaminate
all_betas = []
for i in range(mc_iter):
y = np.dot(X, beta_true) + np.random.normal(size=200)
random_idx = np.random.randint(0, nobs, size=int(contaminate * nobs))
y[random_idx] = np.random.uniform(-750, 750)
beta_hat = sm.RLM(y, X).fit().params
all_betas.append(beta_hat)
all_betas = np.asarray(all_betas)
se_loss = lambda x : np.linalg.norm(x, ord=2)**2
se_beta = map(se_loss, all_betas - beta_true)
##### Squared error loss
np.array(se_beta).mean()
all_betas.mean(0)
beta_true
se_loss(all_betas.mean(0) - beta_true)
| bsd-3-clause |
Unidata/MetPy | v0.6/_downloads/Find_Natural_Neighbors_Verification.py | 3 | 2729 | # Copyright (c) 2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Find Natural Neighbors Verification
===================================
Finding natural neighbors in a triangulation
A triangle is a natural neighbor of a point if that point lies within the triangle's
circumscribed circle, i.e. within one circumradius of that circle's circumcenter.
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial import Delaunay
from metpy.gridding.triangles import find_natural_neighbors
# Create test observations, test points, and plot the triangulation and points.
gx, gy = np.meshgrid(np.arange(0, 20, 4), np.arange(0, 20, 4))
pts = np.vstack([gx.ravel(), gy.ravel()]).T
tri = Delaunay(pts)
fig, ax = plt.subplots(figsize=(15, 10))
for i, inds in enumerate(tri.simplices):
pts = tri.points[inds]
x, y = np.vstack((pts, pts[0])).T
ax.plot(x, y)
ax.annotate(i, xy=(np.mean(x), np.mean(y)))
test_points = np.array([[2, 2], [5, 10], [12, 13.4], [12, 8], [20, 20]])
for i, (x, y) in enumerate(test_points):
ax.plot(x, y, 'k.', markersize=6)
ax.annotate('test ' + str(i), xy=(x, y))
###########################################
# Since finding natural neighbors already calculates circumcenters and circumradii, return
# that information for later use.
#
# The key of the neighbors dictionary refers to the test point index, and the list of integers
# are the triangles that are natural neighbors of that particular test point.
#
# Since point 4 is far away from the triangulation, it has no natural neighbors.
# Point 3 is at the confluence of several triangles so it has many natural neighbors.
neighbors, tri_info = find_natural_neighbors(tri, test_points)
print(neighbors)
###########################################
# We can then use the information in tri_info later.
#
# The dictionary key is the index of a particular triangle in the Delaunay triangulation data
# structure. 'cc' is that triangle's circumcenter, and 'r' is the radius of the circumcircle
# containing that triangle.
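# Quick illustrative check (not part of the original example): every natural-neighbor
# triangle of test point 0 should contain that point within its circumcircle, i.e. the
# distance to the circumcenter 'cc' must not exceed the circumradius 'r'.
for _tri_idx in neighbors[0]:
    _dist = np.linalg.norm(test_points[0] - tri_info[_tri_idx]['cc'])
    print(_tri_idx, _dist <= tri_info[_tri_idx]['r'])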
fig, ax = plt.subplots(figsize=(15, 10))
for i, inds in enumerate(tri.simplices):
pts = tri.points[inds]
x, y = np.vstack((pts, pts[0])).T
ax.plot(x, y)
ax.annotate(i, xy=(np.mean(x), np.mean(y)))
# Using circumcenter and radius information from tri_info, plot circumcircles and
# circumcenters for each triangle.
for _idx, item in tri_info.items():
ax.plot(item['cc'][0], item['cc'][1], 'k.', markersize=5)
circ = plt.Circle(item['cc'], item['r'], edgecolor='k', facecolor='none',
transform=fig.axes[0].transData)
ax.add_artist(circ)
ax.set_aspect('equal', 'datalim')
plt.show()
| bsd-3-clause |
ijat/Hotspot-PUTRA-Auto-login | PyInstaller-3.2/PyInstaller/hooks/hook-IPython.py | 1 | 1076 | #-----------------------------------------------------------------------------
# Copyright (c) 2013-2016, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# Tested with IPython 4.0.0.
from PyInstaller.compat import modname_tkinter, is_win, is_darwin
from PyInstaller.utils.hooks import collect_data_files, collect_submodules
# Ignore 'matplotlib'. IPython contains support for matplotlib.
# Ignore GUI libraries. IPython supports integration with GUI frameworks.
# Assume that it will be imported by any other module when the user really
# uses it.
excludedimports = ['gtk', 'matplotlib', 'PyQt4', 'PyQt5', 'PySide']
# IPython uses 'tkinter' for clipboard access on Linux/Unix. Exclude it on Windows and OS X.
if is_win or is_darwin:
excludedimports.append(modname_tkinter)
datas = collect_data_files('IPython')
| gpl-3.0 |
chaubold/hytra | scripts/train_transition_classifier.py | 1 | 17108 | # pythonpath modification to make hytra and empryonic available
# for import without requiring it to be installed
from __future__ import print_function, absolute_import, nested_scopes, generators, division, with_statement, unicode_literals
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# standard imports
from compiler.ast import flatten
import logging
import glob
import vigra
from vigra import numpy as np
import h5py
from sklearn.neighbors import KDTree
from hytra.pluginsystem.plugin_manager import TrackingPluginManager
import hytra.util.axesconversion
logger = logging.getLogger('TransitionClassifier')
logger.setLevel(logging.DEBUG)
np.seterr(all='raise')
# read in 'n2-n1' of images
def read_in_images(n1, n2, files, axes):
gt_labelimage = [vigra.impex.readHDF5(f, 'segmentation/labels') for f in files[n1:n2]]
gt_labelimage = [hytra.util.axesconversion.adjustOrder(img, axes, 'xyzc') for img in gt_labelimage]
logger.info("Found segmentation of shape {}".format(gt_labelimage[0].shape))
return gt_labelimage
# compute features from input data and return them
def compute_features(raw_image, labeled_image, n1, n2, pluginManager, filepath):
# perhaps there is an elegant way to get into the RegionFeatureAccumulator.
# For now, the new features are stored in a separate vector
allFeat = []
for i in range(0, n2 - n1):
moreFeats, _ = pluginManager.applyObjectFeatureComputationPlugins(
len(raw_image.squeeze().shape)-1, raw_image[..., i, 0], labeled_image[i][..., 0], i, filepath)
frameFeatureItems = []
for f in moreFeats:
frameFeatureItems = frameFeatureItems + f.items()
allFeat.append(dict(frameFeatureItems))
return allFeat
# read in 'n2-n1' of labels
def read_positiveLabels(n1, n2, files):
gt_moves = [vigra.impex.readHDF5(f, 'tracking/Moves') for f in files[n1+1:n2]]
return gt_moves
def getValidRegionCentersAndTheirIDs(featureDict,
countFeatureName='Count',
regionCenterName='RegionCenter'):
"""
From the feature dictionary of a certain frame,
find all objects with pixel count > 0, and return their
region centers and ids.
"""
validObjectMask = featureDict[countFeatureName] > 0
validObjectMask[0] = False
regionCenters = featureDict[regionCenterName][validObjectMask, :]
objectIds = list(np.where(validObjectMask)[0])
return regionCenters, objectIds
def negativeLabels(features, positiveLabels):
"""
Compute negative labels by finding 3 nearest neighbors in the next frame, and
filtering out those pairings that are part of the positiveLabels.
**Returns** a list of lists of pairs of indices, where there are as many inner lists
as there are pairs of consecutive frames ordered by time,
e.g. for frame pairs (0,1), (1,2), ... (n-1,n).
Each pair in such a list then contains an index into the earlier frame of the pair,
and one index into the later frame.
"""
numFrames = len(features)
neg_lab = []
for i in range(1, numFrames): # for all frames but the first
logger.debug("Frame {}\n".format(i))
frameNegLab = []
# build kdtree for frame i
centersAtI, objectIdsAtI = getValidRegionCentersAndTheirIDs(features[i])
kdt = KDTree(centersAtI, metric='euclidean')
# find k=3 nearest neighbors of each object of frame i-1 in frame i
centersAtIMinusOne, objectIdsAtIMinusOne = getValidRegionCentersAndTheirIDs(features[i - 1])
neighb = kdt.query(centersAtIMinusOne, k=3, return_distance=False)
for j in range(0, neighb.shape[0]): # for all valid objects in frame i-1
logger.debug('looking at neighbors of {} at position {}'.format(
objectIdsAtIMinusOne[j], features[i - 1]['RegionCenter'][objectIdsAtIMinusOne[j], ...]))
for m in range(0, neighb.shape[1]): # for all neighbors
pair = [objectIdsAtIMinusOne[j], objectIdsAtI[neighb[j][m]]]
if pair not in positiveLabels[i - 1].tolist():
# add one because we've removed the first element when creating the KD tree
frameNegLab.append(pair)
logger.debug("Adding negative example: {} at position {}".format(
pair, features[i]['RegionCenter'][objectIdsAtI[neighb[j][m]], ...]))
else:
logger.debug("Discarding negative example {} which is a positive annotation".format(pair))
neg_lab.append(frameNegLab)
return neg_lab
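# --- Illustrative sketch (not part of the original script) ---
# The k=3 nearest-neighbor query used above, on made-up 2D region centers: each row of
# `_demo_neighb` lists, for one object of the earlier frame, the indices of the three
# closest objects in the later frame, i.e. the candidate pairings before filtering.
_demo_prev_centers = np.array([[0.0, 0.0], [10.0, 10.0]])
_demo_next_centers = np.array([[0.5, 0.1], [9.5, 10.2], [20.0, 20.0], [0.2, 0.4]])
_demo_neighb = KDTree(_demo_next_centers, metric='euclidean').query(
    _demo_prev_centers, k=3, return_distance=False)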
def find_features_without_NaNs(features):
"""
Remove all features from the list of selected features which have NaNs
"""
selectedFeatures = features[0].keys()
for featuresPerFrame in features:
for key, value in featuresPerFrame.items():
if not isinstance(value, list) and (np.any(np.isnan(value)) or np.any(np.isinf(value))):
try:
selectedFeatures.remove(key)
except:
pass # has already been deleted
forbidden = ["Global<Maximum >", "Global<Minimum >", 'Histogram', 'Polygon', 'Defect Center',
'Center', 'Input Center', 'Weighted<RegionCenter>']
for f in forbidden:
if f in selectedFeatures:
selectedFeatures.remove(f)
selectedFeatures.sort()
return selectedFeatures
class TransitionClassifier:
def __init__(self, selectedFeatures, numSamples=None):
"""
Set up a transition classifier class that makes it easy to add samples, train and store the RF.
:param selectedFeatures: list of feature names that are supposed to be used
:param numSamples: if given, the data array for the samples is allocated with the proper dimensions,
otherwise it needs to be resized whenever new samples are added.
"""
self.rf = vigra.learning.RandomForest()
self.mydata = None
self.labels = []
self.selectedFeatures = selectedFeatures
self._numSamples = numSamples
self._nextIdx = 0
# TODO: check whether prediction here and in hypotheses graph script are the same!
def addSample(self, f1, f2, label, pluginManager):
# if self.labels == []:
self.labels.append(label)
# else:
# self.labels = np.concatenate((np.array(self.labels),label)) # for adding batches of features
features = self.constructSampleFeatureVector(f1, f2, pluginManager)
if self._numSamples is None:
# use vstack
if self.mydata is None:
self.mydata = features
else:
self.mydata = np.vstack((self.mydata, features))
else:
# allocate full array once, then fill in row by row
if self.mydata is None:
self.mydata = np.zeros((self._numSamples, features.shape[0]))
assert(self._nextIdx < self._numSamples)
self.mydata[self._nextIdx, :] = features
self._nextIdx += 1
def constructSampleFeatureVector(self, f1, f2, pluginManager):
featVec = pluginManager.applyTransitionFeatureVectorConstructionPlugins(f1, f2, self.selectedFeatures)
return np.array(featVec)
# adding a comfortable function, where one can easily introduce the data
def add_allData(self, mydata, labels):
self.mydata = mydata
self.labels = labels
def train(self, withFeatureImportance=False):
logger.info(
"Training classifier from {} positive and {} negative labels".format(
np.count_nonzero(np.asarray(self.labels)), len(self.labels) - np.count_nonzero(np.asarray(self.labels))))
logger.info("Training classifier from a feature vector of length {}".format(self.mydata.shape))
if withFeatureImportance:
oob, featImportance = self.rf.learnRFWithFeatureSelection(
self.mydata.astype("float32"),
(np.asarray(self.labels)).astype("uint32").reshape(-1, 1))
logger.debug("RF feature importance: {}".format(featImportance))
# logger.debug('Feature names: {}'.format(self.featureNames))
else:
oob = self.rf.learnRF(
self.mydata.astype("float32"),
(np.asarray(self.labels)).astype("uint32").reshape(-1, 1))
logger.info("RF trained with OOB Error {}".format(oob))
# def predictSample(self, test_data=None, f1=None, f2=None):
# if test_data is not None:
# return self.rf.predictLabels(test_data.astype('float32'))
# else:
# data = self.constructSampleFeatureVector(f1, f2)
# if len(data.shape) < 2:
# data = np.expand_dims(data, axis=0)
# return self.rf.predictLabels(data.astype('float32'))
# def predictProbabilities(self, test_data=None, f1=None, f2=None):
# if test_data is not None:
# return self.rf.predictProbabilities(test_data.astype('float32'))
# else:
# data = self.constructSampleFeatureVector(f1, f2)
# print(data)
# if len(data.shape) < 2:
# data = np.expand_dims(data, axis=0)
# return self.rf.predictProbabilities(data.astype('float32'))
def predictLabels(self, test_data, threshold=0.5):
prob = self.rf.predictProbabilities(test_data.astype('float32'))
res = np.copy(prob)
for i in range(0, len(prob)):
if prob[i][1] >= threshold:
res[i] = 1.
else:
res[i] = 0
return np.delete(res, 0, 1)
def writeRF(self, outputFilename):
self.rf.writeHDF5(outputFilename, pathInFile='/ClassifierForests/Forest0000')
# write selected features
with h5py.File(outputFilename, 'r+') as f:
featureNamesH5 = f.create_group('SelectedFeatures')
featureNamesH5 = featureNamesH5.create_group('Standard Object Features')
for feature in self.selectedFeatures:
featureNamesH5.create_group(feature)
if __name__ == '__main__':
import configargparse as argparse
parser = argparse.ArgumentParser(description="trainRF",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-c', '--config', is_config_file=True, help='config file path')
parser.add_argument("--groundtruth", dest='filepath', type=str, nargs='+',
help="read ground truth from this folder. Can be also a list of paths, to train from more datasets.", metavar="FILE")
parser.add_argument("--groundtruth-axes", dest='groundtruth_axes', type=str, nargs='+', default=['xyzc'],
help="axes ordering of the ground truth segmentations per frame (no t!), e.g. xyzc", metavar="FILE")
parser.add_argument("--raw-data-file", dest='rawimage_filename', type=str, nargs='+',
help="filepath+name of the raw image. Can be a list of paths, to train from more datasets.", metavar="FILE")
parser.add_argument("--raw-data-path", dest='rawimage_h5_path', type=str,
help="Path inside the rawimage HDF5 file", default='volume/data')
parser.add_argument("--raw-data-axes", dest='rawimage_axes', type=str, nargs='+', default=['txyzc'],
help="axes ordering of the raw image, e.g. xyztc. Can be a list of paths, to train from more datasets.", metavar="FILE")
parser.add_argument("--init-frame", default=0, type=int, dest='initFrame',
help="where to begin reading the frames")
parser.add_argument("--end-frame", default=-1, type=int, dest='endFrame',
help="where to end frames")
parser.add_argument("--transition-classifier-file", dest='outputFilename', type=str,
help="save RF into file", metavar="FILE")
parser.add_argument("--filepattern", dest='filepattern', type=str, nargs='+', default=['0*.h5'],
help="File pattern of the ground truth files. Can be also a list of paths, to train from more datasets.")
parser.add_argument("--verbose", dest='verbose', action='store_true', default=False)
parser.add_argument('--plugin-paths', dest='pluginPaths', type=str, nargs='+',
default=[os.path.abspath('../hytra/plugins')],
help='A list of paths to search for plugins for the tracking pipeline.')
args, unknown = parser.parse_known_args()
logging.basicConfig(level=logging.INFO)
if args.verbose:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
logging.debug("Ignoring unknown parameters: {}".format(unknown))
assert len(args.rawimage_filename) == len(args.rawimage_axes) == len(args.filepattern) == len(args.filepath) == len(args.groundtruth_axes)
# read raw image
numSamples = 0
mlabels = None
for dataset in range(len(args.rawimage_filename)):
rawimage_filename = args.rawimage_filename[dataset]
with h5py.File(rawimage_filename, 'r') as h5raw:
rawimage = h5raw[args.rawimage_h5_path].value
# transform such that the order is the following: X,Y,(Z),T, C
rawimage = hytra.util.axesconversion.adjustOrder(rawimage, args.rawimage_axes[dataset], 'xyztc')
logger.info('Done loading raw data from dataset {} of shape {}'.format(dataset, rawimage.shape))
# find ground truth files
# filepath is now a list of filepaths
filepath = args.filepath[dataset]
# filepattern is now a list of filepatterns
files = glob.glob(os.path.join(filepath, args.filepattern[dataset]))
files.sort()
initFrame = args.initFrame
endFrame = args.endFrame
if endFrame < 0:
endFrame += len(files)
# compute features
trackingPluginManager = TrackingPluginManager(verbose=args.verbose,
pluginPaths=args.pluginPaths)
features = compute_features(rawimage,
read_in_images(initFrame, endFrame, files, args.groundtruth_axes[dataset]),
initFrame,
endFrame,
trackingPluginManager,
rawimage_filename)
logger.info('Done computing features from dataset {}'.format(dataset))
selectedFeatures = find_features_without_NaNs(features)
pos_labels = read_positiveLabels(initFrame, endFrame, files)
neg_labels = negativeLabels(features, pos_labels)
numSamples += 2 * sum([len(l) for l in pos_labels]) + sum([len(l) for l in neg_labels])
logger.info('Done extracting {} samples'.format(numSamples))
TC = TransitionClassifier(selectedFeatures, numSamples)
if dataset > 0:
TC.labels = mlabels # restore labels overwritten by constructor
# compute featuresA for each object A from the feature matrix from Vigra
def compute_ObjFeatures(features, obj):
featureDict = {}
for key in features:
if key == "Global<Maximum >" or key == "Global<Minimum >": # this ones have only one element
featureDict[key] = features[key]
else:
featureDict[key] = features[key][obj]
return featureDict
for k in range(0, len(features) - 1):
for i in pos_labels[k]:
# positive
logger.debug("Adding positive sample {} from pos {} to {}".format(
i, features[k]['RegionCenter'][i[0]], features[k + 1]['RegionCenter'][i[1]]))
TC.addSample(compute_ObjFeatures(
features[k], i[0]), compute_ObjFeatures(features[k + 1], i[1]), 1, trackingPluginManager)
TC.addSample(compute_ObjFeatures(
features[k + 1], i[1]), compute_ObjFeatures(features[k], i[0]), 1, trackingPluginManager)
for i in neg_labels[k]:
# negative
logger.debug("Adding negative sample {} from pos {} to {}".format(i,
features[k]['RegionCenter'][i[0]], features[k + 1]['RegionCenter'][i[1]]))
TC.addSample(compute_ObjFeatures(
features[k], i[0]), compute_ObjFeatures(features[k + 1], i[1]), 0, trackingPluginManager)
mlabels = TC.labels
logger.info('Done adding samples to RF. Beginning training...')
TC.train()
logger.info('Done training RF')
srcObject = compute_ObjFeatures(features[0], 1)
destObject = compute_ObjFeatures(features[1], 2)
# delete file before writing
if os.path.exists(args.outputFilename):
os.remove(args.outputFilename)
TC.writeRF(args.outputFilename) # writes learned RF to disk
| mit |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/compat/__init__.py | 6 | 11686 | """
compat
======
Cross-compatible functions for Python 2 and 3.
Key items to import for 2/3 compatible code:
* iterators: range(), map(), zip(), filter(), reduce()
* lists: lrange(), lmap(), lzip(), lfilter()
* unicode: u() [u"" is a syntax error in Python 3.0-3.2]
* longs: long (int in Python 3)
* callable
* iterable method compatibility: iteritems, iterkeys, itervalues
* Uses the original method if available, otherwise uses items, keys, values.
* types:
* text_type: unicode in Python 2, str in Python 3
* binary_type: str in Python 2, bytes in Python 3
* string_types: basestring in Python 2, str in Python 3
* bind_method: binds functions to classes
* add_metaclass(metaclass) - class decorator that recreates class with with the
given metaclass instead (and avoids intermediary class creation)
Other items:
* OrderedDefaultDict
* platform checker
"""
# pylint: disable=W0611
# flake8: noqa
import functools
import itertools
from distutils.version import LooseVersion
from itertools import product
import sys
import types
from unicodedata import east_asian_width
import struct
import inspect
from collections import namedtuple
PY2 = sys.version_info[0] == 2
PY3 = (sys.version_info[0] >= 3)
PY35 = (sys.version_info >= (3, 5))
PY36 = (sys.version_info >= (3, 6))
try:
import __builtin__ as builtins
# not writeable when instantiated with string, doesn't handle unicode well
from cStringIO import StringIO as cStringIO
# always writeable
from StringIO import StringIO
BytesIO = StringIO
import cPickle
import httplib
except ImportError:
import builtins
from io import StringIO, BytesIO
cStringIO = StringIO
import pickle as cPickle
import http.client as httplib
from pandas.compat.chainmap import DeepChainMap
if PY3:
def isidentifier(s):
return s.isidentifier()
def str_to_bytes(s, encoding=None):
return s.encode(encoding or 'ascii')
def bytes_to_str(b, encoding=None):
return b.decode(encoding or 'utf-8')
# The signature version below is directly copied from Django,
# https://github.com/django/django/pull/4846
def signature(f):
sig = inspect.signature(f)
args = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
]
varargs = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_POSITIONAL
]
varargs = varargs[0] if varargs else None
keywords = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_KEYWORD
]
keywords = keywords[0] if keywords else None
defaults = [
p.default for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
and p.default is not p.empty
] or None
argspec = namedtuple('Signature', ['args', 'defaults',
'varargs', 'keywords'])
return argspec(args, defaults, varargs, keywords)
# have to explicitly put builtins into the namespace
range = range
map = map
zip = zip
filter = filter
intern = sys.intern
reduce = functools.reduce
long = int
unichr = chr
# This was introduced in Python 3.3, but we don't support
# Python 3.x < 3.4, so checking PY3 is safe.
FileNotFoundError = FileNotFoundError
# list-producing versions of the major Python iterating functions
def lrange(*args, **kwargs):
return list(range(*args, **kwargs))
def lzip(*args, **kwargs):
return list(zip(*args, **kwargs))
def lmap(*args, **kwargs):
return list(map(*args, **kwargs))
def lfilter(*args, **kwargs):
return list(filter(*args, **kwargs))
else:
# Python 2
import re
_name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
FileNotFoundError = IOError
def isidentifier(s, dotted=False):
return bool(_name_re.match(s))
def str_to_bytes(s, encoding='ascii'):
return s
def bytes_to_str(b, encoding='ascii'):
return b
def signature(f):
return inspect.getargspec(f)
# import iterator versions of these functions
range = xrange
intern = intern
zip = itertools.izip
filter = itertools.ifilter
map = itertools.imap
reduce = reduce
long = long
unichr = unichr
# Python 2-builtin ranges produce lists
lrange = builtins.range
lzip = builtins.zip
lmap = builtins.map
lfilter = builtins.filter
if PY2:
def iteritems(obj, **kw):
return obj.iteritems(**kw)
def iterkeys(obj, **kw):
return obj.iterkeys(**kw)
def itervalues(obj, **kw):
return obj.itervalues(**kw)
next = lambda it: it.next()
else:
def iteritems(obj, **kw):
return iter(obj.items(**kw))
def iterkeys(obj, **kw):
return iter(obj.keys(**kw))
def itervalues(obj, **kw):
return iter(obj.values(**kw))
next = next
def bind_method(cls, name, func):
"""Bind a method to class, python 2 and python 3 compatible.
Parameters
----------
cls : type
class to receive bound method
name : basestring
name of method on class instance
func : function
function to be bound as method
Returns
-------
None
"""
# only python 2 has bound/unbound method issue
if not PY3:
setattr(cls, name, types.MethodType(func, None, cls))
else:
setattr(cls, name, func)
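# Illustrative usage of bind_method — a minimal sketch with hypothetical names:
#   class _Demo(object):
#       pass
#   bind_method(_Demo, 'double', lambda self, x: 2 * x)
#   _Demo().double(3)   # -> 6 on both Python 2 and 3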
# ----------------------------------------------------------------------------
# functions largely based / taken from the six module
# Much of the code in this module comes from Benjamin Peterson's six library.
# The license for this library can be found in LICENSES/SIX and the code can be
# found at https://bitbucket.org/gutworth/six
# Definition of East Asian Width
# http://unicode.org/reports/tr11/
# Ambiguous width can be changed by option
_EAW_MAP = {'Na': 1, 'N': 1, 'W': 2, 'F': 2, 'H': 1}
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
def u(s):
return s
def u_safe(s):
return s
def strlen(data, encoding=None):
# encoding is for compat with PY2
return len(data)
def east_asian_len(data, encoding=None, ambiguous_width=1):
"""
Calculate display width considering unicode East Asian Width
"""
if isinstance(data, text_type):
return sum([_EAW_MAP.get(east_asian_width(c), ambiguous_width) for c in data])
else:
return len(data)
def import_lzma():
""" import lzma from the std library """
import lzma
return lzma
def set_function_name(f, name, cls):
""" Bind the name/qualname attributes of the function """
f.__name__ = name
f.__qualname__ = '{klass}.{name}'.format(
klass=cls.__name__,
name=name)
f.__module__ = cls.__module__
return f
ResourceWarning = ResourceWarning
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
def u(s):
return unicode(s, "unicode_escape")
def u_safe(s):
try:
return unicode(s, "unicode_escape")
except:
return s
def strlen(data, encoding=None):
try:
data = data.decode(encoding)
except UnicodeError:
pass
return len(data)
def east_asian_len(data, encoding=None, ambiguous_width=1):
"""
Calculate display width considering unicode East Asian Width
"""
if isinstance(data, text_type):
try:
data = data.decode(encoding)
except UnicodeError:
pass
return sum([_EAW_MAP.get(east_asian_width(c), ambiguous_width) for c in data])
else:
return len(data)
def import_lzma():
""" import the backported lzma library
or raise ImportError if not available """
from backports import lzma
return lzma
def set_function_name(f, name, cls):
""" Bind the name attributes of the function """
f.__name__ = name
return f
class ResourceWarning(Warning):
pass
string_and_binary_types = string_types + (binary_type,)
try:
# callable reintroduced in later versions of Python
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
for slots_var in orig_vars.get('__slots__', ()):
orig_vars.pop(slots_var)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
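# Illustrative usage of add_metaclass — a minimal sketch with hypothetical names:
#   class Meta(type):
#       pass
#   @add_metaclass(Meta)
#   class Example(object):
#       pass
#   type(Example) is Meta   # -> True, without the PY2/PY3 __metaclass__ syntax split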
from collections import OrderedDict, Counter
if PY3:
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc.with_traceback(traceback)
else:
# this version of raise is a syntax error in Python 3
exec("""
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc, None, traceback
""")
raise_with_traceback.__doc__ = """Raise exception with existing traceback.
If traceback is not passed, uses sys.exc_info() to get traceback."""
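# Illustrative usage of raise_with_traceback — a minimal sketch: re-raise a caught error as a
# different exception type while preserving the original traceback.
#   try:
#       {}['missing']
#   except KeyError:
#       raise_with_traceback(ValueError('lookup failed'))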
# http://stackoverflow.com/questions/4126348
# Thanks to @martineau at SO
from dateutil import parser as _date_parser
import dateutil
if LooseVersion(dateutil.__version__) < '2.0':
@functools.wraps(_date_parser.parse)
def parse_date(timestr, *args, **kwargs):
timestr = bytes(timestr)
return _date_parser.parse(timestr, *args, **kwargs)
elif PY2 and LooseVersion(dateutil.__version__) == '2.0':
# dateutil brokenness
raise Exception('dateutil 2.0 incompatible with Python 2.x, you must '
'install version 1.5 or 2.1+!')
else:
parse_date = _date_parser.parse
class OrderedDefaultdict(OrderedDict):
def __init__(self, *args, **kwargs):
newdefault = None
newargs = ()
if args:
newdefault = args[0]
if not (newdefault is None or callable(newdefault)):
raise TypeError('first argument must be callable or None')
newargs = args[1:]
self.default_factory = newdefault
super(self.__class__, self).__init__(*newargs, **kwargs)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self): # optional, for pickle support
args = self.default_factory if self.default_factory else tuple()
return type(self), args, None, None, list(self.items())
# https://github.com/pandas-dev/pandas/pull/9123
def is_platform_little_endian():
""" am I little endian """
return sys.byteorder == 'little'
def is_platform_windows():
return sys.platform == 'win32' or sys.platform == 'cygwin'
def is_platform_linux():
return sys.platform == 'linux2'
def is_platform_mac():
return sys.platform == 'darwin'
def is_platform_32bit():
return struct.calcsize("P") * 8 < 64
| mit |
Brett777/Predict-Risk | ApprovalModel.py | 1 | 1979 |
# coding: utf-8
# In[ ]:
import h2o
import pandas as pd
# initialize the model scoring server
h2o.init(nthreads=1, max_mem_size=1, start_h2o=True, strict_version_check=False)
def approve_loan(Loan_Amount,Term,Interest_Rate,Employment_Years,Home_Ownership,Annual_Income,Verification_Status,Loan_Purpose,State,
Debt_to_Income,Delinquent_2yr,Revolving_Cr_Util,Total_Accounts,Longest_Credit_Length):
# connect to the model scoring service
h2o.connect()
# open the downloaded model
ChurnPredictor = h2o.load_model(path='DRF_model_1496459915419_4')
# define a feature vector to evaluate with the model
newData = pd.DataFrame({'Loan_Amount' : Loan_Amount,
'Term' : Term,
'Interest_Rate' : Interest_Rate,
'Employment_Years' : Employment_Years,
'Home_Ownership' : Home_Ownership,
'Annual_Income' : Annual_Income,
'Verification_Status' : Verification_Status,
'Loan_Purpose' : Loan_Purpose,
'State' : State,
'Debt_to_Income' : Debt_to_Income,
'Delinquent_2yr' : Delinquent_2yr,
'Revolving_Cr_Util' : Revolving_Cr_Util,
'Total_Accounts' : Total_Accounts,
'Longest_Credit_Length' : Longest_Credit_Length}, index=[0])
# evaluate the feature vector using the model
predictions = ChurnPredictor.predict(h2o.H2OFrame(newData))
predictionsOut = h2o.as_list(predictions, use_pandas=False)
prediction = predictionsOut[1][0]
probabilityBad = predictionsOut[1][1]
probabilityGood = predictionsOut[1][2]
return "Prediction: " + str(prediction) + " |Probability of Bad Loan: " + str(probabilityBad) + " |Probability of Good Loan: " + str(probabilityGood)
| apache-2.0 |
datacommonsorg/data | scripts/india_census/common/generic_base.py | 1 | 5322 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import csv
import copy
import pandas as pd
CENSUS_DATA_COLUMN_START = 7
class CensusGenericDataLoaderBase(object):
GENERIC_TEMPLATE_STAT_VAR = """Node: {StatisticalVariable}
description: "{description}"
typeOf: dcs:StatisticalVariable
populationType: schema:Person
measuredProperty: {measuredProperty}
"""
GENERIC_TEMPLATE_TMCF = """Node: E:IndiaCensus{year}_{dataset_name}->E0
typeOf: dcs:StatVarObservation
variableMeasured: C:IndiaCensus{year}_{dataset_name}->StatisticalVariable
observationDate: C:IndiaCensus{year}_{dataset_name}->Year
observationAbout: E:IndiaCensus{year}_{dataset_name}->E1
value: C:IndiaCensus{year}_{dataset_name}->Value
Node: E:IndiaCensus{year}_{dataset_name}->E1
typeOf: schema:Place
indianCensusAreaCode{year}: C:IndiaCensus{year}_{dataset_name}->census_location_id"""
"""An object that represents Census Data and its variables.
Attributes:
census_columns (list): It will have all the data column names of a dataset
census_year : Census year
csv_file_path : Path where cleaned csv file will be saved
data_file_path : Input XLS file from Census of India. Can be url or local path.
dataset_name : Census dataset name. Eg:Primary_Abstract_Data
existing_stat_var (list): List of existing stat vars that we don't need to generate
mcf (list): Description
mcf_file_path : Description
metadata_file_path : Description
raw_df : Raw census data as dataframe
stat_var_index (dict): local storage for census column name and corresponding statvar
tmcf_file_path : Path where generated tmcf file will be saved
"""
def __init__(self, data_file_path, metadata_file_path, mcf_file_path,
tmcf_file_path, csv_file_path, existing_stat_var, census_year,
dataset_name):
"""
Constructor
Args:
data_file_path : Input XLS file from Census of India. Can be url or local path
metadata_file_path : Meta data csv file which has attribute details
mcf_file_path : Path where generated mcf file will be saved
tmcf_file_path : Path where generated tmcf file will be saved
csv_file_path : Path where cleaned csv file will be saved
existing_stat_var : List of existing stat vars that we don't need to generate
census_year : Census Year
dataset_name : Census dataset name. Eg:Primary_Abstract_Data
"""
self.data_file_path = data_file_path
self.metadata_file_path = metadata_file_path
self.mcf_file_path = mcf_file_path
self.csv_file_path = csv_file_path
self.tmcf_file_path = tmcf_file_path
self.existing_stat_var = existing_stat_var
self.census_year = census_year
self.dataset_name = dataset_name
self.raw_df = None
self.stat_var_index = {}
self.census_columns = []
def _download_and_standardize(self):
dtype = {
'State': str,
'District': str,
'Subdistt': str,
"Town/Village": str
}
self.raw_df = pd.read_excel(self.data_file_path, dtype=dtype)
self.census_columns = self.raw_df.columns[CENSUS_DATA_COLUMN_START:]
def _format_location(self, row):
# In this specific format there is no Level defined.
# A non zero location code from the lowest administration area
# takes the precedence.
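        # e.g. State="28", District="532", Subdistt="00000", Town/Village="000000" resolves to
        # "532" (district level); a row with all codes zero resolves to "0" (the India-level record).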
if row["Town/Village"] != "000000":
return row["Town/Village"]
elif row["Subdistt"] != "00000":
return row["Subdistt"]
elif row["District"] != "000":
return row["District"]
elif row["State"] != "00":
return row["State"]
else:
# This is india level location
return "0"
def _format_data(self):
# This function is overridden in the child class
pass
def _get_base_name(self, row):
# This function is overridden in the child class
name = "Count_"
return name
def _create_variable(self, data_row, **kwargs):
# This function is overridden in the child class
pass
def _create_mcf(self):
# This function is overridden in the child class
pass
def _create_tmcf(self):
with open(self.tmcf_file_path, 'w+', newline='') as f_out:
f_out.write(
self.GENERIC_TEMPLATE_TMCF.format(
year=self.census_year, dataset_name=self.dataset_name))
def process(self):
self._download_and_standardize()
self._create_mcf()
self._create_tmcf()
self._format_data()
| apache-2.0 |
bundgus/python-playground | matplotlib-playground/examples/event_handling/looking_glass.py | 1 | 1280 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
x, y = np.random.rand(2, 200)
fig, ax = plt.subplots()
circ = patches.Circle((0.5, 0.5), 0.25, alpha=0.8, fc='yellow')
ax.add_patch(circ)
ax.plot(x, y, alpha=0.2)
line, = ax.plot(x, y, alpha=1.0, clip_path=circ)
class EventHandler(object):
def __init__(self):
fig.canvas.mpl_connect('button_press_event', self.onpress)
fig.canvas.mpl_connect('button_release_event', self.onrelease)
fig.canvas.mpl_connect('motion_notify_event', self.onmove)
self.x0, self.y0 = circ.center
self.pressevent = None
def onpress(self, event):
if event.inaxes != ax:
return
if not circ.contains(event)[0]:
return
self.pressevent = event
def onrelease(self, event):
self.pressevent = None
self.x0, self.y0 = circ.center
def onmove(self, event):
if self.pressevent is None or event.inaxes != self.pressevent.inaxes:
return
dx = event.xdata - self.pressevent.xdata
dy = event.ydata - self.pressevent.ydata
circ.center = self.x0 + dx, self.y0 + dy
line.set_clip_path(circ)
fig.canvas.draw()
handler = EventHandler()
plt.show()
| mit |
zbarge/zeex | zeex/core/views/actions/merge_purge.py | 1 | 33284 | """
MIT License
Copyright (c) 2016 Zeke Barge
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import pandas as pd
import logging
from functools import partial
from zeex.core.compat import QtGui, QtCore
from zeex.core.models.actions import FileViewModel
from zeex.core.ctrls.dataframe import DataFrameModelManager
from zeex.core.ui.actions.merge_purge_ui import Ui_MergePurgeDialog
from zeex.core.utility.collection import DictConfig, SettingsINI
from zeex.core.utility.pandatools import gather_frame_fields
from zeex.core.utility.widgets import create_standard_item_model
from zeex.core.views.basic.map_grid import MapGridDialog
from zeex.core.views.basic.push_grid import PushGridHandler
from zeex.core.ctrls.dataframe import DataFrameModel
class MergePurgeDialog(QtGui.QDialog, Ui_MergePurgeDialog):
"""
This dialog allows a user to do large updates on a given source DataFrameModel.
- Merging other file(s) with the source based on common keys/fields
- Purging records from the source using other file(s) based on common keys/fields
- Sorting the DataFrame by multiple columns/ascending/descending
- Deduplicating the DataFrame based on common keys/fields
Settings can exported to a config.ini file and re-imported at a later time.
"""
signalMergeFileOpened = QtCore.Signal(str) # file path
signalSFileOpened = QtCore.Signal(str) # file path
    signalSourcePathSet = QtCore.Signal(str)  # file path
signalExecuted = QtCore.Signal(str, str, str) # source_path, dest_path, report_path
def __init__(self, df_manager: DataFrameModelManager, parent=None, source_model=None):
"""
:param df_manager: (DataFrameModelManager)
This will be used to handle reading/updating of DataFrameModels used
in the operation.
:param parent: (QMainWindow)
:param source_model: (DataFrameModel)
An optional source DataFrameModel
"""
self.df_manager = df_manager
QtGui.QDialog.__init__(self, parent)
self.setupUi(self)
self.source_model = source_model
self._merge_view_model = FileViewModel()
self._suppress_view_model = FileViewModel()
self._purge_files = {}
self._merge_files = {}
self._field_map_grids = {}
self._field_map_data = {}
self.sortAscHandler = None
self.sortOnHandler = None
self.dedupeOnHandler = None
self.uniqueFieldsHandler = None
self.gatherFieldsHandler = None
self.configure()
if self.source_model is not None:
self.set_source_model(source_model, configure=True)
def configure(self, source_path=None, dest_path=None):
"""
Connects main buttons and actions.
:param source_path: (str, default None)
If this is None there must be a valid path already in the sourcePathLineEdit or an AssertionError raises.
:param dest_path: (str, default None)
Optional custom destination path to be added to the destPathLineEdit.
:return: None
"""
if source_path is None:
source_path = self.sourcePathLineEdit.text()
if os.path.isfile(source_path):
self.set_line_edit_paths(source_path, dest_path=dest_path)
if self.sortAscHandler is None:
self.set_handler_sort_asc()
source_func = partial(self.open_file, model_signal=self.signalSourcePathSet)
self.signalSourcePathSet.connect(self.set_source_model_from_browse)
self.btnBrowseSourcePath.clicked.connect(source_func)
self.btnBrowseDestPath.clicked.connect(self.set_dest_path_from_browse)
self.signalMergeFileOpened.connect(self.add_merge_file)
merge_file_func = partial(self.open_file, model_signal=self.signalMergeFileOpened)
self.btnAddMergeFile.clicked.connect(merge_file_func)
self.btnBrowseMergeFile.clicked.connect(merge_file_func)
self.btnDeleteMergeFile.clicked.connect(partial(self.remove_file, self.mergeFileTable))
self.btnEditMergeFile.clicked.connect(partial(self.open_edit_file_window, self.mergeFileTable, self._merge_files))
self.mergeFileTable.setModel(self._merge_view_model)
self.signalSFileOpened.connect(self.add_purge_file)
sfile_func = partial(self.open_file, model_signal=self.signalSFileOpened)
self.btnEditSFile.clicked.connect(partial(self.open_edit_file_window, self.sFileTable, self._purge_files))
self.btnDeleteSFile.clicked.connect(partial(self.remove_file, self.sFileTable))
self.btnAddSFile.clicked.connect(sfile_func)
self.btnBrowseSFile.clicked.connect(sfile_func)
self.sFileTable.setModel(self._suppress_view_model)
self.btnMapSFields.clicked.connect(partial(self.open_field_map, self.sFileTable, self._purge_files))
self.btnMapMergeFields.clicked.connect(partial(self.open_field_map, self.mergeFileTable, self._merge_files))
self.btnExecute.clicked.connect(self.execute)
self.btnExportTemplate.clicked.connect(self.export_settings)
self.btnImportTemplate.clicked.connect(self.import_settings)
self.btnReset.clicked.connect(self.reset)
def set_source_model_from_browse(self, filepath):
self.set_line_edit_paths(filepath, dest_path=False)
self.set_source_model(configure=True)
def set_dest_path_from_browse(self, filepath=None):
if filepath is None:
try:
dirname = os.path.dirname(self.df_manager.last_path_read)
except:
dirname = ''
filepath = QtGui.QFileDialog.getOpenFileName(self, dir=dirname)[0]
self.destPathLineEdit.setText(filepath)
def set_source_model(self, model=None, configure=True):
"""
Sets the source DataFrameModel for the Dialog.
:param model: (DataFrameModel)
The DataFrameModel to be set.
:param configure:
True re-configures file path line edits and the listviews.
:return:
"""
if not hasattr(model, 'dataFrame'):
if model is None:
model = self.sourcePathLineEdit.text()
if isinstance(model, str) and os.path.exists(model):
model = self.df_manager.read_file(model)
else:
raise Exception("model parameter must be a filepath or a qtpandas.models.DataFrameModel")
if self.source_model is not None:
models_different = model.filePath != self.source_model.filePath
if models_different:
try:
self.source_model.dataFrameChanged.disconnect(self.sync)
except RuntimeError:
pass
else:
models_different = True
if models_different:
self.source_model = model
self.source_model.dataFrameChanged.connect(self.sync)
if configure:
self.sync()
def sync(self):
df = self.source_model.dataFrame()
cols = df.columns.tolist()
if self.dedupeOnHandler is None or self.uniqueFieldsHandler is None:
self.set_push_grid_handlers()
else:
self.dedupeOnHandler.set_model_from_list(cols)
self.gatherFieldsHandler.set_model_from_list(cols)
self.sortOnHandler.set_model_from_list(cols)
self.uniqueFieldsHandler.set_model_from_list(cols)
self.set_primary_key_combo_box()
self.set_line_edit_paths(source_path=self.source_model.filePath)
def set_line_edit_paths(self, source_path=None, dest_path=None):
"""
Sets the source/destination line edits in the Dialog.
:param source_path: (str, default None)
An optional valid filepath for the source DataFrameModel.
If None, :param dest_path cannot be None.
:param dest_path: (str, default None)
An optional destination path. One will be created automatically
if None is given.
False will prevent the destination path from being set at all.
:return: None
"""
assert any([dest_path, source_path]), "source_path or dest_path must be set."
if dest_path is None:
dirname = os.path.dirname(source_path)
base, ext = os.path.splitext(os.path.basename(source_path))
dest_path = os.path.join(dirname, base + "_merged" + ext)
if source_path:
self.sourcePathLineEdit.setText(source_path)
if dest_path:
self.destPathLineEdit.setText(dest_path)
def set_push_grid_handlers(self, column_model=None, sorton_model=None, sortasc_model=None,
dedupe_model=None, gather_model=None, unique_model=None):
"""
Sets all default push grid handlers for the dialog.
:param column_model: (QStandardItemModel, default None)
:param sorton_model: ((QStandardItemModel,list) default None)
:param sortasc_model: ((QStandardItemModel,list) default None)
:param dedupe_model: ((QStandardItemModel,list) default None)
:return:
"""
if column_model is None:
column_model = self.get_source_columns_model()
self.set_handler_sort_on(column_model=None, default_model=sorton_model)
self.set_handler_sort_asc(default_model=sortasc_model)
self.set_handler_dedupe_on(column_model=None, default_model=dedupe_model)
self.set_handler_gather_fields(column_model=None, default_model=gather_model)
self.set_handler_unique_fields(column_model=None, default_model=unique_model)
def set_handler_sort_on(self, column_model=None, default_model=None):
if column_model is None:
column_model = self.get_source_columns_model()
self.sortOnHandler = PushGridHandler(left_model=column_model, left_view=self.sortOnLeftView,
left_button=self.sortOnLeftButton,
left_delete=True, right_model=default_model,
right_view=self.sortOnRightView,
right_button=self.sortOnRightButton)
def set_handler_sort_asc(self, default_model=None, overwrite=False):
if self.sortAscHandler is None or default_model is not None or overwrite:
sort_asc = QtGui.QStandardItemModel()
sort_asc.appendRow(QtGui.QStandardItem('True'))
sort_asc.appendRow(QtGui.QStandardItem('False'))
self.sortAscHandler = PushGridHandler(left_model=sort_asc, left_view=self.sortAscLeftView,
left_button=self.sortAscLeftButton,
left_delete=False, right_model=default_model,
right_view=self.sortAscRightView,
right_button=self.sortAscRightButton)
def set_handler_dedupe_on(self, column_model=None, default_model=None):
if column_model is None:
column_model = self.get_source_columns_model()
self.dedupeOnHandler = PushGridHandler(left_model=column_model, left_view=self.dedupeOnLeftView,
left_button=self.dedupeOnLeftButton,
left_delete=True, right_model=default_model,
right_view=self.dedupeOnRightView,
right_button=self.dedupeOnRightButton)
def set_handler_gather_fields(self, column_model=None, default_model=None):
if column_model is None:
column_model = self.get_source_columns_model()
self.gatherFieldsHandler = PushGridHandler(left_model=column_model,
left_view=self.gatherFieldsListViewLeft,
left_button=self.gatherFieldsButtonLeft,
left_delete=True, right_model=default_model,
right_view=self.gatherFieldsListViewRight,
right_button=self.gatherFieldsButtonRight)
def set_handler_unique_fields(self, column_model=None, default_model=None):
if column_model is None:
column_model = self.get_source_columns_model()
self.uniqueFieldsHandler = PushGridHandler(left_model=column_model,
left_view=self.uniqueFieldsListViewLeft,
left_button=self.uniqueFieldsPushButtonLeft,
left_delete=True, right_model=default_model,
right_view=self.uniqueFieldsListViewRight,
right_button=self.uniqueFieldsPushButtonRight)
def get_source_columns_model(self, raise_on_error=True) -> QtGui.QStandardItemModel:
"""
Quick way to get a QStandardItemModel form the DataFrameModel's columns.
:param raise_on_error: (bool, default True)
Raises an error if the source_model has not yet been set.
:return: (QtGui.QStandardItemModel)
"""
if self.source_model is None:
if raise_on_error:
raise Exception("Cannot get source_columns as source_model is None!")
else:
columns = []
else:
columns = self.source_model.dataFrame().columns.tolist()
return create_standard_item_model(columns)
def open_file(self, file_names: list=None, model_signal=None, allow_multi=True):
"""
Opens a Merge or Purge file (or really any file) and calls the
given model signal after registering the DataFrameModel with the DataFrameModelManager.
:param file_names: (list, default None)
An optional list of filenames to open.
The user must select filenames otherwise.
:param model_signal: (QtCore.Signal)
A signal to be called after successfully reading the DataFrameModel.
:param allow_multi: (bool, default True)
True allows multiple files to be read (and the signal called each time).
False allows only the first file to be read.
:return: None
You can call MergePurgeDialog.df_manager.get_frame(filename) to
retrieve a DataFrameModel.
"""
if file_names is None:
dirname = os.path.dirname(self.sourcePathLineEdit.text())
file_names = QtGui.QFileDialog.getOpenFileNames(parent=self,
dir=dirname)[0]
if isinstance(file_names, str):
            file_names = [file_names]  # wrap a single path; list(str) would split it into characters
assert not isinstance(file_names, str) and hasattr(file_names, "__iter__"), "file_names is not list-like!"
if allow_multi is False:
            file_names = [file_names[0]]  # keep only the first path, as a one-element list
for f in file_names:
try:
if not isinstance(f, str) and hasattr(f, '__iter__'):
f = f[0]
if os.path.exists(f):
self.df_manager.read_file(f)
if model_signal is not None:
model_signal.emit(f)
logging.info("Emitted signal: {}".format(f))
except Exception as e:
logging.error(e)
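    # Illustrative usage of open_file — a minimal sketch with a hypothetical path:
    #   dialog.open_file(file_names=['/data/extra.csv'], model_signal=dialog.signalMergeFileOpened)
    #   model = dialog.df_manager.get_model('/data/extra.csv')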
@QtCore.Slot(str)
def add_merge_file(self, file_path):
"""
Adds a merge file to the merge view and
also updates the internal dictionary storing the filepath/model.
:param file_path: (str)
The file path to add.
:return: None
"""
model = self.df_manager.get_model(file_path)
model.enableEditing(True)
self._merge_files.update({file_path:model})
self._merge_view_model.append_df_model(model)
self.mergeFileTable.setColumnWidth(0, 500)
self._merge_view_model.setHorizontalHeaderLabels(['filepath', 'count'])
@QtCore.Slot(str)
def add_purge_file(self, file_path):
"""
Adds a purge file to the purge view and
also updates the internal dictionary storing the filepath/model.
:param file_path: (str)
The file path to add.
:return: None
"""
model = self.df_manager.get_model(file_path)
model.enableEditing(True)
self._purge_files.update({file_path:model})
self._suppress_view_model.append_df_model(model)
self.sFileTable.setColumnWidth(0, 500)
self._suppress_view_model.setHorizontalHeaderLabels(['filepath', 'count'])
def remove_file(self, view, indexes=None):
"""
Removes selected file(s) from the given view.
:param view: (QListView)
The view to drop the selected indexes on.
:param indexes: (list, default None)
A list of given indexes to drop.
Otherwise relies on selected indexes in the view.
:return: None
"""
if indexes is None:
indexes = [x.row() for x in view.selectedIndexes()]
model = view.model()
for idx in indexes:
model.takeRow(idx)
def open_field_map(self, view, models):
"""
Connects a MapGridDialog to help the user map field names that
are different between the source DataFrameModel and the
selected merge or suppression DataFrameModel.
:param view: (QtGui.QTableView)
The view that has a selected filepath
:param models: (dict)
The dictionary of {file_path:DataFrameModel} where
dataframe columns can be gathered from.
:return: None
"""
idx = view.selectedIndexes()[0]
view_model = view.model()
view_item = view_model.item(idx.row())
view_item_text = view_item.text()
try:
self._field_map_grids[view_item_text].show()
except KeyError:
dfmodel = models[view_item_text]
colmodel = dfmodel._dataFrame.columns.tolist()
if self.source_model is None:
self.set_source_model()
source_colmodel = self.source_model._dataFrame.columns.tolist()
fmap = MapGridDialog(parent=self)
fmap.load_combo_box(source_colmodel, left=True)
fmap.load_combo_box(colmodel, left=False)
fmap.setWindowTitle("Map Fields")
fmap.labelLeft.setText(os.path.basename(self.source_model.filePath))
fmap.labelRight.setText(os.path.basename(dfmodel.filePath))
fmap.signalNewMapping.connect(lambda x: self._field_map_data.update({dfmodel.filePath: x}))
self._field_map_grids[view_item_text] = fmap
self._field_map_grids[view_item_text].show()
def get_map_grid(self, file_path):
"""
Accessor to the MergePurgeDialog._field_map_grids dictionary.
Contains map grid dialogs.
:param file_path: (str)
The filepath related to the desired MapGridDialog.
:return: (MapGridDialog, None)
"""
return self._field_map_grids.get(file_path, None)
def open_edit_file_window(self, view, models):
"""
Connects a DataFrameModel selected in the view
to a FileTableWindow where the model can be edited.
:param view: (QtGui.QTableView)
The view that has a selected filepath
:param models: (dict)
The dictionary of {file_path:DataFrameModel}
to supply the FileTableWindow
:return: None
"""
try:
idx = view.selectedIndexes()[0]
except IndexError:
raise IndexError("No file selected to open.")
vmodel = view.model()
vitem = vmodel.item(idx.row())
model = models.get(vitem.text())
fp = model.filePath
wdw = self.df_manager.get_fileview_window(fp)
        # Prevent weirdos from doing an endless loop of MergePurge windows.
        # That would be pretty funny, though...
wdw.actionMergePurge.setVisible(False)
wdw.show()
def execute(self):
"""
Executes the merge_purge based upon the given settings.
:return: None
"""
if self.source_model is None:
self.set_source_model()
suppressed_results = {}
merged_results = {}
source_path = self.sourcePathLineEdit.text()
dest_path = self.destPathLineEdit.text()
source_df = self.source_model.dataFrame().copy()
source_df.loc[:, 'ORIG_IDXER'] = source_df.index
source_size = source_df.index.size
index_label = self.primaryKeyComboBox.currentText()
sort_on = self.sortOnHandler.get_model_list(left=False)
ascending = self.sortAscHandler.get_model_list(left=False)
dedupe_on = self.dedupeOnHandler.get_model_list(left=False)
gather_fields = self.gatherFieldsHandler.get_model_list(left=False)
overwrite_existing = self.gatherFieldsOverWriteCheckBox.isChecked()
# Make sure ascending/sort_on lists are equal.
while len(sort_on) < len(ascending):
ascending.append(False)
while len(sort_on) > len(ascending):
ascending.pop()
# Get all merge models and merge.
# Absorb all rows and columns
for file_path, merge_model in self._merge_files.items():
pre_size = source_df.index.size
other_df = merge_model.dataFrame()
if gather_fields:
assert index_label in other_df.columns, "DataFrameModel for {} missing column {}".format(
merge_model.filePath, index_label)
source_df = gather_frame_fields(source_df, other_df, index_label=index_label,
fields=gather_fields, copy_frames=True,
append_missing=True, overwrite=overwrite_existing)
else:
source_df = pd.concat([source_df, other_df])
merged_results.update({merge_model.filePath: source_df.index.size - pre_size})
# Get all suppression models and suppress.
for file_path, suppress_model in self._purge_files.items():
map_dict = self._field_map_data.get(file_path, {})
sframe = suppress_model.dataFrame().copy()
sframe.drop(['ORIG_IDXER'], axis=1, inplace=True, errors='ignore')
if map_dict:
# A mapping exists - rename the data and get the key_cols
key_cols = list(map_dict.values())
sframe.rename(columns=map_dict, inplace=True)
else:
# No mapping exists - Try to use the dedupe_on cols as key_cols
key_cols = dedupe_on.copy()
missing = [x for x in key_cols if x not in sframe.columns]
if missing:
raise KeyError("Suppression file {} must have a field mapping or \
have the dedupe column labels, it has neither!.".format(
suppress_model.filePath))
sframe = sframe.loc[:, key_cols].drop_duplicates(key_cols)
badframe = pd.merge(source_df, sframe, how='inner', left_on=key_cols, right_on=key_cols)
source_df = source_df.loc[~source_df.index.isin(badframe.loc[:, 'ORIG_IDXER'].tolist()), :]
suppressed_results.update({suppress_model.filePath: badframe.index.size})
# Sort the data
if sort_on and ascending:
source_df.sort_values(sort_on, ascending=ascending, inplace=True)
# Deduplicate the data.
if dedupe_on:
pre_size = source_df.index.size
source_df.drop_duplicates(dedupe_on, inplace=True)
dedupe_lost = pre_size - source_df.index.size
else:
dedupe_lost = 0
# Export the data - done!
source_df.drop(['ORIG_IDXER'], axis=1, inplace=True, errors='ignore')
source_df.to_csv(dest_path, index=False)
logging.info("Exported: {}".format(dest_path))
merge_string = "\n".join("Gained {} merging {}".format(v, k) for k, v in merged_results.items())
suppress_string = "\n".join("Lost {} suppressing {}".format(v, k) for k,v in suppressed_results.items())
report = """
Merge Purge Report
==================
Original Size: {}
Final Size: {}
Source Path: {}
Output Path: {}
Merge:
==================
{}
Purge:
==================
{}
Sort:
==================
SORT BY: {}
SORT ASCENDING: {}
Dedupe:
==================
DEDUPE ON: {}
RECORDS LOST: {}
""".format(source_size, source_df.index.size, source_path,
dest_path, merge_string, suppress_string,
sort_on, ascending, dedupe_on, dedupe_lost)
report_path = os.path.splitext(dest_path)[0] + "_report.txt"
with open(report_path, "w") as fh:
fh.write(report)
self.signalExecuted.emit(source_path, dest_path, report_path)
def get_settings(self, dc:DictConfig = None, section="MERGE_PURGE") -> DictConfig:
"""
Gathers the settings out of the Dialog and
returns a DictConfig object with updated settings.
:param dc (DictConfig, default None)
An optional DictConfig object, one is created if none is given.
:param section (str, default 'MERGE_PURGE')
An optional section name to apply settings to.
A pre-existing section with this name would be overwritten.
:return: (DictConfig)
An updated DictConfig object.
"""
if dc is None:
dc = DictConfig()
if dc.has_section(section):
dc.remove_section(section)
dc.add_section(section)
dc.set_safe(section, 'source_path', self.sourcePathLineEdit.text())
dc.set_safe(section, 'dest_path', self.destPathLineEdit.text())
dc.set_safe(section, 'primary_key', self.primaryKeyComboBox.currentText())
dc.set_safe(section, 'dedupe_on', self.dedupeOnHandler.get_model_list(left=False))
dc.set_safe(section, 'gather_fields', self.gatherFieldsHandler.get_model_list(left=False))
dc.set_safe(section, 'gather_fields_overwrite', self.gatherFieldsOverWriteCheckBox.isChecked())
dc.set_safe(section, 'sort_on', self.sortOnHandler.get_model_list(left=False))
dc.set_safe(section, 'sort_ascending', self.sortAscHandler.get_model_list(left=False))
dc.set_safe(section, 'unique_fields', self.uniqueFieldsHandler.get_model_list(left=False))
dc.set_safe(section, 'field_map_data', self._field_map_data)
dc.set_safe(section, 'merge_files', list(self._merge_files.keys()))
dc.set_safe(section, 'purge_files', list(self._purge_files.keys()))
return dc
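    # Illustrative round trip — a minimal sketch with a hypothetical path, mirroring
    # export_settings/import_settings below:
    #   settings = dialog.get_settings()
    #   settings.save_as('/tmp/merge_purge.ini', set_self=True)
    #   dialog.set_settings(SettingsINI(filename='/tmp/merge_purge.ini'))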
def set_settings(self, dc:DictConfig, section="MERGE_PURGE"):
"""
Applies settings from a DictConfig object to the Dialog.
:param dc (DictConfig, default None)
The DictConfig object that contains the settings to be applied.
:param section (str, default 'MERGE_PURGE')
The section name to read settings from.
:return:
"""
source_path = dc.get(section, 'source_path', fallback=self.sourcePathLineEdit.text())
current_path = self.sourcePathLineEdit.text()
if source_path != current_path:
dfm = self.df_manager.read_file(source_path)
dest = dc.get(section, 'dest_path', fallback=None)
self.set_source_model(dfm, configure=False)
self.set_line_edit_paths(source_path, dest_path=dest)
self.primaryKeyComboBox.clear()
self.set_primary_key_combo_box()
key_id = self.primaryKeyComboBox.findText(dc.get(section, 'primary_key',
fallback=self.primaryKeyComboBox.currentText()))
dedupe_on = dc.get_safe(section, 'dedupe_on', fallback=None)
sort_on = dc.get_safe(section, 'sort_on', fallback=None)
gather_fields = dc.get_safe(section, 'gather_fields', fallback=None)
unique_fields = dc.get_safe(section, 'unique_fields', fallback=None)
gather_fields_overwrite = dc.getboolean(section, 'gather_fields_overwrite', fallback=False)
sort_ascending = dc.get_safe(section, 'sort_ascending', fallback=None)
merge_files = dc.get_safe(section, 'merge_files', fallback=[])
purge_files = dc.get_safe(section, 'purge_files', fallback=[])
field_map_data = dc.get_safe(section, 'field_map_data', fallback={})
self.primaryKeyComboBox.setCurrentIndex(key_id)
self.set_push_grid_handlers(column_model=None, sorton_model=sort_on, sortasc_model=sort_ascending,
dedupe_model=dedupe_on, gather_model=gather_fields, unique_model=unique_fields)
self.gatherFieldsOverWriteCheckBox.setChecked(gather_fields_overwrite)
self._field_map_data.update(field_map_data)
self.open_file(file_names=merge_files, model_signal=self.signalMergeFileOpened)
self.open_file(file_names=purge_files, model_signal=self.signalSFileOpened)
def reset(self):
"""
Resets ListViews/CheckBoxes.
The source/dest line edits are left alone
The suppression/merge files are also left alone.
:return: None
"""
self.set_push_grid_handlers()
self.set_handler_sort_asc(overwrite=True)
self.set_primary_key_combo_box(reset=True)
self.gatherFieldsOverWriteCheckBox.setChecked(False)
def set_primary_key_combo_box(self, default=None, reset=False):
"""
Sets the primary key combo box.
:param default: (str, default None)
An optional default field name to select.
:return:
"""
if default is None and reset is False:
current_model = self.primaryKeyComboBox.model()
if current_model:
default = self.primaryKeyComboBox.currentText()
combo_model = create_standard_item_model([''] + self.source_model.dataFrame().columns.tolist(),
editable=False, checkable=True)
self.primaryKeyComboBox.setModel(combo_model)
if default is not None:
self.primaryKeyComboBox.setCurrentIndex(self.primaryKeyComboBox.findText(default))
def import_settings(self, from_path=None):
"""
Imports settings to the Dialog from a file.
:param from_path: (str, default None)
None makes the user enter a file path.
:return:
"""
if from_path is None:
try:
dirname = os.path.dirname(self.sourcePathLineEdit.text())
except:
dirname = ''
from_path = QtGui.QFileDialog.getOpenFileName(self, dir=dirname)[0]
config = SettingsINI(filename=from_path)
self.set_settings(config)
def export_settings(self, to=None):
"""
Exports settings from the Dialog to a file.
:param to: (str, default None)
None makes the user enter a file path.
:return: None
"""
if to is None:
try:
dirname = os.path.dirname(self.sourcePathLineEdit.text())
except:
dirname = ''
to = QtGui.QFileDialog.getSaveFileName(self, dir=dirname)[0]
config = self.get_settings()
config.save_as(to, set_self=True)
| mit |
heliazandi/volttron-applications | pnnl/PGnE/pgne/agent.py | 3 | 12179 | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2016, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import os
import sys
import logging
import datetime
from dateutil import parser
from volttron.platform.vip.agent import Agent, Core, PubSub, RPC, compat
from volttron.platform.agent import utils
from volttron.platform.agent.utils import (get_aware_utc_now,
format_timestamp)
import pandas as pd
import statsmodels.formula.api as sm
utils.setup_logging()
_log = logging.getLogger(__name__)
class PGnEAgent(Agent):
def __init__(self, config_path, **kwargs):
super(PGnEAgent, self).__init__(**kwargs)
self.config = utils.load_config(config_path)
self.site = self.config.get('campus')
self.building = self.config.get('building')
self.temp_unit = self.config.get('temp_unit')
self.power_unit = self.config.get('power_unit')
self.out_temp_name = self.config.get('out_temp_name')
self.power_name = self.config.get('power_name')
self.aggregate_in_min = self.config.get('aggregate_in_min')
self.aggregate_freq = str(self.aggregate_in_min) + 'Min'
self.ts_name = self.config.get('ts_name')
self.window_size_in_day = int(self.config.get('window_size_in_day'))
self.min_required_window_size_in_percent = float(self.config.get('min_required_window_size_in_percent'))
self.interval_in_min = int(self.config.get('interval_in_min'))
self.no_of_recs_needed = 10 # self.window_size_in_day * 24 * (60 / self.interval_in_min)
self.min_no_of_records_needed_after_aggr = int(self.min_required_window_size_in_percent/100 *
self.no_of_recs_needed/self.aggregate_in_min)
self.schedule_run_in_sec = int(self.config.get('schedule_run_in_hr')) * 3600
# Testing
#self.no_of_recs_needed = 200
#self.min_no_of_records_needed_after_aggr = self.no_of_recs_needed/self.aggregate_in_min
@Core.receiver('onstart')
def onstart(self, sender, **kwargs):
self.core.periodic(self.schedule_run_in_sec, self.calculate_latest_coeffs)
def calculate_latest_coeffs(self):
unit_topic_tmpl = "{campus}/{building}/{unit}/{point}"
unit_points = [self.power_name]
df = None
#Get data
unit = self.temp_unit
for point in unit_points:
if point == self.power_name:
unit = self.power_unit
unit_topic = unit_topic_tmpl.format(campus=self.site,
building=self.building,
unit=unit,
point=point)
result = self.vip.rpc.call('platform.historian',
'query',
topic=unit_topic,
count=self.no_of_recs_needed,
order="LAST_TO_FIRST").get(timeout=10000)
df2 = pd.DataFrame(result['values'], columns=[self.ts_name, point])
df2[self.ts_name] = pd.to_datetime(df2[self.ts_name])
df2 = df2.groupby([pd.TimeGrouper(key=self.ts_name, freq=self.aggregate_freq)]).mean()
# df2[self.ts_name] = df2[self.ts_name].apply(lambda dt: dt.replace(second=0, microsecond=0))
df = df2 if df is None else pd.merge(df, df2, how='outer', left_index=True, right_index=True)
#Calculate coefficients
result_df = self.calculate_coeffs(df)
# Publish coeffs to store
#if coeffs is not None:
# self.save_coeffs(coeffs, subdevice)
def convert_units_to_SI(self, df, point, unit):
if unit == 'degreesFahrenheit':
df[point] = (df[point]-32) * 5/9
# Air state assumption: http://www.remak.eu/en/mass-air-flow-rate-unit-converter
# 1cfm ~ 0.00055kg/s
if unit == 'cubicFeetPerMinute':
df[point] = df[point] * 0.00055
def calculate_coeffs(self, dP):
dP['time'] = dP['posttime']
dP = dP.set_index(['posttime'])
dP.index = pd.to_datetime(dP.index)
dP['time'] = pd.to_datetime(dP['time'])
#### Delete the weekend
dP.columns = ["Tout", "wbe", "Weekday", "time"]
dP['year'] = dP.index.year
dP['month'] = dP.index.month
dP['hour'] = dP.index.hour
dP['day'] = dP.index.day
dP = dP[dP.Weekday != 'Sun']
dP = dP[dP.Weekday != 'Sat']
#### Hourly average value
df = dP.resample('60min').mean()
dP2 = dP.resample('60min').mean()
dP = dP[dP.Tout < 150]
dP = dP[dP.Tout > 20]
dP = dP.dropna()
df = df.pivot_table(index=["year", "month", "day"], columns=["hour"], values=["wbe", "Tout"])
# ### Average using high five outdoor temperature data based on 10 day moving windows
leng = len(df.index)
for i in range(0, leng):
for j in range(0, 24):
df['power', j] = df.ix[i:i + 10, :].sort([('Tout', j)], ascending=False).head(5).ix[:,
j + 24:j + 25].mean()
for i in range(0, leng):
for j in range(0, 24):
df['power', j][i:i + 1] = df.ix[i:i + 10, :].sort([('Tout', j)], ascending=False).head(5).ix[:,
j + 24:j + 25].mean()
# ### Average based on 10 day moving windows
for i in range(0, 24):
df['Tout_avg', i] = df.ix[:, i:i + 1].rolling(window=10, min_periods=10).mean()
for i in range(0, 24):
df['Pow_avg', i] = df.ix[:, i + 24:i + 25].rolling(window=10, min_periods=10).mean()
df = df.stack(level=['hour'])
df.power = df.power.shift(216)
df = df.dropna()
dq = df.reset_index()
dq['Data'] = pd.to_datetime(
dq.year.astype(int).apply(str) + '/' + dq.month.astype(int).apply(str) + '/' + dq.day.astype(int).apply(
str) + ' ' + dq.hour.astype(int).apply(str) + ":00", format='%Y/%m/%d %H:%M')
dq = dq.set_index(['Data'])
dq = dq.drop(['year', 'month', 'day', 'hour'], axis=1)
dk = dq
### Adjusted average using high five outdoor temperature data based on 10 day moving windows
lengnth = len(dq.index)
lengnth = lengnth - 4
dq["Adj"] = 1.0
for i in range(0, lengnth):
dq['Adj'][i + 4] = (dq['wbe'][i:i + 4].mean()) / (dq['Pow_avg'][i:i + 4].mean())
dq['Pow_adj'] = dq['Pow_avg'] * dq['Adj']
#### Adjusted average based on 10 day moving windows
lengnth = len(dq.index)
lengnth = lengnth - 4
dq["Adj2"] = 1.0
for i in range(0, lengnth):
dq['Adj2'][i + 4] = (dq['wbe'][i:i + 4].mean()) / (dq['power'][i:i + 4].mean())
dq['Adj2'] = dq.Adj2.shift(2)
dq['power_adj'] = dq['power'] * dq['Adj2']
return dq
def save_coeffs(self, coeffs, subdevice):
topic_tmpl = "analysis/TCM/{campus}/{building}/{unit}/{subdevice}/"
topic = topic_tmpl.format(campus=self.site,
building=self.building,
unit=self.unit,
subdevice=subdevice)
T_coeffs = coeffs["T_fit"]
Q_coeffs = coeffs["Q_fit"]
headers = {'Date': format_timestamp(get_aware_utc_now())}
for idx in xrange(0,5):
T_topic = topic + "T_c" + str(idx)
Q_topic = topic + "Q_c" + str(idx)
self.vip.pubsub.publish(
'pubsub', T_topic, headers, T_coeffs.params[idx])
self.vip.pubsub.publish(
'pubsub', Q_topic, headers, Q_coeffs.params[idx])
_log.debug(T_coeffs.params)
_log.debug(Q_coeffs.params)
def main(argv=sys.argv):
'''Main method called by the eggsecutable.'''
try:
utils.vip_main(PGnEAgent)
except Exception as e:
_log.exception('unhandled exception')
def test_ols():
'''To compare result of pandas and R's linear regression'''
import os
test_csv = '../test_data/tcm_ZONE_VAV_150_data.csv'
df = pd.read_csv(test_csv)
config_path = os.environ.get('AGENT_CONFIG')
tcm = PGnEAgent(config_path)
coeffs = tcm.calculate_coeffs(df)
if coeffs is not None:
T_coeffs = coeffs["T_fit"]
Q_coeffs = coeffs["Q_fit"]
_log.debug(T_coeffs.params)
_log.debug(Q_coeffs.params)
def test_api():
'''To test Volttron APIs'''
import os
topic_tmpl = "{campus}/{building}/{unit}/{subdevice}/{point}"
tcm = PGnEAgent(os.environ.get('AGENT_CONFIG'))
topic1 = topic_tmpl.format(campus='PNNL',
building='SEB',
unit='AHU1',
subdevice='VAV123A',
point='MaximumZoneAirFlow')
result = tcm.vip.rpc.call('platform.historian',
'query',
topic=topic1,
count=20,
order="LAST_TO_FIRST").get(timeout=100)
assert result is not None
if __name__ == '__main__':
# Entry point for script
sys.exit(main())
#test_api()
| bsd-3-clause |
tomevans/pyphotom | photom_class.py | 1 | 14267 | import numpy as np
import matplotlib.pyplot as plt
import pdb
import os
import cPickle
import shutil
from photom import photom_inspect, photom_reduce, photom_absolute, photom_relative, photom_checks, photom_optimise
homestr = os.path.expanduser( '~' )
class photom():
"""
"""
def __init__(self):
"""
        Initialise a default photom object.
"""
self.analysis_dir = ''
self.nstars = None
self.image_list = None
self.bias_list = None
self.dark_list = None
self.flat_list = None
self.ccdproc_params = 'default'
self.master_bias = None
self.master_dark = None
self.master_flat = None
self.red_image_list = None
self.nimages_total = None
self.nimages_good = None
self.goodbad_flags = None
self.coords_input_files = None
self.coords_input_type = None
self.photpars = 'default'
self.fitskypars = 'default'
self.centerpars = 'default'
self.datapars = 'default'
self.dat_files = None
self.absphot_file = None
self.relphot_file = None
return None
def set_attributes( self, analysis_dir=None, image_list=None, bias_list=None, dark_list=None, \
flat_list=None, ccdproc_params=None, ap_params=None, master_bias=None, \
master_dark=None, master_flat=None, red_image_list=None, nimages_total=None, \
nimages_good=None, goodbad_flags=None, nstars=None, coords_input_files=None, \
coords_input_type=None, photpars=None, fitskypars=None, centerpars=None, \
datapars=None, dat_files=None, absphot_file=None, relphot_file=None ):
"""
Set photom object parameters.
"""
if analysis_dir!=None: self.analysis_dir = analysis_dir.replace( '~', homestr )
if self.analysis_dir=='': self.analysis_dir = os.getcwd()
if image_list!=None:
if (os.path.dirname(image_list)==''):
self.image_list = str(self.analysis_dir+'/'+image_list).replace('//','/')
else:
self.image_list = image_list
if red_image_list!=None:
if (os.path.dirname(red_image_list)==''):
self.red_image_list = str(self.analysis_dir+'/'+red_image_list).replace('//','/')
else:
self.red_image_list = red_image_list
if bias_list!=None:
if (os.path.dirname(bias_list)==''):
self.bias_list = str(self.analysis_dir+'/'+bias_list).replace('//','/')
else:
self.bias_list = bias_list
if dark_list!=None:
if (os.path.dirname(dark_list)==''):
self.dark_list = str(self.analysis_dir+'/'+dark_list).replace('//','/')
else:
self.dark_list = dark_list
if flat_list!=None:
if (os.path.dirname(flat_list)==''):
self.flat_list = str(self.analysis_dir+'/'+flat_list).replace('//','/')
else:
self.flat_list = flat_list
if coords_input_files!=None:
            if np.ndim(coords_input_files) == 0:  # np.ndim replaces the long-deprecated np.rank
self.coords_input_files = [str(self.analysis_dir+'/'+coords_input_files).replace('//','/')]
else:
self.coords_input_files = []
for coords_input in coords_input_files:
if os.path.dirname(coords_input)=='':
coords_input_full = str(self.analysis_dir+'/'+coords_input).replace('//','/')
else:
coords_input_full = coords_input
self.coords_input_files = self.coords_input_files+[coords_input_full]
if coords_input_type!=None: self.coords_input_type = coords_input_type
if ccdproc_params!=None: self.ccdproc_params = ccdproc_params
if ap_params!=None: self.ap_params = ap_params
if master_bias!=None: self.master_bias = master_bias
if master_dark!=None: self.master_dark = master_dark
if master_flat!=None: self.master_flat = master_flat
if red_image_list!=None: self.red_image_list = red_image_list
if goodbad_flags!=None: self.goodbad_flags = goodbad_flags
if nimages_total!=None: self.nimages_total = nimages_total
if nimages_good!=None: self.nimages_good = nimages_good
if nstars!=None: self.nstars = nstars
if photpars!=None: self.photpars = photpars
if fitskypars!=None: self.fitskypars = fitskypars
if centerpars!=None: self.centerpars = centerpars
if datapars!=None: self.datapars = datapars
if dat_files!=None: self.dat_files = dat_files
if absphot_file!=None: self.absphot_file = absphot_file
if relphot_file!=None: self.relphot_file = relphot_file
self.pickle_obj()
return None
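    # Example of typical usage (a minimal sketch; the directory and file names
    # below are placeholders rather than part of the pipeline itself):
    #
    #   >>> phot = photom()
    #   >>> phot.set_attributes( analysis_dir='~/analysis/mytarget', image_list='images.lis', \
    #   ...                      bias_list='biases.lis', flat_list='flats.lis', nstars=3 )
    #
    # File names given without a directory are prefixed with analysis_dir, and the
    # object is re-pickled automatically once the attributes have been updated.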
def inspect_images( self, obstime_kw=None, iraf_display_mode='display' ):
"""
"""
photom_inspect.Main( self, obstime_kw=obstime_kw, iraf_display_mode=iraf_display_mode )
self.pickle_obj()
return None
def reduce_images( self, use_previous=False, ccdproc_ccdtype='default', ccdproc_overscan='default', \
ccdproc_trim='default', ccdproc_fixpix='default', ccdproc_illumcor='default', \
ccdproc_fringecor='default', ccdproc_readcor='default', ccdproc_scancor='default', \
ccdproc_interactive='default', ccdproc_biassec='default', ccdproc_trimsec='default' ):
"""
"""
if self.ccdproc_params=='custom':
photom_reduce.custom_ccdproc_params( ccdproc_ccdtype=ccdproc_ccdtype, ccdproc_overscan=ccdproc_overscan, \
ccdproc_trim=ccdproc_trim, ccdproc_fixpix=ccdproc_fixpix, \
ccdproc_illumcor=ccdproc_illumcor, ccdproc_fringecor=ccdproc_fringecor, \
ccdproc_readcor=ccdproc_readcor, ccdproc_scancor=ccdproc_scancor, \
ccdproc_interactive=ccdproc_interactive, ccdproc_biassec=ccdproc_biassec, \
ccdproc_trimsec=ccdproc_trimsec )
elif self.ccdproc_params=='default':
photom_reduce.default_ccdproc_params(self)
if use_previous==False:
photom_reduce.Main(self)
else:
self.self_update()
self.pickle_obj()
return None
def optimise_aperture( self, ap_size_trials, sky_annulus_trials, sky_dannulus, gain_kw=None, readnoise_kw=None, \
exptime_kw=None, obstime_kw=None, airmass_kw=None, ix_target=None, ix_comparisons=None ):
"""
Searches a grid of aperture radii and sky annulus radii for the combination that
minimises the scatter of the relative photometry.
"""
scatter_array = photom_optimise.Main( self, ap_size_trials, sky_annulus_trials, sky_dannulus, datapars_gain=gain_kw, \
datapars_readnoise=readnoise_kw, datapars_exposure=exptime_kw, \
datapars_obstime=obstime_kw, datapars_airmass=airmass_kw, ix_target=ix_target, \
ix_comparisons=ix_comparisons )
return scatter_array
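    # For example, a hypothetical grid search over aperture radii of 5-14 pixels
    # and sky annulus radii of 20-29 pixels (the values and header keyword names
    # below are placeholders, not recommendations) might look like:
    #
    #   >>> scatter = phot.optimise_aperture( np.arange( 5, 15 ), np.arange( 20, 30 ), 10, \
    #   ...                                   gain_kw='GAIN', readnoise_kw='RDNOISE', \
    #   ...                                   exptime_kw='EXPTIME', obstime_kw='MJD-OBS', \
    #   ...                                   airmass_kw='AIRMASS', ix_target=0, ix_comparisons=[1,2] )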
def do_absphot( self, photpars_apertures='default', fitskypars_annulus='default', fitskypars_dannulus='default', \
fitskypars_salgorithm='default', centerpars_maxshift='default', centerpars_cbox='default', \
centerpars_minsnratio='default', datapars_gain='default', datapars_readnoise='default', \
datapars_exposure='default', datapars_obstime='default', datapars_airmass='default', make_plots=True ):
"""
        Does absolute photometry for one or more stars given a list of images.
        Output is generated in the form of two types of file:
          1. starX_absphot.dat files for X=0,1,2,..., one per star, containing
             columns of detailed output, with each line corresponding to a
             different image.
          2. An absolute.phot file containing the important numerical columns for
             each of the stars; this is intended to be the most convenient output
             for use with numpy and for generating relative photometry.
        Summary plots are also generated by default:
          Figure 1:
            ** Top left = traces of xy drift for each of the stars
            ** Bottom left = airmass versus time
            ** Top right = absolute flux versus time for each star
            ** Bottom right = sky annulus value as a function of time for each star
          Figure 2:
            ** Image number versus the measured scatter divided by the calculated
               Poisson noise (contents still to be confirmed)
"""
if self.photpars=='custom':
photom_absolute.custom_photpars( self, photpars_apertures=photpars_apertures )
elif self.photpars=='default':
photom_absolute.default_photpars( self )
if self.fitskypars=='custom':
photom_absolute.custom_fitskypars( self, fitskypars_annulus=fitskypars_annulus, fitskypars_dannulus=fitskypars_dannulus, \
fitskypars_salgorithm=fitskypars_salgorithm )
elif self.fitskypars=='default':
photom_absolute.default_fitskypars( self )
if self.centerpars=='custom':
photom_absolute.custom_centerpars( self, centerpars_maxshift=centerpars_maxshift, centerpars_cbox=centerpars_cbox, \
centerpars_minsnratio=centerpars_minsnratio )
elif self.centerpars=='default':
photom_absolute.default_centerpars( self )
if self.datapars=='custom':
photom_absolute.custom_datapars( self, datapars_gain=datapars_gain, datapars_readnoise=datapars_readnoise, \
datapars_exposure=datapars_exposure, datapars_obstime=datapars_obstime, \
datapars_airmass=datapars_airmass )
elif self.datapars=='default':
photom_absolute.default_datapars( self )
photom_absolute.Main( self, make_plots=make_plots )
self.pickle_obj()
return None
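    # The 'custom' branches above are only taken when the corresponding attribute
    # has been switched from 'default'. For instance, a hypothetical call with a
    # custom sky fit (the numerical values below are placeholders) might be:
    #
    #   >>> phot.set_attributes( fitskypars='custom' )
    #   >>> phot.do_absphot( fitskypars_annulus=25, fitskypars_dannulus=10 )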
def do_relphot( self, ix_target=None, ix_comparisons=None, make_plots=True ):
"""
        Calculates relative fluxes using the absolute photometry already stored in
        the photom object. The indices of the target star and the comparison stars
        must be specified using the format 0,1,2,... etc, where 0 is the first star.
"""
photom_relative.Main( self, ix_target=ix_target, ix_comparisons=ix_comparisons, make_plots=make_plots )
self.pickle_obj()
return None
def check_relphot( self ):
"""
Does two basic checks of the relative photometry in an effort to identify
variable comparison stars. Output is in the form of plots that must be
visually inspected to identify variable stars.
The two types of checks are:
1. All possible pairs of stars that can be made up from the target
and comparisons are checked.
2. A leave-one-out approach is taken, where the relative photometry is
repeated multiple times, with a different comparison star excluded
each time.
"""
photom_checks.comparisons( self )
return None
def update_auxvars( self, headerkws=None ):
"""
        Updates the auxiliary variables within the photom object, using the
        photometry already contained in the object to calculate the total sum of
        all stellar fluxes, and extracting header information from the images
        listed in the red_image_list variable.
"""
photom_checks.auxiliary_variables( self, headerkws=headerkws )
return None
def self_update( self ):
"""
        Generates default values for a few variables, e.g. when an analysis step
        such as reduce_images() has already been performed and so does not need
        to be repeated.
        NOTE: This routine is currently fairly ad hoc and could do with a rethink,
        plus support for various features that have been added to the overall
        pipeline since the routine was first written.
"""
# Count the total number of images:
try:
red_images = np.loadtxt(self.red_image_list, dtype='str')
self.nimages_total = len(red_images)
except:
pass
# Set the goodbad_flags to all be good:
        if self.goodbad_flags is None:
try:
self.goodbad_flags = np.ones(self.nimages_total)
except:
pass
# Count the number of good images:
self.nimages_good = int(np.sum(self.goodbad_flags))
self.pickle_obj()
return None
def pickle_obj( self, quiet=False ):
"""
Pickle the photom object in its current state. Saves the output as photom_object.pkl in the
analysis directory.
"""
outfile_name = str( self.analysis_dir + '/photom_object.pkl' ).replace( '//', '/' )
outfile_open = open( outfile_name, 'w' )
cPickle.dump( self, outfile_open )
outfile_open.close()
if quiet==False:
print '\nSaved %s\n' % outfile_name
self.pickled_output = outfile_name
return None
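    # A previously pickled photom object can be restored with cPickle, e.g.:
    #
    #   >>> import cPickle
    #   >>> phot = cPickle.load( open( '/path/to/analysis/photom_object.pkl' ) )
    #
    # where the path is a placeholder for the analysis directory in use.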
def backup_the_pickle( self ):
"""
Makes a backup of the current photom_object.pkl, saving the backed up version as photom_object.pkl.BACKUP
in the analysis directory.
"""
pkl_name = str( self.analysis_dir + '/photom_object.pkl' ).replace( '//', '/' )
shutil.copyfile( pkl_name, pkl_name + '.BACKUP' )
print '\nBacked up pickled photom object'
return None
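if __name__ == '__main__':
    # Minimal end-to-end sketch of one possible workflow. The directory, list
    # files and header keyword below are placeholders and must be adapted to
    # the data set in hand before this will actually run:
    phot = photom()
    phot.set_attributes( analysis_dir='~/analysis/mytarget', image_list='images.lis', \
                         bias_list='biases.lis', flat_list='flats.lis', \
                         coords_input_files='stars.coords', nstars=3 )
    phot.inspect_images( obstime_kw='MJD-OBS' )
    phot.reduce_images()
    phot.do_absphot()
    phot.do_relphot( ix_target=0, ix_comparisons=[1,2] )
    phot.check_relphot()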
| gpl-2.0 |