repo_name | path | copies | size | content | license
---|---|---|---|---|---|
DiCarloLab-Delft/PycQED_py3 | pycqed/utilities/pulse_scheme.py | 1 | 5469 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches
def new_pulse_fig(figsize):
'''
Open a new figure and configure it to plot pulse schemes.
'''
fig, ax = plt.subplots(1, 1, figsize=figsize, frameon=False)
ax.axis('off')
fig.subplots_adjust(bottom=0, top=1, left=0, right=1)
ax.axhline(0, color='0.75')
return fig, ax
def new_pulse_subplot(fig, *args, **kwargs):
'''
Add a new subplot configured for plotting pulse schemes to a figure.
All *args and **kwargs are passed to fig.add_subplot.
'''
ax = fig.add_subplot(*args, **kwargs)
ax.axis('off')
fig.subplots_adjust(bottom=0, top=1, left=0, right=1)
ax.axhline(0, color='0.75')
return ax
def mwPulse(ax, pos, y_offs=0, width=1.5, amp=1, label=None, phase=0, labelHeight=1.3,
color='C0', modulation='normal', **plot_kws):
'''
Draw a microwave pulse: Gaussian envelope with modulation.
'''
x = np.linspace(pos, pos + width, 100)
envPos = amp * np.exp(-(x - (pos + width / 2))**2 / (width / 4)**2)
envNeg = -amp * np.exp(-(x - (pos + width / 2))**2 / (width / 4)**2)
if modulation == 'normal':
mod = envPos * np.sin(2 * np.pi * 3 / width * x + phase)
elif modulation == 'high':
mod = envPos * np.sin(5 * np.pi * 3 / width * x + phase)
else:
raise ValueError("modulation must be 'normal' or 'high'")
ax.plot(x, envPos+y_offs, '--', color=color, **plot_kws)
ax.plot(x, envNeg+y_offs, '--', color=color, **plot_kws)
ax.plot(x, mod+y_offs, '-', color=color, **plot_kws)
if label is not None:
ax.text(pos + width / 2, labelHeight, label,
horizontalalignment='right', color=color)
return pos + width
def fluxPulse(ax, pos, y_offs=0, width=2.5, s=.1, amp=1.5, label=None, labelHeight=1.7,
color='C1', **plot_kws):
'''
Draw a smooth flux pulse, where the rising and falling edges are given by
Fermi-Dirac functions.
s: smoothness of edge
'''
x = np.linspace(pos, pos + width, 100)
y = amp / ((np.exp(-(x - (pos + 5.5 * s)) / s) + 1) *
(np.exp((x - (pos + width - 5.5 * s)) / s) + 1))
ax.fill_between(x, y+y_offs, color=color, alpha=0.3)
ax.plot(x, y+y_offs, color=color, **plot_kws)
if label is not None:
ax.text(pos + width / 2, labelHeight, label,
horizontalalignment='center', color=color)
return pos + width
def ramZPulse(ax, pos, y_offs=0, width=2.5, s=0.1, amp=1.5, sep=1.5, color='C1'):
'''
Draw a Ram-Z flux pulse, i.e. only part of the pulse is shaded, to indicate
cutting off the pulse at some time.
'''
xLeft = np.linspace(pos, pos + sep, 100)
xRight = np.linspace(pos + sep, pos + width, 100)
xFull = np.concatenate((xLeft, xRight))
y = amp / ((np.exp(-(xFull - (pos + 5.5 * s)) / s) + 1) *
(np.exp((xFull - (pos + width - 5.5 * s)) / s) + 1))
yLeft = y[:len(xLeft)]
ax.fill_between(xLeft, yLeft+y_offs, alpha=0.3, color=color, linewidth=0.0)
ax.plot(xFull, y+y_offs, color=color)
return pos + width
def modZPulse(ax, pos, y_offs=0, width=2.5, s=0.1, amp=1.5, sep=1.5, color='C1'):
'''
Draw a modulated Z pulse.
'''
return pos + width
def interval(ax, start, stop, y_offs = 0, height=1.5, label=None, labelHeight=None,
vlines=True, color='k', arrowstyle='<|-|>', **plot_kws):
'''
Draw an arrow to indicate an interval.
'''
if labelHeight is None:
labelHeight = height + 0.2
arrow = matplotlib.patches.FancyArrowPatch(
posA=(start, height+y_offs), posB=(stop, height+y_offs), arrowstyle=arrowstyle,
color=color, mutation_scale=7, **plot_kws)
ax.add_patch(arrow)
if vlines:
ax.plot([start, start], [0+y_offs, height+y_offs], '--', color=color, **plot_kws)
ax.plot([stop, stop], [0+y_offs, height+y_offs], '--', color=color, **plot_kws)
if label is not None:
ax.text((start + stop) / 2, labelHeight+y_offs, label, color=color,
horizontalalignment='center')
def interval_vertical(ax, start, stop, position, label=None, labelHeight=None,
color='k', arrowstyle='<|-|>', labeloffset: float = 0,
horizontalalignment='center'):
'''
Draw an arrow to indicate an interval.
'''
if labelHeight is None:
labelHeight = (start+stop)/2
arrow = matplotlib.patches.FancyArrowPatch(
posA=(position, start), posB=(position, stop), arrowstyle=arrowstyle,
color=color, mutation_scale=7)
ax.add_patch(arrow)
if label is not None:
ax.text(position+labeloffset, labelHeight, label, color=color,
horizontalalignment=horizontalalignment)
def meter(ax, x0, y0, y_offs=0, w=1.1, h=.8, color='black', fillcolor=None):
"""
Draws a measurement meter on the specified position.
"""
fill = fillcolor is not None
p1 = matplotlib.patches.Rectangle(
(x0-w/2, y0-h/2+y_offs), w, h, facecolor=fillcolor, edgecolor=color,
fill=fill, zorder=5)
ax.add_patch(p1)
p0 = matplotlib.patches.Wedge(
(x0, y0-h/4+y_offs), .4, theta1=40, theta2=180-40, color=color, lw=2,
width=.01, zorder=5)
ax.add_patch(p0)
ax.arrow(x0, y0-h/4+y_offs, dx=.5*np.cos(np.deg2rad(70)),
dy=.5*np.sin(np.deg2rad(60)), width=.03, color=color, zorder=5)
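# --- Illustrative usage (editorial sketch, not part of the original module) ---
# The drawing helpers above return the x-position where the next element can
# start, so a pulse scheme is composed by threading that value through them.
if __name__ == '__main__':
    fig, ax = new_pulse_fig((7, 2))
    t = 0
    t = mwPulse(ax, t, width=1.5, amp=1, label=r'$X_{\pi/2}$')
    t = fluxPulse(ax, t + 0.2, width=2.5, label='CZ')
    t = mwPulse(ax, t + 0.2, width=1.5, amp=1, label=r'$X_{\pi/2}$')
    meter(ax, t + 0.8, 0)
    interval(ax, 0, t, height=1.8, label=r'$\tau$')
    plt.show()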
| mit |
bthirion/nipy | examples/labs/need_data/localizer_glm_ar.py | 3 | 5428 | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function # Python 2/3 compatibility
__doc__ = """
Full step-by-step example of fitting a GLM to experimental data and visualizing
the results.
More specifically:
1. A sequence of fMRI volumes is loaded
2. A design matrix describing all the effects related to the data is computed
3. A mask of the useful brain volume is computed
4. A GLM is applied to the dataset (effect/covariance,
then contrast estimation)
Note that this corresponds to a single run.
Needs matplotlib
Author : Bertrand Thirion, 2010--2012
"""
print(__doc__)
from os import mkdir, getcwd, path
import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError:
raise RuntimeError("This script needs the matplotlib library")
from nibabel import save
from nipy.modalities.fmri.glm import FMRILinearModel
from nipy.modalities.fmri.design_matrix import make_dmtx
from nipy.modalities.fmri.experimental_paradigm import \
load_paradigm_from_csv_file
from nipy.labs.viz import plot_map, cm
# Local import
from get_data_light import DATA_DIR, get_first_level_dataset
#######################################
# Data and analysis parameters
#######################################
# volume mask
# This dataset is large
get_first_level_dataset()
data_path = path.join(DATA_DIR, 's12069_swaloc1_corr.nii.gz')
paradigm_file = path.join(DATA_DIR, 'localizer_paradigm.csv')
# timing
n_scans = 128
tr = 2.4
# paradigm
frametimes = np.linspace(0.5 * tr, (n_scans - .5) * tr, n_scans)
# confounds
hrf_model = 'canonical with derivative'
drift_model = "cosine"
hfcut = 128
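# (editorial note) hfcut is the high-pass cutoff period, in seconds, of the
# cosine drift model; 128 s is the conventional default.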
# write directory
write_dir = path.join(getcwd(), 'results')
if not path.exists(write_dir):
mkdir(write_dir)
print('Computation will be performed in directory: %s' % write_dir)
########################################
# Design matrix
########################################
print('Loading design matrix...')
paradigm = load_paradigm_from_csv_file(paradigm_file)['0']
design_matrix = make_dmtx(frametimes, paradigm, hrf_model=hrf_model,
drift_model=drift_model, hfcut=hfcut)
ax = design_matrix.show()
ax.set_position([.05, .25, .9, .65])
ax.set_title('Design matrix')
plt.savefig(path.join(write_dir, 'design_matrix.png'))
#########################################
# Specify the contrasts
#########################################
# simplest ones
contrasts = {}
n_columns = len(design_matrix.names)
for i in range(paradigm.n_conditions):
contrasts['%s' % design_matrix.names[2 * i]] = np.eye(n_columns)[2 * i]
# and more complex/ interesting ones
contrasts["audio"] = contrasts["clicDaudio"] + contrasts["clicGaudio"] +\
contrasts["calculaudio"] + contrasts["phraseaudio"]
contrasts["video"] = contrasts["clicDvideo"] + contrasts["clicGvideo"] + \
contrasts["calculvideo"] + contrasts["phrasevideo"]
contrasts["left"] = contrasts["clicGaudio"] + contrasts["clicGvideo"]
contrasts["right"] = contrasts["clicDaudio"] + contrasts["clicDvideo"]
contrasts["computation"] = contrasts["calculaudio"] + contrasts["calculvideo"]
contrasts["sentences"] = contrasts["phraseaudio"] + contrasts["phrasevideo"]
contrasts["H-V"] = contrasts["damier_H"] - contrasts["damier_V"]
contrasts["V-H"] = contrasts["damier_V"] - contrasts["damier_H"]
contrasts["left-right"] = contrasts["left"] - contrasts["right"]
contrasts["right-left"] = contrasts["right"] - contrasts["left"]
contrasts["audio-video"] = contrasts["audio"] - contrasts["video"]
contrasts["video-audio"] = contrasts["video"] - contrasts["audio"]
contrasts["computation-sentences"] = contrasts["computation"] - \
contrasts["sentences"]
contrasts["reading-visual"] = contrasts["sentences"] * 2 - \
contrasts["damier_H"] - contrasts["damier_V"]
contrasts['effects_of_interest'] = np.eye(25)[:20:2]
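# (editorial sketch, not in the original script) every contrast is a weight
# vector -- or a stack of weight vectors -- over the design-matrix columns,
# so its trailing dimension must equal n_columns:
for contrast_id, contrast_val in contrasts.items():
    assert np.atleast_2d(contrast_val).shape[-1] == n_columns, contrast_id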
########################################
# Perform a GLM analysis
########################################
print('Fitting a GLM (this takes time)...')
fmri_glm = FMRILinearModel(data_path, design_matrix.matrix,
mask='compute')
fmri_glm.fit(do_scaling=True, model='ar1')
#########################################
# Estimate the contrasts
#########################################
print('Computing contrasts...')
for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
print(' Contrast % 2i out of %i: %s' %
(index + 1, len(contrasts), contrast_id))
# save the z_image
image_path = path.join(write_dir, '%s_z_map.nii' % contrast_id)
z_map, = fmri_glm.contrast(contrast_val, con_id=contrast_id, output_z=True)
save(z_map, image_path)
# Create snapshots of the contrasts
vmax = max(- z_map.get_data().min(), z_map.get_data().max())
if index > 0:
plt.clf()
plot_map(z_map.get_data(), z_map.get_affine(),
cmap=cm.cold_hot,
vmin=- vmax,
vmax=vmax,
anat=None,
cut_coords=None,
slicer='z',
black_bg=True, # looks much better thus
figure=10,
threshold=2.5)
plt.savefig(path.join(write_dir, '%s_z_map.png' % contrast_id))
print("All the results were witten in %s" % write_dir)
plt.show()
| bsd-3-clause |
kyleam/seaborn | examples/elaborate_violinplot.py | 30 | 1055 | """
Violinplot from a wide-form dataset
===================================
_thumb: .6, .45
"""
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="whitegrid")
# Load the example dataset of brain network correlations
df = sns.load_dataset("brain_networks", header=[0, 1, 2], index_col=0)
# Pull out a specific subset of networks
used_networks = [1, 3, 4, 5, 6, 7, 8, 11, 12, 13, 16, 17]
used_columns = (df.columns.get_level_values("network")
.astype(int)
.isin(used_networks))
df = df.loc[:, used_columns]
# Compute the correlation matrix and average over networks
corr_df = df.corr().groupby(level="network").mean()
corr_df.index = corr_df.index.astype(int)
corr_df = corr_df.sort_index().T
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 6))
# Draw a violinplot with a narrower bandwidth than the default
sns.violinplot(data=corr_df, palette="Set3", bw=.2, cut=1, linewidth=1)
# Finalize the figure
ax.set(ylim=(-.7, 1.05))
sns.despine(left=True, bottom=True)
| bsd-3-clause |
phoebe-project/phoebe2-docs | 2.2/tutorials/irrad_method_horvat.py | 1 | 3005 | #!/usr/bin/env python
# coding: utf-8
# Lambert Scattering (irrad_method='horvat')
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.2 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.2,<2.3"')
# As always, let's do imports and initialize a logger and a new bundle. See [Building a System](../tutorials/building_a_system.ipynb) for more details.
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger('error')
b = phoebe.default_binary()
# Relevant Parameters
# ---------------------------------
# For parameters that affect reflection and heating (irrad_frac_\*) see the tutorial on [reflection and heating](./reflection_heating.ipynb).
#
# The 'irrad_method' compute option dictates whether irradiation is handled according to the new Horvat scheme which includes Lambert Scattering, Wilson's original reflection scheme, or ignored entirely.
# In[3]:
print(b['irrad_method'])
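# (editorial sketch) the scheme can also be chosen per compute call, e.g.:
# b.run_compute(irrad_method='wilson')  # Wilson's reflection scheme
# b.run_compute(irrad_method='none')    # ignore irradiation entirely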
# Influence on Light Curves (fluxes)
# ---------------------------------
#
# Let's (roughly) reproduce Figure 8 from [Prsa et al. 2016](http://phoebe-project.org/publications/2016Prsa+) which shows the difference between Wilson and Horvat schemes for various inclinations.
#
# <img src="prsa+2016_fig8.png" alt="Figure 8" width="600px"/>
#
# First we'll roughly create an A0-K0 binary and set reasonable albedos.
# In[4]:
b['teff@primary'] = 11000
b['requiv@primary'] = 2.5
b['gravb_bol@primary'] = 1.0
b['teff@secondary'] = 5000
b['requiv@secondary'] = 0.85
b['q@binary'] = 0.8/3.0
b.flip_constraint('mass@primary', solve_for='sma@binary')
b['mass@primary'] = 3.0
# In[5]:
print(b.filter(qualifier=['mass', 'requiv', 'teff'], context='component'))
# In[6]:
b['irrad_frac_refl_bol@primary'] = 1.0
b['irrad_frac_refl_bol@secondary'] = 0.6
# We'll also disable any eclipsing effects.
# In[7]:
b['eclipse_method'] = 'only_horizon'
# Now we'll compute the light curves with wilson and horvat irradiation, and plot the relative differences between the two as a function of phase, for several different values of the inclination.
# In[8]:
phases = phoebe.linspace(0,1,101)
b.add_dataset('lc', times=b.to_time(phases))
# In[9]:
for incl in [0,30,60,90]:
b.set_value('incl@binary', incl)
b.run_compute(irrad_method='wilson')
fluxes_wilson = b.get_value('fluxes', context='model')
b.run_compute(irrad_method='horvat')
fluxes_horvat = b.get_value('fluxes', context='model')
plt.plot(phases, (fluxes_wilson-fluxes_horvat)/fluxes_wilson, label='i={}'.format(incl))
plt.xlabel('phase')
plt.ylabel('[F(wilson) - F(horvat)] / F(wilson)')
plt.legend(loc='upper center')
plt.show()
# In[ ]:
| gpl-3.0 |
salazardetroya/libmesh | doc/statistics/libmesh_citations.py | 1 | 2340 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
# Number of "papers using libmesh" by year.
#
# Note 1: this does not count citations "only," the authors must have actually
# used libmesh in part of their work. Therefore, these counts do not include
# things like Wolfgang citing us in his papers to show how Deal.II is
# superior...
#
# Note 2: I typically update this data after regenerating the web page,
# since bibtex2html renumbers the references starting from "1" each year.
#
# Note 3: These citations include anything that is not a dissertation/thesis.
# So, some are conference papers, some are journal articles, etc.
#
# Note 4: The libmesh paper came out in 2006, but there are some citations
# prior to that date, obviously. These counts include citations of the
# website libmesh.sf.net as well...
#
# Note 5: Preprints are listed as the "current year + 1" and are constantly
# being moved to their respective years after being published.
data = [
'2004', 5,
'\'05', 2,
'\'06', 13,
'\'07', 8,
'\'08', 23,
'\'09', 30,
'\'10', 24,
'\'11', 37,
'\'12', 50,
'\'13', 78,
'\'14', 60,
'\'15', 11,
'P', 8, # Preprints
'T', 36 # Theses
]
# Extract the x-axis labels from the data array
xlabels = data[0::2]
# Extract the publication counts from the data array
n_papers = data[1::2]
# The number of data points
N = len(xlabels)
# Get a reference to the figure
fig = plt.figure()
# 111 is equivalent to Matlab's subplot(1,1,1) command
ax = fig.add_subplot(111)
# Create an x-axis for plotting
x = np.linspace(1, N, N)
# Width of the bars
width = 0.8
# Make the bar chart. Plot years in blue, preprints and theses in green.
ax.bar(x[0:N-2], n_papers[0:N-2], width, color='b')
ax.bar(x[N-2:N], n_papers[N-2:N], width, color='g')
# Label the x-axis
plt.xlabel('P=Preprints, T=Theses')
# Set up the xtick locations and labels. Note that you have to offset
# the position of the ticks by width/2, where width is the width of
# the bars.
ax.set_xticks(np.linspace(1,N,N) + width/2)
ax.set_xticklabels(xlabels)
# Create a title string
title_string = 'LibMesh Citations, (' + str(sum(n_papers)) + ' Total)'
fig.suptitle(title_string)
# Save as PDF
plt.savefig('libmesh_citations.pdf')
# Local Variables:
# python-indent: 2
# End:
| lgpl-2.1 |
CI-WATER/TethysCluster | utils/scimage_12_04.py | 2 | 17224 | #!/usr/bin/env python
"""
This script is meant to be run inside an Ubuntu cloud image available at
uec-images.ubuntu.com::
$ EC2_UBUNTU_IMG_URL=http://uec-images.ubuntu.com/precise/current
$ wget $EC2_UBUNTU_IMG_URL/precise-server-cloudimg-amd64.tar.gz
or::
$ wget $EC2_UBUNTU_IMG_URL/precise-server-cloudimg-i386.tar.gz
After downloading a Ubuntu cloud image the next step is to extract the image::
$ tar xvzf precise-server-cloudimg-amd64.tar.gz
Then resize it to 10GB::
$ e2fsck -f precise-server-cloudimg-amd64.img
$ resize2fs precise-server-cloudimg-amd64.img 10G
Next you need to mount the image::
$ mkdir /tmp/img-mount
$ mount precise-server-cloudimg-amd64.img /tmp/img-mount
$ mount -t proc none /tmp/img-mount/proc
$ mount -t sysfs none /tmp/img-mount/sys
$ mount -o bind /dev /tmp/img-mount/dev
$ mount -t devpts none /tmp/img-mount/dev/pts
$ mount -o rbind /var/run/dbus /tmp/img-mount/var/run/dbus
Copy /etc/resolv.conf and /etc/mtab to the image::
$ mkdir -p /tmp/img-mount/var/run/resolvconf
$ cp /etc/resolv.conf /tmp/img-mount/var/run/resolvconf/resolv.conf
$ grep -v rootfs /etc/mtab > /tmp/img-mount/etc/mtab
Next copy this script inside the image::
$ cp /path/to/scimage.py /tmp/img-mount/root/scimage.py
Finally chroot inside the image and run this script:
$ chroot /tmp/img-mount /bin/bash
$ cd $HOME
$ python scimage.py
"""
import os
import sys
import glob
import shutil
import fileinput
import subprocess
import multiprocessing
SRC_DIR = "/usr/local/src"
APT_SOURCES_FILE = "/etc/apt/sources.list"
BUILD_UTILS_PKGS = "build-essential devscripts debconf debconf-utils dpkg-dev "
BUILD_UTILS_PKGS += "gfortran llvm-3.2-dev swig cdbs patch python-dev "
BUILD_UTILS_PKGS += "python-distutils-extra python-setuptools python-pip "
BUILD_UTILS_PKGS += "python-nose"
CLOUD_CFG_FILE = '/etc/cloud/cloud.cfg'
GRID_SCHEDULER_GIT = 'git://github.com/jtriley/gridscheduler.git'
CLOUDERA_ARCHIVE_KEY = 'http://archive.cloudera.com/debian/archive.key'
CLOUDERA_APT = 'http://archive.cloudera.com/debian maverick-cdh3u5 contrib'
CONDOR_APT = 'http://www.cs.wisc.edu/condor/debian/development lenny contrib'
NUMPY_SCIPY_SITE_CFG = """\
[DEFAULT]
library_dirs = /usr/lib
include_dirs = /usr/include:/usr/include/suitesparse
[blas_opt]
libraries = ptf77blas, ptcblas, atlas
[lapack_opt]
libraries = lapack, ptf77blas, ptcblas, atlas
[amd]
amd_libs = amd
[umfpack]
umfpack_libs = umfpack
[fftw]
libraries = fftw3
"""
STARCLUSTER_MOTD = """\
#!/bin/sh
cat<<"EOF"
_ _ _
__/\_____| |_ __ _ _ __ ___| |_ _ ___| |_ ___ _ __
\ / __| __/ _` | '__/ __| | | | / __| __/ _ \ '__|
/_ _\__ \ || (_| | | | (__| | |_| \__ \ || __/ |
\/ |___/\__\__,_|_| \___|_|\__,_|___/\__\___|_|
TethysCluster Ubuntu 12.04 AMI
Software Tools for Academics and Researchers (STAR)
Homepage: http://star.mit.edu/cluster
Documentation: http://star.mit.edu/cluster/docs/latest
Code: https://github.com/jtriley/TethysCluster
Mailing list: tethyscluster@mit.edu
This AMI Contains:
* Open Grid Scheduler (OGS - formerly SGE) queuing system
* Condor workload management system
* OpenMPI compiled with Open Grid Scheduler support
* OpenBLAS - Highly optimized Basic Linear Algebra Routines
* NumPy/SciPy linked against OpenBlas
* IPython 0.13 with parallel and notebook support
* and more! (use 'dpkg -l' to show all installed packages)
Open Grid Scheduler/Condor cheat sheet:
* qstat/condor_q - show status of batch jobs
* qhost/condor_status- show status of hosts, queues, and jobs
* qsub/condor_submit - submit batch jobs (e.g. qsub -cwd ./job.sh)
* qdel/condor_rm - delete batch jobs (e.g. qdel 7)
* qconf - configure Open Grid Scheduler system
Current System Stats:
EOF
landscape-sysinfo | grep -iv 'graph this data'
"""
CLOUD_INIT_CFG = """\
user: ubuntu
disable_root: 0
preserve_hostname: False
# datasource_list: [ "NoCloud", "OVF", "Ec2" ]
cloud_init_modules:
- bootcmd
- resizefs
- set_hostname
- update_hostname
- update_etc_hosts
- rsyslog
- ssh
cloud_config_modules:
- mounts
- ssh-import-id
- locale
- set-passwords
- grub-dpkg
- timezone
- puppet
- chef
- mcollective
- disable-ec2-metadata
- runcmd
cloud_final_modules:
- rightscale_userdata
- scripts-per-once
- scripts-per-boot
- scripts-per-instance
- scripts-user
- keys-to-console
- final-message
apt_sources:
- source: deb $MIRROR $RELEASE multiverse
- source: deb %(CLOUDERA_APT)s
- source: deb-src %(CLOUDERA_APT)s
- source: deb %(CONDOR_APT)s
""" % dict(CLOUDERA_APT=CLOUDERA_APT, CONDOR_APT=CONDOR_APT)
def run_command(cmd, ignore_failure=False, failure_callback=None,
get_output=False):
kwargs = {}
if get_output:
kwargs.update(dict(stdout=subprocess.PIPE, stderr=subprocess.PIPE))
p = subprocess.Popen(cmd, shell=True, **kwargs)
output = []
if get_output:
line = None
while line != '':
line = p.stdout.readline()
if line != '':
output.append(line)
print line,
for line in p.stderr.readlines():
if line != '':
output.append(line)
print line,
retval = p.wait()
if retval != 0:
errmsg = "command '%s' failed with status %d" % (cmd, retval)
if failure_callback:
ignore_failure = failure_callback(retval)
if not ignore_failure:
raise Exception(errmsg)
else:
sys.stderr.write(errmsg + '\n')
if get_output:
return retval, ''.join(output)
return retval
def apt_command(cmd):
dpkg_opts = "Dpkg::Options::='--force-confnew'"
cmd = "apt-get -o %s -y --force-yes %s" % (dpkg_opts, cmd)
cmd = "DEBIAN_FRONTEND='noninteractive' " + cmd
run_command(cmd)
def apt_install(pkgs):
apt_command('install %s' % pkgs)
def chdir(directory):
opts = glob.glob(directory)
isdirlist = [o for o in opts if os.path.isdir(o)]
if len(isdirlist) > 1:
raise Exception("more than one dir matches: %s" % directory)
os.chdir(isdirlist[0])
def _fix_atlas_rules(rules_file='debian/rules'):
for line in fileinput.input(rules_file, inplace=1):
if 'ATLAS=None' not in line:
print line,
def configure_apt_sources():
srcfile = open(APT_SOURCES_FILE)
contents = srcfile.readlines()
srcfile.close()
srclines = []
for line in contents:
if not line.strip() or line.startswith('#'):
continue
parts = line.split()
if parts[0] == 'deb':
parts[0] = 'deb-src'
srclines.append(' '.join(parts).strip())
srcfile = open(APT_SOURCES_FILE, 'w')
srcfile.write(''.join(contents))
srcfile.write('\n'.join(srclines) + '\n')
srcfile.write('deb %s\n' % CLOUDERA_APT)
srcfile.write('deb-src %s\n' % CLOUDERA_APT)
srcfile.write('deb %s\n' % CONDOR_APT)
srcfile.close()
run_command('add-apt-repository ppa:staticfloat/julia-deps -y')
run_command('gpg --keyserver keyserver.ubuntu.com --recv-keys 0F932C9C')
run_command('curl -s %s | sudo apt-key add -' % CLOUDERA_ARCHIVE_KEY)
apt_install('debian-archive-keyring')
def upgrade_packages():
apt_command('update')
apt_command('upgrade')
def install_build_utils():
"""docstring for configure_build"""
apt_install(BUILD_UTILS_PKGS)
def install_gridscheduler():
chdir(SRC_DIR)
apt_command('build-dep gridengine')
if os.path.isfile('gridscheduler-scbuild.tar.gz'):
run_command('tar xvzf gridscheduler-scbuild.tar.gz')
run_command('mv gridscheduler /opt/sge6-fresh')
return
run_command('git clone %s' % GRID_SCHEDULER_GIT)
sts, out = run_command('readlink -f `which java`', get_output=True)
java_home = out.strip().split('/jre')[0]
chdir(os.path.join(SRC_DIR, 'gridscheduler', 'source'))
run_command('git checkout -t -b develop origin/develop')
env = 'JAVA_HOME=%s' % java_home
run_command('%s ./aimk -only-depend' % env)
run_command('%s scripts/zerodepend' % env)
run_command('%s ./aimk depend' % env)
run_command('%s ./aimk -no-secure -no-gui-inst' % env)
sge_root = '/opt/sge6-fresh'
os.mkdir(sge_root)
env += ' SGE_ROOT=%s' % sge_root
run_command('%s scripts/distinst -all -local -noexit -y -- man' % env)
def install_condor():
chdir(SRC_DIR)
run_command("rm /var/lock")
apt_install('condor=7.7.2-1')
run_command('echo condor hold | dpkg --set-selections')
run_command('ln -s /etc/condor/condor_config /etc/condor_config.local')
run_command('mkdir /var/lib/condor/log')
run_command('mkdir /var/lib/condor/run')
run_command('chown -R condor:condor /var/lib/condor/log')
run_command('chown -R condor:condor /var/lib/condor/run')
def install_torque():
chdir(SRC_DIR)
apt_install('torque-server torque-mom torque-client')
def install_pydrmaa():
chdir(SRC_DIR)
run_command('pip install drmaa')
def install_blas_lapack():
"""docstring for install_openblas"""
chdir(SRC_DIR)
apt_install("libopenblas-dev")
def install_numpy_scipy():
"""docstring for install_numpy"""
chdir(SRC_DIR)
run_command('pip install -d . numpy')
run_command('unzip numpy*.zip')
run_command("sed -i 's/return None #/pass #/' numpy*/numpy/core/setup.py")
run_command('pip install scipy')
def install_pandas():
"""docstring for install_pandas"""
chdir(SRC_DIR)
apt_command('build-dep pandas')
run_command('pip install pandas')
def install_matplotlib():
chdir(SRC_DIR)
run_command('pip install matplotlib')
def install_julia():
apt_install("libsuitesparse-dev libncurses5-dev "
"libopenblas-dev libarpack2-dev libfftw3-dev libgmp-dev "
"libunwind7-dev libreadline-dev zlib1g-dev")
buildopts = """\
BUILDOPTS="LLVM_CONFIG=llvm-config-3.2 USE_QUIET=0 USE_LIB64=0"; for lib in \
LLVM ZLIB SUITESPARSE ARPACK BLAS FFTW LAPACK GMP LIBUNWIND READLINE GLPK \
NGINX; do export BUILDOPTS="$BUILDOPTS USE_SYSTEM_$lib=1"; done"""
chdir(SRC_DIR)
if not os.path.exists("julia"):
run_command("git clone git://github.com/JuliaLang/julia.git")
run_command("%s && cd julia && make $BUILDOPTS PREFIX=/usr install" %
buildopts)
def install_mpi():
chdir(SRC_DIR)
apt_install('mpich2')
apt_command('build-dep openmpi')
apt_install('blcr-util')
if glob.glob('*openmpi*.deb'):
run_command('dpkg -i *openmpi*.deb')
else:
apt_command('source openmpi')
chdir('openmpi*')
for line in fileinput.input('debian/rules', inplace=1):
print line,
if '--enable-heterogeneous' in line:
print ' --with-sge \\'
def _deb_failure_callback(retval):
if not glob.glob('../*openmpi*.deb'):
return False
return True
run_command('dch --local=\'+custom\' '
'"custom build on: `uname -s -r -v -m -p -i -o`"')
run_command('dpkg-buildpackage -rfakeroot -b',
failure_callback=_deb_failure_callback)
run_command('dpkg -i ../*openmpi*.deb')
sts, out = run_command('ompi_info | grep -i grid', get_output=True)
if 'gridengine' not in out:
raise Exception("failed to build OpenMPI with "
"Open Grid Scheduler support")
run_command('echo libopenmpi1.3 hold | dpkg --set-selections')
run_command('echo libopenmpi-dev hold | dpkg --set-selections')
run_command('echo libopenmpi-dbg hold | dpkg --set-selections')
run_command('echo openmpi-bin hold | dpkg --set-selections')
run_command('echo openmpi-checkpoint hold | dpkg --set-selections')
run_command('echo openmpi-common hold | dpkg --set-selections')
run_command('echo openmpi-doc hold | dpkg --set-selections')
run_command('pip install mpi4py')
def install_hadoop():
chdir(SRC_DIR)
hadoop_pkgs = ['namenode', 'datanode', 'tasktracker', 'jobtracker',
'secondarynamenode']
pkgs = ['hadoop-0.20'] + ['hadoop-0.20-%s' % pkg for pkg in hadoop_pkgs]
apt_install(' '.join(pkgs))
run_command('easy_install dumbo')
def install_ipython():
chdir(SRC_DIR)
apt_install('libzmq-dev')
run_command('pip install ipython tornado pygments pyzmq')
mjax_install = 'from IPython.external.mathjax import install_mathjax'
mjax_install += '; install_mathjax()'
run_command("python -c '%s'" % mjax_install)
def configure_motd():
for f in glob.glob('/etc/update-motd.d/*'):
os.unlink(f)
motd = open('/etc/update-motd.d/00-tethyscluster', 'w')
motd.write(STARCLUSTER_MOTD)
motd.close()
os.chmod(motd.name, 0755)
def configure_cloud_init():
"""docstring for configure_cloud_init"""
cloudcfg = open('/etc/cloud/cloud.cfg', 'w')
cloudcfg.write(CLOUD_INIT_CFG)
cloudcfg.close()
def configure_bash():
completion_line_found = False
for line in fileinput.input('/etc/bash.bashrc', inplace=1):
if 'bash_completion' in line and line.startswith('#'):
print line.replace('#', ''),
completion_line_found = True
elif completion_line_found:
print line.replace('#', ''),
completion_line_found = False
else:
print line,
aliasfile = open('/root/.bash_aliases', 'w')
aliasfile.write("alias ..='cd ..'\n")
aliasfile.close()
def setup_environ():
num_cpus = multiprocessing.cpu_count()
os.environ['MAKEFLAGS'] = '-j%d' % (num_cpus + 1)
os.environ['DEBIAN_FRONTEND'] = "noninteractive"
if os.path.isfile('/sbin/initctl') and not os.path.islink('/sbin/initctl'):
run_command('mv /sbin/initctl /sbin/initctl.bak')
run_command('ln -s /bin/true /sbin/initctl')
def install_nfs():
chdir(SRC_DIR)
run_command('initctl reload-configuration')
apt_install('nfs-kernel-server')
run_command('ln -s /etc/init.d/nfs-kernel-server /etc/init.d/nfs')
def install_default_packages():
# stop mysql from interactively asking for a password
preseedf = '/tmp/mysql-preseed.txt'
mysqlpreseed = open(preseedf, 'w')
preseeds = """\
mysql-server mysql-server/root_password select
mysql-server mysql-server/root_password seen true
mysql-server mysql-server/root_password_again select
mysql-server mysql-server/root_password_again seen true
"""
mysqlpreseed.write(preseeds)
mysqlpreseed.close()
run_command('debconf-set-selections < %s' % mysqlpreseed.name)
run_command('rm %s' % mysqlpreseed.name)
pkgs = ["git", "mercurial", "subversion", "cvs", "vim", "vim-scripts",
"emacs", "tmux", "screen", "zsh", "ksh", "csh", "tcsh", "encfs",
"keychain", "unzip", "rar", "unace", "ec2-api-tools",
"ec2-ami-tools", "mysql-server", "mysql-client", "apache2",
"libapache2-mod-wsgi", "sysv-rc-conf", "pssh", "cython", "irssi",
"htop", "mosh", "default-jdk", "xvfb", "python-imaging",
"python-ctypes"]
apt_install(' '.join(pkgs))
def install_python_packges():
pypkgs = ['python-boto', 'python-paramiko', 'python-django',
'python-pudb']
for pypkg in pypkgs:
if pypkg.startswith('python-'):
apt_command('build-dep %s' % pypkg.split('python-')[1])
run_command('pip install %s' % pypkg)
def configure_init():
for script in ['nfs-kernel-server', 'hadoop', 'condor', 'apache', 'mysql']:
run_command('find /etc/rc* -iname \*%s\* -delete' % script)
def cleanup():
run_command('rm -f /etc/resolv.conf')
run_command('rm -rf /var/run/resolvconf')
run_command('rm -f /etc/mtab')
run_command('rm -rf /root/*')
exclude = ['/root/.bashrc', '/root/.profile', '/root/.bash_aliases']
for dot in glob.glob("/root/.*"):
if dot not in exclude:
run_command('rm -rf %s' % dot)
for path in glob.glob('/usr/local/src/*'):
if os.path.isdir(path):
shutil.rmtree(path)
run_command('rm -f /var/cache/apt/archives/*.deb')
run_command('rm -f /var/cache/apt/archives/partial/*')
for f in glob.glob('/etc/profile.d/*'):
if 'byobu' in f:
run_command('rm -f %s' % f)
if os.path.islink('/sbin/initctl') and os.path.isfile('/sbin/initctl.bak'):
run_command('mv -f /sbin/initctl.bak /sbin/initctl')
def main():
"""docstring for main"""
if os.getuid() != 0:
sys.stderr.write('you must be root to run this script\n')
return
setup_environ()
configure_motd()
configure_cloud_init()
configure_bash()
configure_apt_sources()
upgrade_packages()
install_build_utils()
install_default_packages()
install_gridscheduler()
install_condor()
#install_torque()
install_pydrmaa()
install_blas_lapack()
install_numpy_scipy()
install_matplotlib()
install_pandas()
install_ipython()
install_mpi()
install_hadoop()
install_nfs()
install_julia()
configure_init()
cleanup()
if __name__ == '__main__':
main()
| gpl-3.0 |
kristohr/pybayenv2 | pybayenv/compute_average_bf.py | 1 | 4066 | #!/usr/bin/python
import sys, string, re, os, commands, time, math
#from scipy import stats
#import scipy as sp
import numpy as np
#import matplotlib as mpl
#from matplotlib import pyplot as plt
class SNP:
def __init__(self, name, num_env, t):
self.name = name
self.num_env = [False] * num_env
self.bf_list = [[0 for i in range(t)] for j in range(num_env)]
self.rel_signal = []
self.sum_signals = 0
self.lg_info = []
self.chr = 99
self.lg = 99
def get_name(self):
return self.name
def get_num_env(self):
return self.num_env
def set_num_env(self, n):
self.num_env[n] = True
def add_to_list(self, bf, k, i):
self.bf_list[k][i] = bf
def set_signal(self, gamma):
self.rel_signal.append(gamma)
self.sum_signals += gamma #Add to the total of signals
#Return the bf signal in variable k
def get_signal(self, k):
return self.rel_signal[k]
#Return the bf signal list
def get_signals(self):
return self.rel_signal
def get_sum_signals(self):
return self.sum_signals
def print_env(self):
print self.num_env
def get_median_bf(self, k):
#print self.bf_list[k]
bfs = np.array(self.bf_list[k])
median = np.median(bfs)
return median
def get_avg_bf(self, k):
#print self.bf_list[k]
bfs = np.array(self.bf_list[k])
avg = np.average(bfs)
return avg
def add_bf(self, bf):
self.sum_bf += bf
def get_sum_bf(self):
return self.sum_bf
def get_num_runs(self):
return self.num_runs
def get_bf_list(self):
return self.bf_list
def set_lg_info(self, info):
self.lg_info.append(info)
def get_lg_info(self):
return self.lg_info
def set_chr(self, ch):
self.chr = ch
def get_chr(self):
return self.chr
def set_linkage_group(self, lg):
self.lg = lg
def get_linkage_group(self):
return self.lg
def compute_average_bf(num_var, num_tests):
N = int(num_var)
t = int(num_tests)
snp_dict = {}
for i in range (0, t):
filename = "results/bf_results_t" + str(i) + ".bf"
data = open( filename, "r")
print filename
lines = data.readlines()
for line in lines:
cols = line.split("\t")
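# Each marker name carries a "_<test index>" suffix (two characters for a
# single-digit index, three for a two-digit one); strip it to recover the
# bare marker name used as the dictionary key.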
snp_name = cols[0][0:-2]
if i > 9:
snp_name = snp_name[0:-1]
if snp_name in snp_dict:
snp = snp_dict[snp_name]
for k in range(0, N):
snp.add_to_list(float(cols[k+1]), k, i)
else:
snp = SNP(snp_name, N, t)
snp_dict[snp_name] = snp
for k in range(0, N):
snp.add_to_list(float(cols[k+1]), k, i)
data.close()
print "################LENGTH:" + str(len(snp_dict))
FILE1 = open("results/median_bf.txt", "w")
FILE2 = open("results/average_bf.txt", "w")
#bf_median = "marker\tsal1\tsal2\ttemp1\ttemp2\tox1\tox2\n"
#bf_avg = "marker\tsal1\tsal2\ttemp1\ttemp2\tox1\tox2\n"
bf_median = ""
bf_avg = ""
for key in snp_dict:
snp = snp_dict[key]
bf_avg += snp.get_name()
bf_median += snp.get_name()
for k in range(0, N):
bf_a = snp.get_avg_bf(k)
bf_m = snp.get_median_bf(k)
bf_avg += "\t" + str(bf_a)
bf_median += "\t" + str(bf_m)
bf_avg += "\n"
bf_median += "\n"
FILE1.write(bf_median)
FILE2.write(bf_avg)
FILE1.close()
FILE2.close()
if __name__ == '__main__':
# Terminate if too few arguments
if len(sys.argv) < 3:
print 'usage: %s <number of vars> <num tests>' % sys.argv[0]
sys.exit(-1)
compute_average_bf(sys.argv[1], sys.argv[2])
| bsd-3-clause |
davidgardenier/frbpoppy | tests/dm_snr/future.py | 1 | 6523 | """Check the log N log F slope for future surveys."""
import numpy as np
import matplotlib.pyplot as plt
from copy import copy
from frbpoppy import CosmicPopulation, Survey, LargePopulation, SurveyPopulation, hist
from frbpoppy import unpickle, pprint
import frbpoppy.direction_dists as did
import frbpoppy.galacticops as go
from tests.convenience import plot_aa_style, rel_path
from tests.rates.alpha_real import EXPECTED
MAKE = True
SURVEYS = ('parkes-htru',
'wsrt-apertif',
'fast-crafts',
'puma-full',
'chord',
'ska1-low',
'ska1-mid')
SIZE = 5e4
if MAKE:
# Calculate the fraction of the sky that the survey covers
surv_f_area = {}
for name in SURVEYS:
pop = CosmicPopulation.simple(5e5)
pop.gen_direction()
survey = Survey(name)
mask = survey.in_region(pop.frbs.ra, pop.frbs.dec,
pop.frbs.gl, pop.frbs.gb)
in_surv_region = np.sum(mask)
tot_region = len(mask)
area_sky = 4*np.pi*(180/np.pi)**2 # In sq. degrees
f_area = (survey.beam_size/area_sky)*(tot_region/in_surv_region)
surv_f_area[name] = f_area
print(f'{name} covers {f_area*100}% of the sky')
surv_pops = []
for name in SURVEYS:
# Set up survey
survey = Survey(name)
if name in ('parkes-htru', 'wsrt-apertif'):
survey.set_beam(model=name)
# Set up CosmicPopulation
pop = CosmicPopulation.optimal(SIZE, generate=False)
# Only generate FRBs in the survey region
pop.set_direction(model='uniform',
min_ra=survey.ra_min,
max_ra=survey.ra_max,
min_dec=survey.dec_min,
max_dec=survey.dec_max)
# Parkes also has galactic limits:
if name == 'parkes-htru':
pop.gen_index()
pop.gen_dist()
pop.gen_time()
# Generate FRBs just within the galactic constraints
pop.gen_direction()
# Gather ra, dec coordinate limits
lims = {'min_ra': survey.ra_min, 'max_ra': survey.ra_max,
'min_dec': survey.dec_min, 'max_dec': survey.dec_max}
def sample(n_gen):
ra, dec = did.uniform(n_srcs=n_gen, **lims)
gl, gb = go.radec_to_lb(ra, dec, frac=True)
coords = [ra, dec, gl, gb]
return coords
def accept(coords):
return survey.in_region(*coords)
coords = sample(int(SIZE))
mask = accept(coords)
reject, = np.where(~mask)
while reject.size > 0:
fill = sample(reject.size)
mask = accept(fill)
for i in range(len(coords)):
coords[i][reject[mask]] = fill[i][mask]
reject = reject[~mask]
# Assign the values
frbs = pop.frbs
frbs.ra, frbs.dec = coords[0], coords[1]
frbs.gl, frbs.gb = coords[2], coords[3]
# Continue with generation
pop.gen_gal_coords()
pop.gen_dm()
pop.gen_w()
pop.gen_lum()
pop.gen_si()
else:
pop.generate()
surv_pop = SurveyPopulation(pop, survey, scale_by_area=False)
surv_pop.source_rate.f_area = surv_f_area[name]
surv_pop.source_rate.scale_by_area()
# surv_pop.save()
surv_pops.append(surv_pop)
else:
surv_pops = []
for name in SURVEYS:
surv_pops.append(unpickle(f'optimal_{name}'))
# Start plot
plot_aa_style(cols=2)
plt.rcParams["figure.figsize"] = (3.556*3, 3.556)
fig, axes = plt.subplots(1, 3)
for ax in axes.flatten():
ax.set_aspect('auto')
# Get norm pop
y = 0
ys = []
names = []
rates = []
norm_sim_rate = surv_pops[0].source_rate.det
norm_real_rate = EXPECTED['parkes-htru'][0] / EXPECTED['parkes-htru'][1]
norm_rate = norm_sim_rate / norm_real_rate
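# (editorial note) norm_rate pins the simulated parkes-htru detection rate to
# its expected real rate from EXPECTED, so that rate = source_rate.det / norm_rate
# below is expressed in detections per day.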
for i, surv_pop in enumerate(surv_pops):
name = surv_pop.name.split('_')[-1]
pprint(name)
if surv_pop.n_sources() == 0:
print(surv_pop.source_rate)
print(f'{name} | no FRBs in population')
continue
names.append(name)
ys.append(y)
# Dimensions measure plot
ax = axes[0]
ax.set_xlabel(r'DM ($\textrm{pc}\ \textrm{cm}^{-3}$)')
ax.set_ylabel(r'\#')
ax.set_yscale('log')
bins, values = hist(surv_pop.frbs.dm, bin_type='lin', norm='frac',
n_bins=20)
values = values.astype(np.float64)
values *= float(surv_pop.source_rate.f_area)*1e6
ax.step(bins, values, where='mid', label=name)
# Fluence plot
ax = axes[1]
ax.set_xlabel('S/N')
ax.set_xscale('log')
ax.set_ylabel(r'\#(${>}\text{S/N}$)')
ax.set_yscale('log')
# Update fluence plot
bins, values = hist(surv_pop.frbs.snr, bin_type='log', norm='frac',
n_bins=25)
# Cumulative sum
values = np.cumsum(values[::-1])[::-1]
values = values.astype(np.float64)
values *= float(surv_pop.source_rate.f_area)*1e6
ax.step(bins, values, where='mid', label=name)
# Plot rates
ax = axes[2]
ax.set_xscale('log')
ax.set_xlabel(r'Rate (day$^{-1}$)')
rate = surv_pop.source_rate.det/norm_rate
print(f'rate: {rate}')
line = ax.errorbar(rate, y,
fmt='x',
label=rf'{name}')
ax.grid()
rates.append(rate)
y += 1
ax.yaxis.tick_right()
ax.set_yticks(ys)
ax.set_yticklabels(names)
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
for i, y in enumerate(ax.get_yticklabels()):
y.set_color(colors[i])
ax.invert_yaxis() # labels read top-to-bottom
# Add thin grey horizontal lines
x_lim = ax.get_xlim()
ax.set_xlim(x_lim)
for i, y in enumerate(ys):
ax.plot((x_lim[0], rates[i]), (y, y), color='k', lw=0.5, zorder=0, ls='--')
for e in list(zip(SURVEYS, rates)):
pprint(e)
euclidean_lines = True
if euclidean_lines:
xlims = axes[1].get_xlim()
ylims = axes[1].get_ylim()
axes[1].set_xlim(xlims)
axes[1].set_ylim(ylims)
xs = np.logspace(np.log10(xlims[0]),
np.log10(xlims[1]),
100)
for n in range(-10, 15):
ys = 10**((np.log10(xs)+n)*-1.5)
axes[1].plot(xs, ys, 'k:', linewidth=0.25)
# plt.legend()
plt.tight_layout()
plt.savefig(rel_path('./plots/future_surveys.pdf'))
| mit |
tu-rbo/differentiable-particle-filters | methods/dpf_kitti.py | 1 | 43029 | import os
import numpy as np
import sonnet as snt
import tensorflow as tf
import matplotlib.pyplot as plt
from utils.data_utils_kitti import wrap_angle, compute_statistics, split_data, make_batch_iterator, make_repeating_batch_iterator, rotation_matrix, load_data_for_stats
from utils.method_utils import atan2, compute_sq_distance
from utils.plotting_utils import plot_maze, show_pause
from datetime import datetime
if tf.__version__ == '1.1.0-rc1' or tf.__version__ == '1.2.0':
from tensorflow.python.framework import ops
@ops.RegisterGradient("FloorMod")
def _mod_grad(op, grad):
x, y = op.inputs
gz = grad
x_grad = gz
y_grad = None # tf.reduce_mean(-(x // y) * gz, axis=[0], keep_dims=True)[0]
return x_grad, y_grad
class DPF():
def __init__(self, init_with_true_state, learn_odom, use_proposer, propose_ratio, proposer_keep_ratio, min_obs_likelihood, learn_gaussian_mle):
"""
:param init_with_true_state:
:param learn_odom:
:param use_proposer:
:param propose_ratio:
:param particle_std:
:param proposer_keep_ratio:
:param min_obs_likelihood:
"""
# store hyperparameters which are needed later
self.init_with_true_state = init_with_true_state
self.learn_odom = learn_odom
self.use_proposer = use_proposer and not init_with_true_state # only use proposer if we do not initialize with true state
self.propose_ratio = propose_ratio if not self.init_with_true_state else 0.0
# define some more parameters and placeholders
self.state_dim = 5
self.action_dim = 3
self.observation_dim = 6
self.placeholders = {'o': tf.placeholder('float32', [None, None, 50, 150, self.observation_dim], 'observations'),
'a': tf.placeholder('float32', [None, None, 3], 'actions'),
's': tf.placeholder('float32', [None, None, 5], 'states'),
'num_particles': tf.placeholder('float32'),
'keep_prob': tf.placeholder_with_default(tf.constant(1.0), []),
'is_training': tf.placeholder_with_default(tf.constant(False), [])
}
self.num_particles_float = self.placeholders['num_particles']
self.num_particles = tf.to_int32(self.num_particles_float)
# build learnable modules
self.build_modules(min_obs_likelihood, proposer_keep_ratio, learn_gaussian_mle)
def build_modules(self, min_obs_likelihood, proposer_keep_ratio, learn_gaussian_mle):
"""
:param min_obs_likelihood: lower bound on the estimated observation likelihood
:param proposer_keep_ratio: keep probability for dropout inside the particle proposer
:param learn_gaussian_mle: model motion noise as a learned Gaussian (mean and std) instead of sampled noise
:return: None
"""
# MEASUREMENT MODEL
# conv net for encoding the image
self.encoder = snt.Sequential([
snt.nets.ConvNet2D([16, 16, 16, 16], [[7, 7], [5, 5], [5, 5], [5, 5]], [[1,1], [1, 2], [1, 2], [2, 2]], [snt.SAME], activate_final=True, name='encoder/convnet'),
snt.BatchFlatten(),
lambda x: tf.nn.dropout(x, self.placeholders['keep_prob']),
snt.Linear(128, name='encoder/linear'),
tf.nn.relu
])
# observation likelihood estimator that maps states and image encodings to probabilities
self.obs_like_estimator = snt.Sequential([
snt.Linear(128, name='obs_like_estimator/linear'),
tf.nn.relu,
snt.Linear(128, name='obs_like_estimator/linear'),
tf.nn.relu,
snt.Linear(1, name='obs_like_estimator/linear'),
tf.nn.sigmoid,
lambda x: x * (1 - min_obs_likelihood) + min_obs_likelihood
], name='obs_like_estimator')
# motion noise generator used for motion sampling
if learn_gaussian_mle:
self.mo_noise_generator = snt.nets.MLP([32, 32, 4], activate_final=False, name='mo_noise_generator')
else:
self.mo_noise_generator = snt.nets.MLP([32, 32, 2], activate_final=False, name='mo_noise_generator')
# odometry model (if we want to learn it)
if self.learn_odom:
self.mo_transition_model = snt.nets.MLP([128, 128, 128, self.state_dim], activate_final=False, name='mo_transition_model')
# particle proposer that maps encodings to particles (if we want to use it)
if self.use_proposer:
self.particle_proposer = snt.Sequential([
snt.Linear(128, name='particle_proposer/linear'),
tf.nn.relu,
lambda x: tf.nn.dropout(x, proposer_keep_ratio),
snt.Linear(128, name='particle_proposer/linear'),
tf.nn.relu,
snt.Linear(128, name='particle_proposer/linear'),
tf.nn.relu,
snt.Linear(128, name='particle_proposer/linear'),
tf.nn.relu,
snt.Linear(4, name='particle_proposer/linear'),
tf.nn.tanh,
])
self.noise_scaler1 = snt.Module(lambda x: x * tf.exp(10 * tf.get_variable('motion_sampler/noise_scaler1', initializer=np.array(0.0, dtype='float32'))))
self.noise_scaler2 = snt.Module(lambda x: x * tf.exp(10 * tf.get_variable('motion_sampler/noise_scaler2', initializer=np.array(0.0, dtype='float32'))))
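# (editorial note) the two noise scalers multiply their input by exp(10 * w)
# with a learnable scalar w, i.e. a log-parameterised, always-positive noise
# magnitude; the factor 10 only rescales the gradient with respect to w.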
def custom_build(self, inputs):
"""A custom build method to wrap into a sonnet Module."""
outputs = snt.Conv2D(output_channels=16, kernel_shape=[7, 7], stride=[1, 1])(inputs)
outputs = tf.nn.relu(outputs)
outputs = snt.Conv2D(output_channels=16, kernel_shape=[5, 5], stride=[1, 2])(outputs)
outputs = tf.nn.relu(outputs)
outputs = snt.Conv2D(output_channels=16, kernel_shape=[5, 5], stride=[1, 2])(outputs)
outputs = tf.nn.relu(outputs)
outputs = snt.Conv2D(output_channels=16, kernel_shape=[5, 5], stride=[2, 2])(outputs)
outputs = tf.nn.relu(outputs)
outputs = tf.nn.dropout(outputs, self.placeholders['keep_prob'])
outputs = snt.BatchFlatten()(outputs)
outputs = snt.Linear(128)(outputs)
outputs = tf.nn.relu(outputs)
return outputs
def measurement_update(self, encoding, particles, means, stds):
"""
Compute the likelihood of the encoded observation for each particle.
:param encoding: encoding of the observation
:param particles: particle states to evaluate
:param means: dataset means used to normalize the particle states
:param stds: dataset standard deviations used to normalize the particle states
:return: observation likelihood
"""
# prepare input (normalize particles poses and repeat encoding per particle)
particle_input = self.transform_particles_as_input(particles, means, stds)
encoding_input = tf.tile(encoding[:, tf.newaxis, :], [1, tf.shape(particles)[1], 1])
input = tf.concat([encoding_input, particle_input], axis=-1)
# estimate the likelihood of the encoded observation for each particle, remove last dimension
obs_likelihood = snt.BatchApply(self.obs_like_estimator)(input)[:, :, 0]
return obs_likelihood
def transform_particles_as_input(self, particles, means, stds):
return ((particles - means['s']) / stds['s'])[..., 3:5]
def propose_particles(self, encoding, num_particles, state_mins, state_maxs):
duplicated_encoding = tf.tile(encoding[:, tf.newaxis, :], [1, num_particles, 1])
proposed_particles = snt.BatchApply(self.particle_proposer)(duplicated_encoding)
proposed_particles = tf.concat([
proposed_particles[:,:,:1] * (state_maxs[0] - state_mins[0]) / 2.0 + (state_maxs[0] + state_mins[0]) / 2.0,
proposed_particles[:,:,1:2] * (state_maxs[1] - state_mins[1]) / 2.0 + (state_maxs[1] + state_mins[1]) / 2.0,
atan2(proposed_particles[:,:,2:3], proposed_particles[:,:,3:4])], axis=2)
return proposed_particles
def motion_update(self, actions, particles, means, stds, state_step_sizes, learn_gaussian_mle, stop_sampling_gradient=False):
"""
Move particles according to odometry info in actions. Add learned noise.
:param actions:
:param particles:
:param means:
:param stds:
:param state_step_sizes:
:param stop_sampling_gradient:
:return: moved particles
"""
# 1. SAMPLE NOISY ACTIONS
# add dimension for particles
time_step = 0.103
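# (editorial note) 0.103 s is roughly the time between consecutive KITTI
# camera frames (the dataset is recorded at about 10 Hz).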
if learn_gaussian_mle:
actions = tf.concat([particles[:, :, 3:4] - means['s'][:, :, 3:4], particles[:, :, 4:5] - means['s'][:, :, 4:5]], axis=-1)
# prepare input (normalize actions and repeat per particle)
action_input = actions / stds['s'][:, :, 3:5]
input = action_input
# estimate action noise
delta = snt.BatchApply(self.mo_noise_generator)(input)
delta = tf.concat([delta[:, :, 0:2] * state_step_sizes[3], delta[:, :, 2:4] * state_step_sizes[4]], axis=-1)
if stop_sampling_gradient:
delta = tf.stop_gradient(delta)
action_vel_f = tf.random_normal(tf.shape(particles[:, :, 3:4]), mean = delta[:, :, 0:1], stddev = delta[:, :, 1:2])
action_vel_rot = tf.random_normal(tf.shape(particles[:, :, 4:5]), mean = delta[:, :, 2:3], stddev = delta[:, :, 3:4])
heading = particles[:, :, 2:3]
sin_heading = tf.sin(heading)
cos_heading = tf.cos(heading)
new_x = particles[:, :, 0:1] + cos_heading * particles[:, :, 3:4] * time_step
new_y = particles[:, :, 1:2] + sin_heading * particles[:, :, 3:4] * time_step
new_theta = particles[:, :, 2:3] + particles[:, :, 4:5] * time_step
new_theta = wrap_angle(new_theta)
new_v = particles[:, :, 3:4] + action_vel_f
new_theta_dot = particles[:, :, 4:5] + action_vel_rot
moved_particles = tf.concat([new_x, new_y, new_theta, new_v, new_theta_dot], axis=-1)
return moved_particles, delta
else:
heading = particles[:, :, 2:3]
sin_heading = tf.sin(heading)
cos_heading = tf.cos(heading)
random_input = tf.random_normal(tf.shape(particles[:, :, 3:5]))
noise = snt.BatchApply(self.mo_noise_generator)(random_input)
noise = noise - tf.reduce_mean(noise, axis=1, keep_dims=True)
new_z = particles[:, :, 0:1] + cos_heading * particles[:, :, 3:4] * time_step
new_x = particles[:, :, 1:2] + sin_heading * particles[:, :, 3:4] * time_step
new_theta = wrap_angle(particles[:, :, 2:3] + particles[:, :, 4:5] * time_step)
new_v = particles[:, :, 3:4] + noise[:, :, :1] * state_step_sizes[3]
new_theta_dot = particles[:, :, 4:5] + noise[:, :, 1:] * state_step_sizes[4]
moved_particles = tf.concat([new_z, new_x, new_theta, new_v, new_theta_dot], axis=-1)
return moved_particles
def compile_training_stages(self, sess, batch_iterators, particle_list, particle_probs_list, encodings, means, stds, state_step_sizes, state_mins, state_maxs, learn_gaussian_mle, learning_rate, plot_task):
# TRAINING!
losses = dict()
train_stages = dict()
std = 0.25
# TRAIN ODOMETRY
if self.learn_odom:
# apply model
motion_samples = self.motion_update(self.placeholders['a'][:,0],
self.placeholders['s'][:, :1],
means, stds, state_step_sizes,
stop_sampling_gradient=True)
# define loss and optimizer
sq_distance = compute_sq_distance(motion_samples, self.placeholders['s'][:, 1:2], state_step_sizes)
losses['motion_mse'] = tf.reduce_mean(sq_distance, name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# put everything together
train_stages['train_odom'] = {
'train_op': optimizer.minimize(losses['motion_mse']),
'batch_iterator_names': {'train': 'train1', 'val': 'val1'},
'monitor_losses': ['motion_mse'],
'validation_loss': 'motion_mse',
'plot': lambda e: self.plot_motion_model(sess, next(batch_iterators['val2']), motion_samples, plot_task, state_step_sizes) if e % 1 == 0 else None
}
# TRAIN MOTION MODEL
if learn_gaussian_mle:
motion_samples, motion_params = self.motion_update(self.placeholders['a'][:,1],
tf.tile(self.placeholders['s'][:, :1], [1, 1, 1]),
means, stds, state_step_sizes, learn_gaussian_mle)
# define loss and optimizer
diff_in_states = self.placeholders['s'][:, 1:2] - self.placeholders['s'][:, :1]
activations_vel_f = (1 / 32) / tf.sqrt(2 * np.pi * motion_params[:, :, 1] ** 2) * tf.exp(
-(diff_in_states[:, :, 3] - motion_params[:, :, 0]) ** 2 / (2.0 * motion_params[:, :, 1] ** 2))
activations_vel_rot = (1 / 32) / tf.sqrt(2 * np.pi * motion_params[:, :, 3] ** 2) * tf.exp(
-(diff_in_states[:, :, 4] - motion_params[:, :, 2]) ** 2 / (2.0 * motion_params[:, :, 3] ** 2))
losses['motion_mle'] = tf.reduce_mean(-tf.log(1e-16 + (tf.reduce_sum(activations_vel_f, axis=-1, name='loss1') * tf.reduce_sum(activations_vel_rot, axis=-1, name='loss2'))))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# put everything together
train_stages['train_motion_sampling'] = {
'train_op': optimizer.minimize(losses['motion_mle']),
'batch_iterator_names': {'train': 'train2', 'val': 'val2'},
'monitor_losses': ['motion_mle'],
'validation_loss': 'motion_mle',
'plot': lambda e: self.plot_motion_model(sess, next(batch_iterators['val2']), motion_samples, plot_task, state_step_sizes) if e % 1 == 0 else None
}
else:
motion_samples = self.motion_update(self.placeholders['a'][:,1],
tf.tile(self.placeholders['s'][:, :1], [1, self.num_particles, 1]),
means, stds, state_step_sizes, learn_gaussian_mle)
# define loss and optimizer
sq_distance = compute_sq_distance(motion_samples, self.placeholders['s'][:, 1:2], state_step_sizes)
activations_sample = (1 / self.num_particles_float) / tf.sqrt(2 * np.pi * std ** 2) * tf.exp(
-sq_distance / (2.0 * std ** 2))
losses['motion_mle'] = tf.reduce_mean(-tf.log(1e-16 + tf.reduce_sum(activations_sample, axis=-1, name='loss')))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# put everything together
train_stages['train_motion_sampling'] = {
'train_op': optimizer.minimize(losses['motion_mle']),
'batch_iterator_names': {'train': 'train2', 'val': 'val2'},
'monitor_losses': ['motion_mle'],
'validation_loss': 'motion_mle',
'plot': lambda e: self.plot_motion_model(sess, next(batch_iterators['val2']), motion_samples, plot_task, state_step_sizes) if e % 1 == 0 else None
}
# TRAIN MEASUREMENT MODEL
# apply model for all pairs of observations and states in that batch
test_particles = tf.tile(self.placeholders['s'][tf.newaxis, :, 0], [self.batch_size, 1, 1])
measurement_model_out = self.measurement_update(encodings[:, 0], test_particles, means, stds)
# define loss (correct -> 1, incorrect -> 0) and optimizer
correct_samples = tf.diag_part(measurement_model_out)
incorrect_samples = measurement_model_out - tf.diag(tf.diag_part(measurement_model_out))
losses['measurement_heuristic'] = tf.reduce_sum(-tf.log(correct_samples)) / tf.cast(self.batch_size, tf.float32) \
+ tf.reduce_sum(-tf.log(1.0 - incorrect_samples)) / tf.cast(self.batch_size * (self.batch_size - 1), tf.float32)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# put everything together
train_stages['train_measurement_model'] = {
'train_op': optimizer.minimize(losses['measurement_heuristic']),
'batch_iterator_names': {'train': 'train1', 'val': 'val1'},
'monitor_losses': ['measurement_heuristic'],
'validation_loss': 'measurement_heuristic',
'plot': lambda e: self.plot_measurement_model(sess, batch_iterators['val1'], measurement_model_out) if e % 1 == 0 else None
}
# TRAIN PARTICLE PROPOSER
if self.use_proposer:
# apply model (but only compute gradients until the encoding,
# otherwise we would unlearn it and the observation likelihood wouldn't work anymore)
proposed_particles = self.propose_particles(tf.stop_gradient(encodings[:, 0]), self.num_particles, state_mins, state_maxs)
# define loss and optimizer
std = 0.2
sq_distance = compute_sq_distance(proposed_particles, self.placeholders['s'][:, :1], state_step_sizes)
activations = (1 / self.num_particles_float) / tf.sqrt(2 * np.pi * std ** 2) * tf.exp(
-sq_distance / (2.0 * std ** 2))
losses['proposed_mle'] = tf.reduce_mean(-tf.log(1e-16 + tf.reduce_sum(activations, axis=-1)))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# put everything together
train_stages['train_particle_proposer'] = {
'train_op': optimizer.minimize(losses['proposed_mle']),
'batch_iterator_names': {'train': 'train1', 'val': 'val1'},
'monitor_losses': ['proposed_mle'],
'validation_loss': 'proposed_mle',
'plot': lambda e: self.plot_particle_proposer(sess, next(batch_iterators['val1']), proposed_particles, plot_task) if e % 10 == 0 else None
}
# END-TO-END TRAINING
# model was already applied further up -> particle_list, particle_probs_list
# define losses and optimizer
# first loss (which is being optimized)
sq_distance = compute_sq_distance(particle_list[:, :, :, 3:5], self.placeholders['s'][:, :, tf.newaxis, 3:5], state_step_sizes[3:5])
activations = particle_probs_list[:, :] / tf.sqrt(2 * np.pi * self.particle_std ** 2) * tf.exp(
-sq_distance / (2.0 * self.particle_std ** 2))
losses['mle'] = tf.reduce_mean(-tf.log(1e-16 + tf.reduce_sum(activations, axis=2, name='loss')))
# second loss (which we will monitor during execution)
pred = self.particles_to_state(particle_list, particle_probs_list)
sq_error = compute_sq_distance(pred[:, -1, 0:2], self.placeholders['s'][:, -1, 0:2], [1., 1.])
sq_dist = compute_sq_distance(self.placeholders['s'][:, 0, 0:2], self.placeholders['s'][:, -1, 0:2], [1., 1.])
losses['m/m'] = tf.reduce_mean(sq_error**0.5/sq_dist**0.5)
sq_error = compute_sq_distance(pred[:, -1, 2:3], self.placeholders['s'][:, -1, 2:3], [np.pi/180.0])
losses['deg/m'] = tf.reduce_mean(sq_error ** 0.5 / sq_dist ** 0.5)
# optimizer
optimizer = tf.train.AdamOptimizer(learning_rate)
# put everything together
train_stages['train_e2e'] = {
'train_op': optimizer.minimize(losses['mle']),
'batch_iterator_names': {'train': 'train', 'val': 'val'},
'monitor_losses': ['m/m', 'deg/m', 'mle'],
'validation_loss': 'deg/m',
'plot': lambda e: self.plot_particle_filter(sess, next(batch_iterators['val_ex']), particle_list,
particle_probs_list, state_step_sizes, plot_task) if e % 1 == 0 else None
}
return losses, train_stages
def load(self, sess, model_path, model_file='best_validation', statistics_file='statistics.npz', connect_and_initialize=True, modules=('encoder', 'mo_noise_generator', 'mo_transition_model', 'obs_like_estimator', 'particle_proposer')):
        if not isinstance(modules, (list, tuple)):
raise Exception('modules must be a list or tuple, not a ' + str(type(modules)))
# build the tensorflow graph
if connect_and_initialize:
# load training data statistics (which are needed to build the tf graph)
statistics = dict(np.load(os.path.join(model_path, statistics_file)))
for key in statistics.keys():
if statistics[key].shape == ():
statistics[key] = statistics[key].item() # convert 0d array of dictionary back to a normal dictionary
# connect all modules into the particle filter
self.connect_modules(**statistics)
init = tf.global_variables_initializer()
sess.run(init)
# load variables
all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
vars_to_load = []
loaded_modules = set()
for v in all_vars:
for m in modules:
if m in v.name:
vars_to_load.append(v)
loaded_modules.add(m)
        print('Loading modules: ' + ', '.join(sorted(loaded_modules)))
        saver = tf.train.Saver(vars_to_load)
saver.restore(sess, os.path.join(model_path, model_file))
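        # Hedged usage sketch (illustrative only; how the model instance is
        # constructed and the paths below are assumptions, not part of this method):
        #
        #   sess = tf.Session()
        #   model = ...  # an instance of this class, built with the same hyperparameters
        #   model.load(sess, model_path='models/run1',
        #              modules=('encoder', 'obs_like_estimator'))
        #   pred_states = model.predict(sess, batch)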
# def fit(self, sess, data, model_path, train_individually, train_e2e, split_ratio, seq_len, batch_size, epoch_length, num_epochs, patience, learning_rate, dropout_keep_ratio, num_particles, particle_std, plot_task=None, plot=False):
def fit(self, sess, data, model_path, train_individually, train_e2e, split_ratio, seq_len, batch_size, epoch_length, num_epochs, patience, learning_rate, dropout_keep_ratio, num_particles, particle_std, learn_gaussian_mle, plot_task=None, plot=False):
if plot:
plt.ion()
self.particle_std = particle_std
mean_loss_for_plot = np.zeros((1,))
means, stds, state_step_sizes, state_mins, state_maxs = compute_statistics(data)
data = split_data(data, ratio=split_ratio)
epoch_lengths = {'train': epoch_length, 'val': epoch_length*2}
batch_iterators = {'train': make_batch_iterator(data['train'], seq_len=seq_len, batch_size=batch_size),
'val': make_repeating_batch_iterator(data['val'], epoch_lengths['val'], batch_size=batch_size, seq_len=seq_len),
'train_ex': make_batch_iterator(data['train'], batch_size=batch_size, seq_len=seq_len),
'val_ex': make_batch_iterator(data['val'], batch_size=batch_size, seq_len=seq_len),
'train1': make_batch_iterator(data['train'], batch_size=batch_size, seq_len=1),
'train2': make_batch_iterator(data['train'], batch_size=batch_size, seq_len=2),
'val1': make_repeating_batch_iterator(data['val'], epoch_lengths['val'], batch_size=batch_size, seq_len=1),
'val2': make_repeating_batch_iterator(data['val'], epoch_lengths['val'], batch_size=batch_size, seq_len=2),
}
# build the tensorflow graph by connecting all modules in the particles filter
particles, particle_probs, encodings, particle_list, particle_probs_list = self.connect_modules(means, stds, state_mins, state_maxs, state_step_sizes, learn_gaussian_mle)
# define losses and train stages for different ways of training (e.g. training individual models and e2e training)
losses, train_stages = self.compile_training_stages(sess, batch_iterators, particle_list, particle_probs_list,
encodings, means, stds, state_step_sizes, state_mins,
state_maxs, learn_gaussian_mle, learning_rate, plot_task)
# initialize variables
init = tf.global_variables_initializer()
sess.run(init)
# save statistics and prepare saving variables
if not os.path.exists(model_path):
os.makedirs(model_path)
np.savez(os.path.join(model_path, 'statistics'), means=means, stds=stds, state_step_sizes=state_step_sizes,
state_mins=state_mins, state_maxs=state_maxs)
saver = tf.train.Saver()
save_path = os.path.join(model_path, 'best_validation')
# define the training curriculum
curriculum = []
if train_individually:
if self.learn_odom:
curriculum += ['train_odom']
curriculum += ['train_measurement_model']
curriculum += ['train_motion_sampling']
if self.use_proposer:
curriculum += ['train_particle_proposer']
if train_e2e:
curriculum += ['train_e2e']
# split data for early stopping
data_keys = ['train']
if split_ratio < 1.0:
data_keys.append('val')
# define log dict
log = {c: {dk: {lk: {'mean': [], 'se': []} for lk in train_stages[c]['monitor_losses']} for dk in data_keys} for c in curriculum}
# go through curriculum
for c in curriculum:
stage = train_stages[c]
best_val_loss = np.inf
best_epoch = 0
epoch = 0
if c == 'train_e2e':
saver.save(sess, os.path.join(model_path, 'before_e2e/best_validation'))
np.savez(os.path.join(model_path, 'before_e2e/statistics'), means=means, stds=stds, state_step_sizes=state_step_sizes,
state_mins=state_mins, state_maxs=state_maxs)
while epoch < num_epochs and epoch - best_epoch < patience:
# training
for dk in data_keys:
# don't train in the first epoch, just evaluate the initial parameters
if dk == 'train' and epoch == 0:
continue
# set up loss lists which will be filled during the epoch
loss_lists = {lk: [] for lk in stage['monitor_losses']}
for e in range(epoch_lengths[dk]):
# t0 = time.time()
# pick a batch from the right iterator
batch = next(batch_iterators[stage['batch_iterator_names'][dk]])
# define the inputs and train/run the model
input_dict = {**{self.placeholders[key]: batch[key] for key in 'osa'},
**{self.placeholders['num_particles']: num_particles},
}
if dk == 'train':
input_dict[self.placeholders['keep_prob']] = dropout_keep_ratio
input_dict[self.placeholders['is_training']] = True
monitor_losses = {l: losses[l] for l in stage['monitor_losses']}
if dk == 'train':
s_losses, _ = sess.run([monitor_losses, stage['train_op']], input_dict)
else:
s_losses = sess.run(monitor_losses, input_dict)
for lk in stage['monitor_losses']:
loss_lists[lk].append(s_losses[lk])
# after each epoch, compute and log statistics
for lk in stage['monitor_losses']:
log[c][dk][lk]['mean'].append(np.mean(loss_lists[lk]))
log[c][dk][lk]['se'].append(np.std(loss_lists[lk], ddof=1) / np.sqrt(len(loss_lists[lk])))
# check whether the current model is better than all previous models
if 'val' in data_keys:
current_val_loss = log[c]['val'][stage['validation_loss']]['mean'][-1]
mean_loss_for_plot = np.append(mean_loss_for_plot,current_val_loss)
if current_val_loss < best_val_loss:
best_val_loss = current_val_loss
best_epoch = epoch
# save current model
saver.save(sess, save_path)
txt = 'epoch {:>3} >> '.format(epoch)
else:
txt = 'epoch {:>3} == '.format(epoch)
else:
best_epoch = epoch
saver.save(sess, save_path)
txt = 'epoch {:>3} >> '.format(epoch)
# after going through all data sets, do a print out of the current result
for lk in stage['monitor_losses']:
txt += '{}: '.format(lk)
for dk in data_keys:
if len(log[c][dk][lk]['mean']) > 0:
txt += '{:.2f}+-{:.2f}/'.format(log[c][dk][lk]['mean'][-1], log[c][dk][lk]['se'][-1])
txt = txt[:-1] + ' -- '
print(txt)
if plot:
stage['plot'](epoch)
epoch += 1
# after running out of patience, restore the model with lowest validation loss
saver.restore(sess, save_path)
return log
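        # Hedged sketch of a typical fit call (the argument values are illustrative,
        # not taken from the project's configuration):
        #
        #   log = model.fit(sess, data, model_path='models/run1',
        #                   train_individually=True, train_e2e=True,
        #                   split_ratio=0.9, seq_len=20, batch_size=32,
        #                   epoch_length=50, num_epochs=10000, patience=200,
        #                   learning_rate=0.0003, dropout_keep_ratio=0.3,
        #                   num_particles=100, particle_std=0.2,
        #                   learn_gaussian_mle=False, plot=False)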
def predict(self, sess, batch, return_particles=False, **kwargs):
# define input dict, use the first state only if we do tracking
input_dict = {self.placeholders['o']: batch['o'],
self.placeholders['a']: batch['a'],
self.placeholders['num_particles']: 100}
if self.init_with_true_state:
input_dict[self.placeholders['s']] = batch['s'][:, :1]
if return_particles:
return sess.run([self.pred_states, self.particle_list, self.particle_probs_list], input_dict)
else:
return sess.run(self.pred_states, input_dict)
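        # Shape assumptions for `batch` (inferred from the placeholders above,
        # not a documented contract): 'o', 'a' and 's' are numpy arrays of shape
        # [batch_size, seq_len, ...]; 's' is only required when tracking with a
        # known initial state.
        #
        #   pred_states = model.predict(sess, batch)
        #   pred_states, particles, weights = model.predict(sess, batch, return_particles=True)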
def connect_modules(self, means, stds, state_mins, state_maxs, state_step_sizes, learn_gaussian_mle=False):
# get shapes
self.batch_size = tf.shape(self.placeholders['o'])[0]
self.seq_len = tf.shape(self.placeholders['o'])[1]
# we use the static shape here because we need it to build the graph
self.action_dim = self.placeholders['a'].get_shape()[-1].value
encodings = snt.BatchApply(self.encoder)((self.placeholders['o'] - means['o']) / stds['o'])
# initialize particles
if self.init_with_true_state:
# tracking with known initial state
initial_particles = tf.tile(self.placeholders['s'][:, 0, tf.newaxis, :], [1, self.num_particles, 1])
else:
# global localization
if self.use_proposer:
# propose particles from observations
initial_particles = self.propose_particles(encodings[:, 0], self.num_particles, state_mins, state_maxs)
else:
# sample particles randomly
initial_particles = tf.concat(
[tf.random_uniform([self.batch_size, self.num_particles, 1], state_mins[d], state_maxs[d]) for d in
range(self.state_dim)], axis=-1, name='particles')
initial_particle_probs = tf.ones([self.batch_size, self.num_particles],
name='particle_probs') / self.num_particles_float
# assumes that samples has the correct size
def permute_batch(x, samples):
# get shapes
batch_size = tf.shape(x)[0]
num_particles = tf.shape(x)[1]
sample_size = tf.shape(samples)[1]
# compute 1D indices into the 2D array
idx = samples + num_particles * tf.tile(
tf.reshape(tf.range(batch_size), [batch_size, 1]),
[1, sample_size])
# index using the 1D indices and reshape again
result = tf.gather(tf.reshape(x, [batch_size * num_particles, -1]), idx)
result = tf.reshape(result, tf.shape(x[:,:sample_size]))
return result
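        # Worked example of the flat-index trick in `permute_batch` above:
        # with batch_size=2 and num_particles=3 the per-row offsets are [0, 3],
        # so samples [[2, 0], [1, 1]] become flat indices [[2, 0], [4, 4]]
        # into the [batch_size * num_particles, -1] reshaped array.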
def loop(particles, particle_probs, particle_list, particle_probs_list, additional_probs_list, i):
num_proposed_float = tf.round((self.propose_ratio ** tf.cast(i, tf.float32)) * self.num_particles_float)
num_proposed = tf.cast(num_proposed_float, tf.int32)
num_resampled_float = self.num_particles_float - num_proposed_float
num_resampled = tf.cast(num_resampled_float, tf.int32)
if self.propose_ratio < 1.0:
# resampling
basic_markers = tf.linspace(0.0, (num_resampled_float - 1.0) / num_resampled_float, num_resampled)
random_offset = tf.random_uniform([self.batch_size], 0.0, 1.0 / num_resampled_float)
markers = random_offset[:, None] + basic_markers[None, :] # shape: batch_size x num_resampled
cum_probs = tf.cumsum(particle_probs, axis=1)
marker_matching = markers[:, :, None] < cum_probs[:, None, :] # shape: batch_size x num_resampled x num_particles
                samples = tf.cast(tf.argmax(tf.cast(marker_matching, 'int32'), axis=2), 'int32')
standard_particles = permute_batch(particles, samples)
standard_particle_probs = tf.ones([self.batch_size, num_resampled])
standard_particles = tf.stop_gradient(standard_particles)
standard_particle_probs = tf.stop_gradient(standard_particle_probs)
# motion update
if learn_gaussian_mle:
standard_particles, _ = self.motion_update(self.placeholders['a'][:, i], standard_particles, means, stds, state_step_sizes, learn_gaussian_mle)
else:
standard_particles = self.motion_update(self.placeholders['a'][:, i], standard_particles, means, stds, state_step_sizes, learn_gaussian_mle)
# measurement update
standard_particle_probs *= self.measurement_update(encodings[:, i], standard_particles, means, stds)
if self.propose_ratio > 0.0:
# proposed particles
proposed_particles = self.propose_particles(encodings[:, i], num_proposed, state_mins, state_maxs)
proposed_particle_probs = tf.ones([self.batch_size, num_proposed])
# NORMALIZE AND COMBINE PARTICLES
if self.propose_ratio == 1.0:
particles = proposed_particles
particle_probs = proposed_particle_probs
elif self.propose_ratio == 0.0:
particles = standard_particles
particle_probs = standard_particle_probs
else:
standard_particle_probs *= (num_resampled_float / self.num_particles_float) / tf.reduce_sum(standard_particle_probs, axis=1, keep_dims=True)
proposed_particle_probs *= (num_proposed_float / self.num_particles_float) / tf.reduce_sum(proposed_particle_probs, axis=1, keep_dims=True)
particles = tf.concat([standard_particles, proposed_particles], axis=1)
particle_probs = tf.concat([standard_particle_probs, proposed_particle_probs], axis=1)
# NORMALIZE PROBABILITIES
particle_probs /= tf.reduce_sum(particle_probs, axis=1, keep_dims=True)
particle_list = tf.concat([particle_list, particles[:, tf.newaxis]], axis=1)
particle_probs_list = tf.concat([particle_probs_list, particle_probs[:, tf.newaxis]], axis=1)
return particles, particle_probs, particle_list, particle_probs_list, additional_probs_list, i + 1
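        # NumPy sketch of the systematic (low-variance) resampling performed in
        # `loop` above, for a single batch element (illustrative; assumes
        # normalized weights `w` of length N):
        #
        #   markers = np.random.uniform(0, 1.0 / N) + np.arange(N) / float(N)
        #   samples = np.searchsorted(np.cumsum(w), markers, side='right')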
# reshapes and sets the first shape sizes to None (which is necessary to keep the shape consistent in while loop)
particle_list = tf.reshape(initial_particles,
shape=[self.batch_size, -1, self.num_particles, self.state_dim])
particle_probs_list = tf.reshape(initial_particle_probs, shape=[self.batch_size, -1, self.num_particles])
additional_probs_list = tf.reshape(tf.ones([self.batch_size, self.num_particles, 4]), shape=[self.batch_size, -1, self.num_particles, 4])
# run the filtering process
particles, particle_probs, particle_list, particle_probs_list, additional_probs_list, i = tf.while_loop(
lambda *x: x[-1] < self.seq_len, loop,
[initial_particles, initial_particle_probs, particle_list, particle_probs_list, additional_probs_list,
tf.constant(1, dtype='int32')], name='loop')
# compute mean of particles
self.pred_states = self.particles_to_state(particle_list, particle_probs_list)
self.particle_list = particle_list
self.particle_probs_list = particle_probs_list
return particles, particle_probs, encodings, particle_list, particle_probs_list
def particles_to_state(self, particle_list, particle_probs_list):
mean_position = tf.reduce_sum(particle_probs_list[:, :, :, tf.newaxis] * particle_list[:, :, :, :2], axis=2)
mean_orientation = atan2(
tf.reduce_sum(particle_probs_list[:, :, :, tf.newaxis] * tf.cos(particle_list[:, :, :, 2:3]), axis=2),
tf.reduce_sum(particle_probs_list[:, :, :, tf.newaxis] * tf.sin(particle_list[:, :, :, 2:3]), axis=2))
mean_velocity = tf.reduce_sum(particle_probs_list[:, :, :, tf.newaxis] * particle_list[:, :, :, 3:5], axis=2)
return tf.concat([mean_position, mean_orientation, mean_velocity], axis=2)
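        # Note on the orientation above: the weighted mean angle is recovered
        # from weighted sums of cos and sin of the particle orientations (via
        # the project's atan2 helper), which avoids the wrap-around problem of
        # taking a plain weighted average of angles near +/- pi.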
def plot_motion_model(self, sess, batch, motion_samples, task, state_step_sizes):
# define the inputs and train/run the model
input_dict = {**{self.placeholders[key]: batch[key] for key in 'osa'},
**{self.placeholders['num_particles']: 100},
}
s_motion_samples = sess.run(motion_samples, input_dict)
plt.figure('Motion Model')
plt.gca().clear()
for i in range(min(len(s_motion_samples), 10)):
plt.scatter(s_motion_samples[i, :, 3] / state_step_sizes[3], s_motion_samples[i, :, 4] / state_step_sizes[4], color='blue', s=1)
plt.scatter(batch['s'][i, 0, 3] / state_step_sizes[3], batch['s'][i, 0, 4] / state_step_sizes[4], color='black', s=1)
plt.scatter(batch['s'][i, 1, 3] / state_step_sizes[3], batch['s'][i, 1, 4] / state_step_sizes[4], color='red', s=3)
plt.plot(batch['s'][i, :2, 3] / state_step_sizes[3], batch['s'][i, :2, 4] / state_step_sizes[4], color='black')
plt.xlim([0, 200])
plt.ylim([-50, 50])
plt.xlabel('translational vel')
plt.ylabel('angular vel')
plt.gca().set_aspect('equal')
plt.pause(0.01)
def plot_measurement_model(self, sess, batch_iterator, measurement_model_out):
batch = next(batch_iterator)
# define the inputs and train/run the model
input_dict = {**{self.placeholders[key]: batch[key] for key in 'osa'},
**{self.placeholders['num_particles']: 100},
}
s_measurement_model_out = sess.run([measurement_model_out], input_dict)
plt.figure('Measurement Model Output')
plt.gca().clear()
plt.imshow(s_measurement_model_out[0], interpolation="nearest", cmap="viridis_r", vmin=0.0, vmax=1.0)
plt.figure('Measurement Model Input')
plt.clf()
plt.scatter(batch['s'][:1, 0, 3], batch['s'][:1, 0, 4], marker='x', c=s_measurement_model_out[0][0,:1], vmin=0, vmax=1.0, cmap='viridis_r')
plt.scatter(batch['s'][1:, 0, 3], batch['s'][1:, 0, 4], marker='o', c=s_measurement_model_out[0][0,1:], vmin=0, vmax=1.0, cmap='viridis_r')
plt.xlabel('x_dot')
plt.ylabel('theta_dot')
plt.pause(0.01)
def plot_particle_proposer(self, sess, batch, proposed_particles, task):
# define the inputs and train/run the model
input_dict = {**{self.placeholders[key]: batch[key] for key in 'osa'},
**{self.placeholders['num_particles']: 100},
}
s_samples = sess.run(proposed_particles, input_dict)
plt.figure('Particle Proposer')
plt.gca().clear()
plot_maze(task)
for i in range(min(len(s_samples), 10)):
color = np.random.uniform(0.0, 1.0, 3)
plt.quiver(s_samples[i, :, 0], s_samples[i, :, 1], np.cos(s_samples[i, :, 2]), np.sin(s_samples[i, :, 2]), color=color, width=0.001, scale=100)
plt.quiver(batch['s'][i, 0, 0], batch['s'][i, 0, 1], np.cos(batch['s'][i, 0, 2]), np.sin(batch['s'][i, 0, 2]), color=color, scale=50, width=0.003)
plt.pause(0.01)
def plot_particle_filter(self, sess, batch, particle_list,
particle_probs_list, state_step_sizes, task):
s_states, s_particle_list, s_particle_probs_list, \
= sess.run([self.placeholders['s'], particle_list,
particle_probs_list], #self.noise_scaler1(1.0), self.noise_scaler2(2.0)],
{**{self.placeholders[key]: batch[key] for key in 'osa'},
**{self.placeholders['num_particles']: 20},
})
# print('learned motion noise factors {:.2f}/{:.2f}'.format(n1, n2))
num_steps = s_particle_list.shape[1]
for s in range(3):
plt.figure('particle_evolution, example {}'.format(s))
plt.clf()
for d in range(5):
plt.subplot(3, 2, [1, 3, 5, 2, 4][d])
for i in range(num_steps):
plt.scatter(i * np.ones_like(s_particle_list[s, i, :, d]),
s_particle_list[s, i, :, d] / (1 if s == 0 else state_step_sizes[d]),
c=s_particle_probs_list[s, i, :], cmap='viridis_r', marker='o', s=6, alpha=0.5,
linewidths=0.05,
vmin=0.0,
vmax=0.1)
current_state = batch['s'][s, i, d] / (1 if s == 0 else state_step_sizes[d])
plt.plot([i], [current_state], 'o', markerfacecolor='None', markeredgecolor='k',
markersize=2.5)
plt.xlabel('Time')
plt.ylabel('State {}'.format(d))
show_pause(pause=0.01)
| mit |
kc-lab/dms2dfe | dms2dfe/lib/io_ml.py | 2 | 24058 | #!usr/bin/python
# Copyright 2016, Rohan Dandage <rraadd_8@hotmail.com,rohan@igib.in>
# This program is distributed under General Public License v. 3.
"""
================================
``io_ml``
================================
"""
from os.path import abspath,dirname,exists,basename
from os import makedirs
from sklearn.preprocessing import label_binarize
from sklearn.model_selection import train_test_split
from dms2dfe.lib.io_data_files import read_pkl,to_pkl
from dms2dfe.lib.io_dfs import set_index,denan,denanrows,del_Unnamed
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg') # no Xwindows
import matplotlib.pyplot as plt
import warnings
warnings.simplefilter(action = "ignore", category = FutureWarning)
from dms2dfe.lib.io_strs import get_logger
logging=get_logger()
# logging.basicConfig(format='[%(asctime)s] %(levelname)s\tfrom %(filename)s in %(funcName)s(..):%(lineno)d: %(message)s',level=logging.DEBUG) # filename=cfg_xls_fh+'.log'
def corrplot(info):
"""
    Plots a correlation-matrix heatmap between a range of features and fold-change values
    :param info: configuration object with the information of the experiment (provides `ml_input` and `prj_dh`)
"""
from dms2dfe.lib.io_dfs import fhs2data_combo
from glob import glob
from dms2dfe.lib.plot_mut_data_heatmaps import clustermap
from dms2dfe.lib.io_ml_data import make_dXy
ml_input=info.ml_input
prj_dh=info.prj_dh
data_fit_fhs=glob('%s/data_fit/aas/*' % prj_dh)
data_feats_all_fh='%s/data_feats/aas/data_feats_all' % prj_dh
data_feats_all=pd.read_csv(data_feats_all_fh).set_index('mutids')
data_fit_all=fhs2data_combo(data_fit_fhs,['%sA' % ml_input],'mutids')
data_fit_all.columns=[c.split(': ')[0] for c in data_fit_all]
for c in data_fit_all:
plot_fh='%s/plots/aas/%s.corr.pdf' % (prj_dh,c)
if not exists(plot_fh):
if not exists(dirname(plot_fh)):
makedirs(dirname(plot_fh))
dXy=data_feats_all.join(data_fit_all[c])
dXy,Xcols,ycol=make_dXy(dXy,ycol=c,
if_rescalecols=False,
unique_quantile=0.25)
dXy,Xcols,ycol=feats_sel_corr(dXy,ycol,range_coef=[0.9,0.8])
g,ax=clustermap(dXy.corr(method='spearman'),
highlight_col=c,
vlim=[-0.5,0.5],figsize=[10,10],
plot_fh=plot_fh,
)
def run_RF_classi(data_all,X_cols,y_coln,
test_size=0.34,data_test=None,data_out_fh=None):
"""
    This implements a grid-searched Random Forest classifier.
    :param data_all: dataframe with columns with features (Xs) and classes (y).
    :param X_cols: list of column names with features.
    :param y_coln: column name of the column with classes.
    :param test_size: fraction of the data held out for testing; if 0, `data_test` is used for predictions.
    :param data_test: dataframe to predict on when `test_size` is 0.
    :param data_out_fh: path to the output pickle file with the trained model and related data.
    :returns grid_search: trained classifier object.
    :returns data_preds: dataframe with predicted classes (None when `test_size` != 0).
"""
from sklearn.ensemble import RandomForestClassifier
X=data_all.loc[:,list(X_cols)]
X=X.as_matrix()
y=data_all.loc[:,y_coln]
classes=y.unique()
y=y.as_matrix()
y = label_binarize(y, classes=classes)
if len(classes)==2:
y=np.array([i[0] for i in y])
if len(classes)>1:
if test_size!=0:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size,
random_state=88)
else :
X_train=X
y_train=y
X_test_df=data_test.loc[:,list(X_cols)]
X_test_df=denan(X_test_df,axis='both',condi='all any')
X_test=X_test_df.as_matrix()
y_test=None
model = RandomForestClassifier(random_state =88)
param_grid = {"n_estimators": [1000],
"max_features": ['sqrt'],#[None,'sqrt','log2'],
"min_samples_leaf":[1],#[1,25,50,100],
"criterion": ['entropy'],#["gini", "entropy"]
}
grid_search = GridSearchCV(model, param_grid=param_grid,cv=10)
grid_search.fit(X_train,y_train)
y_pred=grid_search.predict(X_test)
if test_size!=0:
data_preds=None
else:
data_preds=X_test_df
data_preds[y_coln]=binary2classes(y_pred,classes)
featimps=pd.DataFrame(columns=['Feature','Importance'])
featimps.loc[:,'Feature']=X_cols#[indices]
featimps.loc[:,'Importance']=grid_search.best_estimator_.feature_importances_
data={'RF_classi':grid_search,
'X_train':X_train,
'X_test':X_test,
'y_train':y_train,
'y_test':y_test,
'y_score':grid_search.predict_proba(X_test),
'classes':classes,
'X_cols':X_cols,
'y_coln':y_coln,
'features':X_cols,
'featimps':featimps,
'y_pred':y_pred,
'data_preds':data_preds}
to_pkl(data,data_out_fh)
return grid_search,data_preds
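# A minimal, self-contained sketch of the grid-searched random forest pattern
# used above, on synthetic data. Everything here (function name, data, grid
# values) is illustrative and not part of dms2dfe's API.
def _example_rf_classi_sketch():
    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.model_selection import GridSearchCV, train_test_split
    # synthetic two-class problem standing in for enriched/depleted mutants
    X, y = make_classification(n_samples=200, n_features=10, random_state=88)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.34,
                                                        random_state=88)
    grid = GridSearchCV(RandomForestClassifier(random_state=88),
                        param_grid={"n_estimators": [100],
                                    "max_features": ['sqrt'],
                                    "criterion": ['entropy']},
                        cv=5)
    grid.fit(X_train, y_train)
    # held-out accuracy of the best parameter combination
    return grid.score(X_test, y_test)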
def run_RF_regress(data_all,X_cols,y_coln,
test_size=0.5,data_test=None,data_out_fh=None):
"""
    This implements a grid-searched Random Forest regressor.
    :param data_all: dataframe with columns with features (Xs) and target values (y).
    :param X_cols: list of column names with features.
    :param y_coln: column name of the target column.
    :param test_size: fraction of the data held out for testing; if 0, `data_test` is used for predictions.
    :param data_test: dataframe to predict on when `test_size` is 0.
    :param data_out_fh: path to the output pickle file with the trained model and related data.
    :returns grid_search: trained regressor object.
    :returns data_preds: dataframe with predicted values (None when `test_size` != 0).
"""
from sklearn.ensemble import RandomForestRegressor
X=data_all.loc[:,list(X_cols)]
X=X.as_matrix()
y=data_all.loc[:,y_coln]
y=y.as_matrix()
if test_size!=0:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size,
random_state=88)
else :
X_train=X
y_train=y
X_test=data_test.loc[:,list(X_cols)].as_matrix()
y_test=None
model = RandomForestRegressor(random_state =88)
param_grid = {"n_estimators": [3000],#[1000,2000,4000],#
"max_features": ['sqrt'],#[None,'sqrt','log2'],
"min_samples_leaf": [1],#[1,25,50,100],
"criterion": ["mse"],
"oob_score": [True],
}
grid_search = GridSearchCV(model, param_grid=param_grid,cv=10)
grid_search.fit(X_train,y_train)
y_pred=grid_search.predict(X_test)
if test_size!=0:
data_preds=None
# print grid_search.score(X_test, y_test)
else:
data_preds=data_test.loc[:,list(X_cols)]
data_preds[y_coln]=y_pred
featimps=pd.DataFrame(columns=['Feature','Importance'])
featimps.loc[:,'Feature']=X_cols#[indices]
featimps.loc[:,'Importance']=grid_search.best_estimator_.feature_importances_
data={'RF_regress':grid_search,
'X_train':X_train,
'X_test':X_test,
'y_train':y_train,
'y_test':y_test,
'X_cols':X_cols,
'y_coln':y_coln,
'features':X_cols,
'featimps':featimps,
'y_pred':y_pred,
'data_preds':data_preds}
to_pkl(data,data_out_fh)
return grid_search,data_preds
def data_combo2ml(data_combo,data_fn,data_dh,plot_dh,
ycoln,col_idx,
ml_type='both',
middle_percentile_skipped=0.1,
force=False,
):
"""
    This runs the submodules to train a classifier (and optionally a regressor) from fitness data (`data_combo`).
    :param data_combo: dataframe with features and fitness data.
    :param data_fn: name of the data file, in the form <data_combo>/<aas/cds>/<name of file>.
    :param ycoln: column name of the column with classes (ys).
    :param ml_type: 'cls' | 'both'
"""
data_combo=del_Unnamed(data_combo)
for dh in [plot_dh,data_dh]:
if not exists(dh):
makedirs(dh)
# plot_cls_fh="%s/plot_ml_cls_%s.pdf" % (plot_dh,data_fn)
# plot_reg_fh="%s/plot_ml_reg_%s.pdf" % (plot_dh,data_fn)
data_combo_fh="%s/%s.input_raw" % (data_dh,data_fn)
data_fh="%s/%s.cls.all" % (data_dh,data_fn)
data_cls_train_fh="%s/%s.cls.train" % (data_dh,data_fn)
data_cls_tests_fh="%s/%s.cls.tests" % (data_dh,data_fn)
data_reg_train_fh="%s/%s.reg.train" % (data_dh,data_fn)
data_reg_tests_fh="%s/%s.reg.tests" % (data_dh,data_fn)
pkld_cls_fh='%s/%s.cls.pkl' % (data_dh,data_fn)
pkld_reg_fh='%s/%s.reg.pkl' % (data_dh,data_fn)
# pkld_cls_metrics_fh='%s/%s.cls.metrics.pkl' % (data_dh,data_fn)
pkld_reg_metrics_fh='%s/%s.reg.metrics.pkl' % (data_dh,data_fn)
feature_importances_cls_fh="%s_%s_.csv" % (pkld_cls_fh,'featimps')
y_coln_cls=ycoln
y_coln_reg=ycoln
if np.sum(~data_combo.loc[:,y_coln_cls].isnull())<50:
logging.error("skipping %s: need more data: %d<50" %\
(data_fn,np.sum(~data_combo.loc[:,ycoln].isnull())))
return False
logging.info("processing: %s" % data_fn)
if ml_type=='cls' or ml_type=='both':
if not exists(pkld_cls_fh):
if not exists(data_cls_train_fh):
data_combo,data_ml,data_cls_train,data_cls_tests=make_cls_input(data_combo,
y_coln_cls,
middle_percentile_skipped=middle_percentile_skipped)
data_combo.to_csv(data_combo_fh)
data_ml.to_csv(data_fh)
data_cls_train.to_csv(data_cls_train_fh)
data_cls_tests.to_csv(data_cls_tests_fh)
else:
data_cls_train=pd.read_csv(data_cls_train_fh)
data_cls_tests=pd.read_csv(data_cls_tests_fh)
data_cls_train =data_cls_train.set_index(col_idx,drop=True)
data_cls_tests =data_cls_tests.set_index(col_idx,drop=True)
y_coln_cls="classes"
logging.info("cls: train set = %d" % len(data_cls_train))
X_cols_cls=data_cls_train.columns.tolist()
X_cols_cls.remove(y_coln_cls)
# cls
pkld_cls,data_preds=run_RF_classi(data_cls_train,X_cols_cls,y_coln_cls,
test_size=0.34,data_out_fh=pkld_cls_fh) #
else:
logging.info('already exists: %s' % basename(pkld_cls_fh))
if not exists(feature_importances_cls_fh):
get_RF_classi_metrics(pkld_cls_fh,data_dh=data_dh,plot_dh=plot_dh)
if ml_type=='both':
if not exists(pkld_reg_fh):
if not exists('%s.train' % data_fh):
data_cls_tests=pd.read_csv(data_cls_train_fh)
data_cls_train=pd.read_csv(data_cls_tests_fh)
data_cls_tests =data_cls_tests.set_index(col_idx,drop=True)
data_cls_train =data_cls_train.set_index(col_idx,drop=True)
feature_importances_cls=pd.read_csv(feature_importances_cls_fh)
data_reg_train,data_reg_tests=make_reg_input(data_combo,data_cls_train,data_cls_tests,
feature_importances_cls,
y_coln_reg,
y_coln_cls="classes",
topNfeats=25)
data_reg_train.to_csv(data_reg_train_fh)
data_reg_tests.to_csv(data_reg_tests_fh)
else:
data_reg_train=pd.read_csv(data_cls_train_fh)
data_reg_tests=pd.read_csv(data_cls_tests_fh)
data_reg_train =data_reg_train.set_index(col_idx,drop=True)
data_reg_tests =data_reg_tests.set_index(col_idx,drop=True)
logging.info("reg: train set = %d" % len(data_reg_train))
X_cols_reg=[c for c in data_reg_train.columns.tolist() if c!=y_coln_reg]
# print data_reg_train.loc[:,X_cols_reg]
pkld_reg_metrics,data_preds_reg_metrics=\
run_RF_regress(data_reg_train,X_cols_reg,y_coln_reg,
test_size=0.34,data_out_fh=pkld_reg_metrics_fh)
get_RF_regress_metrics(pkld_reg_metrics_fh,data_dh=data_dh,plot_dh=plot_dh)
else:
logging.info('already exists: %s' % basename(pkld_reg_fh))
def data_regress2data_fit(prj_dh,data_fit_key,
data_regress_all,col='FCA_norm'):
"""
    Transforms the fold changes estimated from a regression model into the format of data_fit
    :param prj_dh: path to the project directory
    :param data_fit_key: path key to the data_fit file
    :param data_regress_all: pandas table with regression-estimated fold-change values
"""
# from dms2dfe.lib.io_nums import str2num
from dms2dfe.lib.io_mut_files import rescale_fitnessbysynonymous,class_fit,mutids_converter
data_fit=pd.read_csv("%s/%s" % (prj_dh,data_fit_key))
data_fit=data_fit.loc[:,["mutids",col]].set_index("mutids",drop=True)
data_fit_combo=data_fit.copy()
data_fit_inferred=data_regress_all.reset_index().loc[:,["mutids",col]].set_index("mutids",drop=True)
data_mutids_common=denanrows(data_fit.join(data_fit_inferred.loc[:,col],rsuffix='_inferred'))
data_mutids_common=data_mutids_common.loc[(data_mutids_common.loc[:,data_mutids_common.columns[0]]!=data_mutids_common.loc[:,data_mutids_common.columns[1]]),:]
for m in data_fit_combo.index.tolist():
if pd.isnull(data_fit.loc[m,col]):
if m in data_fit_inferred.index.tolist():
data_fit_combo.loc[m,'inferred']=True
data_fit_combo.loc[m,col]=data_fit_inferred.loc[m,col]
else:
data_fit_combo.loc[m,'inferred']=False
for c in ['refi','ref','mut','refrefi']:
data_fit_combo.loc[:,c]=mutids_converter(data_fit_combo.index.tolist(), c, 'aas')
if col=='FCA_norm':
data_fit_combo=rescale_fitnessbysynonymous(data_fit_combo,col_fit=col,col_fit_rescaled="FiA")
data_fit_combo=class_fit(data_fit_combo)
data_fit_combo.loc[:,'FiS']=\
data_fit_combo.loc[(data_fit_combo.loc[:,'ref']==data_fit_combo.loc[:,'mut']),'FiA']
data_fit_combo=data_fit_combo.sort_values(by="refi",axis=0)
data_fit_combo.to_csv("%s/%s_inferred" % (prj_dh,data_fit_key))
return data_fit_combo
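# Illustrative call (the path and key below are hypothetical; `data_preds`
# would come from a trained regression model):
#
#   data_fit_combo = data_regress2data_fit(prj_dh='analysis/prj',
#                                          data_fit_key='data_fit/aas/data_fit',
#                                          data_regress_all=data_preds,
#                                          col='FCA_norm')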
# Gradient boosting (GB) based modelling
from dms2dfe.lib.io_strs import get_time
from dms2dfe.lib.io_ml_data import feats_inter,keep_cols,feats_sel_corr,make_dXy,feats_inter_sel_corr
# %run ../../progs/dms2dfe/dms2dfe/lib/io_ml.py
# %run ../../progs/dms2dfe/dms2dfe/lib/io_ml_data.py
# %run ../../1_dms_software/progs/dms2dfe/dms2dfe/lib/io_ml_metrics.py
from sklearn.model_selection import cross_val_predict,cross_val_score
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble.partial_dependence import plot_partial_dependence,partial_dependence
def run_est(est,X,y,params,cv=True):
"""
Runs an estimator
:param est: estimator object
:param X: predictors (X) values
:param y: target (y) values
    :param params: estimator hyperparameters passed via `set_params`
    :param cv: if True, compute 10-fold cross-validation scores
"""
if est=='GBR':
est = GradientBoostingRegressor(random_state=88)
elif est=='GBC':
est = GradientBoostingClassifier(random_state=88)
est.set_params(**params)
if cv:
r2s=cross_val_score(est,X,y,cv=10)
        print([r2s, np.mean(r2s)])
return r2s,est
def est2feats_imp(est,Xcols,Xy=None):
"""
Get Feature importances from estimator
:param est: Estimator object
    :param Xcols: list of column names of predictors
    :param Xy: optional [X, y] pair used to fit `est` if it is not fitted yet
"""
try:
feat_imp = pd.DataFrame(est.feature_importances_, Xcols)#.sort_values(ascending=False)
except:
est.fit(Xy[0],Xy[1])
feat_imp = pd.DataFrame(est.feature_importances_, Xcols)#.sort_values(ascending=False)
feat_imp.columns=['Feature importance']
feat_imp=feat_imp.sort_values(by='Feature importance',ascending=False)
return feat_imp
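# Hedged, self-contained sketch tying the two helpers above together on
# synthetic data (the function name, data and parameter values are
# illustrative assumptions, not project defaults):
def _example_gb_feats_imp_sketch():
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=200, n_features=10, n_informative=5,
                           noise=0.1, random_state=88)
    Xcols = ['feat%d' % i for i in range(X.shape[1])]
    r2s, est = run_est(est='GBR', X=X, y=y,
                       params={'n_estimators': 100, 'learning_rate': 0.1})
    # est comes back unfitted when only cross-validation was run, so pass Xy
    return est2feats_imp(est, Xcols, Xy=[X, y])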
def dXy2ml(dXy,ycol,params=None,
if_gridsearch=False,
if_partial_dependence=False,
if_feats_imps=False,
inter=None,
use_top=None,
out_fh=None,
regORcls='reg',
force=False,cores=8):
"""
Wrapper for ml operations
    :param dXy: pandas table with predictors (X) and target (y) values
:param ycol: column name of the target column
"""
if out_fh is None:
out_fh='%s_%s.pkl' % ('dXy2ml',get_time())
if exists(out_fh) and (not force):
try:
dpkl=read_pkl(out_fh)
except:
return False
else:
dpkl={}
if not ('dXy_final' in dpkl.keys()) or force:
dpkl['dXy_input']=dXy
dpkl['ycol']=ycol
dXy_input=dXy.copy()
to_pkl(dpkl,out_fh) #back
dXy,Xcols,ycol=make_dXy(dXy,ycol=ycol,
if_rescalecols=True,
unique_quantile=0.25)
if len(dXy)<100:
return False
dpkl['dXy_preprocessed']=dXy
to_pkl(dpkl,out_fh) #back
dXy,Xcols,ycol=feats_sel_corr(dXy,ycol,range_coef=[0.9,0.8,0.7])
dpkl['dXy_feats_sel_corr']=dXy
to_pkl(dpkl,out_fh) #back
dXy,Xcols,ycol=keep_cols(dXy,dXy_input,ycol)
dpkl['dXy_feats_indi']=dXy
to_pkl(dpkl,out_fh) #back
if inter=='pre':
dXy,Xcols,ycol=feats_inter_sel_corr(dXy,ycol,Xcols,dpkl['dXy_feats_indi'].copy(),
top_cols=[
'Conservation score (inverse shannon uncertainty): gaps ignored',#'Conservation score (ConSurf)',
'Distance from active site residue: minimum',
'Distance from dimer interface',
'Temperature factor (flexibility)',
'Residue depth'])
dpkl['dXy_feats_inter_sel_corr']=dXy
dpkl['dXy_final']=dXy
else:
dXy_input=dpkl['dXy_input']
dXy=dpkl['dXy_final']
ycol=dpkl['ycol']
to_pkl(dpkl,out_fh) #back
Xcols=[c for c in dXy.columns.tolist() if c!=ycol]
X=dXy.loc[:,Xcols].as_matrix()
y=dXy.loc[:,ycol].as_matrix()
dpkl['X_final']=X
dpkl['y_final']=y
if regORcls=='reg':
est_method='GBR'
elif regORcls=='cls':
est_method='GBC'
if (if_gridsearch) or (params is None):
if not ('gs_cv' in dpkl.keys()) or force:
param_grid = {'learning_rate':[0.005,0.001,0.0001],#[0.1,0.01,0.005],# tuned with n estimators
'n_estimators':[1500,2000,3000,5000], # tuned with learning rate
'min_samples_leaf':[50,125], # lower -> less overfitting
'max_features':[None],
'max_depth':[6],
'min_samples_split':[int(len(dXy)*0.05),int(len(dXy)*0.1),int(len(dXy)*0.25),int(len(dXy)*0.5)], # 0.5 to 1 of samples
'subsample':[0.8],
}
if regORcls=='reg':
param_grid['loss']=['ls', 'lad', 'huber']
est_method='GBR'
est = GradientBoostingRegressor(random_state=88)
elif regORcls=='cls':
param_grid['loss']=['deviance', 'exponential']
est_method='GBC'
est = GradientBoostingClassifier(random_state=88)
logging.info('running grid search')
gs_cv = GridSearchCV(est, param_grid, n_jobs=cores,cv=10).fit(X, y)
            print([gs_cv.best_params_, gs_cv.best_score_])
params=gs_cv.best_params_
dpkl['gs_cv']=gs_cv
to_pkl(dpkl,out_fh) #back
dpkl['params']=params
if 'params' in dpkl.keys() and not force:
params= dpkl['params']
elif params is None:
dpkl['params']=params
if not ('est_all_feats_r2s' in dpkl.keys()) or force:
r2s,est=run_est(est=est_method,X=X,y=y,params=params)
dpkl['est_all_feats']=est
dpkl['est_all_feats_r2s']=r2s
if not ('feat_imp' in dpkl.keys()) or force:
if if_gridsearch:
feat_imp=est2feats_imp(dpkl['gs_cv'].best_estimator_,Xcols,Xy=None)
else:
feat_imp=est2feats_imp(est,Xcols,Xy=[X,y])
dpkl['feat_imp']=feat_imp
to_pkl(dpkl,out_fh) #back
if if_feats_imps:
fig=plt.figure(figsize=(5,10))
ax=plt.subplot(111)
feat_imp.plot(kind='barh', title='Feature Importances',ax=ax)
ax.set_ylabel('Feature Importance Score')
to_pkl(dpkl,out_fh) #back
if not use_top is None:
Xcols=dpkl['feat_imp'].head(use_top).index.tolist() #int(len(feat_imp)*0.15)
# print Xcols[:use_top//5]
if inter=='top':
dXy,Xcols,ycol=feats_inter_sel_corr(dXy,ycol,Xcols,dXy_input,top_cols=Xcols[:len(Xcols)//5])
X=dXy.loc[:,Xcols].as_matrix()
y=dXy.loc[:,ycol].as_matrix()
r2s,est=run_est(est=est_method,X=X,y=y,params=params)
feat_imp=est2feats_imp(est,Xcols,Xy=[X,y])
dpkl['feat_imp_top_feats']=feat_imp
dpkl['dXy_top_feats']=dXy
dpkl['est_top_feats']=est
dpkl['est_top_feats_r2s']=r2s
to_pkl(dpkl,out_fh) #back
if if_partial_dependence:
feats_indi=[s for s in Xcols if not ((') ' in s) and (' (' in s))]
features=[Xcols.index(f) for f in feats_indi]
fig, axs = plot_partial_dependence(est, X, features,
feature_names=Xcols,
n_jobs=cores, grid_resolution=50,
figsize=[10,30])
to_pkl(dpkl,out_fh) #back
# return est,dXy,dpkl
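# Hedged usage sketch for dXy2ml (the file paths and column name below are
# illustrative assumptions, not project defaults):
#
#   dXy = pd.read_csv('data_ml/dXy.csv').set_index('mutids')
#   dXy2ml(dXy, ycol='FiA', if_gridsearch=True, regORcls='reg',
#          out_fh='data_ml/dXy2ml.pkl', cores=8)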
from dms2dfe.lib.io_ml_metrics import get_GB_cls_metrics
def data_fit2ml(dX_fh,dy_fh,info,regORcls='cls'):
"""
    Wrapper for the overall data_fit-to-model (classification/regression) workflow
    :param dX_fh: path to the file containing predictor values
    :param dy_fh: path to the file containing target values
    :param info: object containing information about the experiment
"""
dy=pd.read_csv(dy_fh).set_index('mutids')
dX=pd.read_csv(dX_fh).set_index('mutids')
out_fh='%s/data_ml/%s.pkl' % (info.prj_dh,basename(dy_fh))
if regORcls=='reg':
ycol='FiA'
dXy=pd.concat([dy.loc[:,ycol],dX],axis=1)
dXy.index.name='mutids'
params={'loss': 'ls', 'learning_rate': 0.001, 'min_samples_leaf': 50, 'n_estimators': 5000, 'subsample': 0.8, 'min_samples_split': 38, 'max_features': None, 'max_depth': 6}
elif regORcls=='cls':
ycol='class_fit_binary'
dy.loc[(dy.loc[:,'class_fit']=='enriched'),ycol]=1
dy.loc[(dy.loc[:,'class_fit']=='neutral'),ycol]=np.nan
dy.loc[(dy.loc[:,'class_fit']=='depleted'),ycol]=0
dXy=pd.concat([dy.loc[:,ycol],dX],axis=1)
dXy.index.name='mutids'
# params={'loss': 'deviance', 'learning_rate': 0.0001, 'min_samples_leaf': 50, 'n_estimators': 3000, 'subsample': 0.8, 'min_samples_split': 23, 'max_features': None, 'max_depth': 6}
params={'loss': 'exponential', 'learning_rate': 0.001, 'min_samples_leaf': 50, 'n_estimators': 1500, 'subsample': 0.8, 'min_samples_split': 23, 'max_features': None, 'max_depth': 6}
dXy2ml(dXy,ycol,
# params=params,
if_gridsearch=True,
if_partial_dependence=False,
# if_feats_imps=True,
out_fh=out_fh,
inter='pre',
# force=True,
# use_top=25,
regORcls=regORcls,
cores=int(info.cores))
# get metrics plots
get_GB_cls_metrics(data_fh=out_fh,info=info) | gpl-3.0 |
russel1237/scikit-learn | examples/plot_digits_pipe.py | 250 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
SepehrMN/nest-simulator | pynest/examples/spatial/connex_ew.py | 14 | 2269 | # -*- coding: utf-8 -*-
#
# connex_ew.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
NEST spatial example
--------------------
Create two populations of iaf_psc_alpha neurons on a 30x30 grid with edge_wrap,
connect with circular mask, flat probability,
visualize.
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
"""
import matplotlib.pyplot as plt
import numpy as np
import nest
nest.ResetKernel()
pos = nest.spatial.grid(shape=[30, 30], extent=[3., 3.], edge_wrap=True)
#######################################################################
# create and connect two populations
a = nest.Create('iaf_psc_alpha', positions=pos)
b = nest.Create('iaf_psc_alpha', positions=pos)
cdict = {'rule': 'pairwise_bernoulli',
'p': 0.5,
'mask': {'circular': {'radius': 0.5}}}
nest.Connect(a, b,
conn_spec=cdict,
syn_spec={'weight': nest.random.uniform(0.5, 2.)})
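# Optional sanity check (commented out; `nest.GetConnections` is part of the
# standard NEST API, but the printed details vary between NEST versions):
# conns = nest.GetConnections(a, b)
# print(len(conns), 'connections created')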
plt.clf()
#####################################################################
# plot targets of neurons in different grid locations
# first, clear existing figure, get current figure
plt.clf()
fig = plt.gcf()
# plot targets of two source neurons into same figure, with mask
for src_index in [30 * 15 + 15, 0]:
# obtain node id for center
src = a[src_index:src_index + 1]
nest.PlotTargets(src, b, mask=cdict['mask'], fig=fig)
# beautify
plt.axes().set_xticks(np.arange(-1.5, 1.55, 0.5))
plt.axes().set_yticks(np.arange(-1.5, 1.55, 0.5))
plt.grid(True)
plt.axis([-2.0, 2.0, -2.0, 2.0])
plt.axes().set_aspect('equal', 'box')
plt.title('Connection targets')
plt.show()
# plt.savefig('connex_ew.pdf')
| gpl-2.0 |
alexeyum/scikit-learn | sklearn/datasets/base.py | 11 | 23497 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import csv
import sys
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os.path import splitext
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
super(Bunch, self).__init__(kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __setstate__(self, state):
# Bunch pickles generated with scikit-learn 0.16.* have an non
# empty __dict__. This causes a surprising behaviour when
# loading these pickles scikit-learn 0.17: reading bunch.key
# uses __dict__ but assigning to bunch.key use __setattr__ and
# only changes bunch['key']. More details can be found at:
# https://github.com/scikit-learn/scikit-learn/issues/6196.
# Overriding __setstate__ to be a noop has the effect of
# ignoring the pickled __dict__
pass
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
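# Illustrative behaviour (not doctests): the cache location can be overridden
# explicitly or via the SCIKIT_LEARN_DATA environment variable.
#
#   get_data_home()                # -> expanded '~/scikit_learn_data'
#   get_data_home('/tmp/sklearn')  # -> '/tmp/sklearn' (created if missing)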
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
Individual samples are assumed to be files stored a two levels folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kind of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error: {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float64)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_breast_cancer():
"""Load and return the breast cancer wisconsin dataset (classification).
The breast cancer dataset is a classic and very easy binary classification
dataset.
================= ==============
Classes 2
Samples per class 212(M),357(B)
Samples total 569
Dimensionality 30
Features real, positive
================= ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
The copy of UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is
downloaded from:
https://goo.gl/U2Uwz2
Examples
--------
Let's say you are interested in the samples 10, 50, and 85, and want to
know their class name.
>>> from sklearn.datasets import load_breast_cancer
>>> data = load_breast_cancer()
>>> data.target[[10, 50, 85]]
array([0, 1, 0])
>>> list(data.target_names)
['malignant', 'benign']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'breast_cancer.csv')) as csv_file:
data_file = csv.reader(csv_file)
first_line = next(data_file)
n_samples = int(first_line[0])
n_features = int(first_line[1])
target_names = np.array(first_line[2:4])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for count, value in enumerate(data_file):
data[count] = np.asarray(value[:-1], dtype=np.float64)
target[count] = np.asarray(value[-1], dtype=np.int)
with open(join(module_path, 'descr', 'breast_cancer.rst')) as rst_file:
fdescr = rst_file.read()
feature_names = np.array(['mean radius', 'mean texture',
'mean perimeter', 'mean area',
'mean smoothness', 'mean compactness',
'mean concavity', 'mean concave points',
'mean symmetry', 'mean fractal dimension',
'radius error', 'texture error',
'perimeter error', 'area error',
'smoothness error', 'compactness error',
'concavity error', 'concave points error',
'symmetry error', 'fractal dimension error',
'worst radius', 'worst texture',
'worst perimeter', 'worst area',
'worst smoothness', 'worst compactness',
'worst concavity', 'worst concave points',
'worst symmetry', 'worst fractal dimension'])
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=feature_names)
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import matplotlib.pyplot as plt #doctest: +SKIP
>>> plt.gray() #doctest: +SKIP
>>> plt.matshow(digits.images[0]) #doctest: +SKIP
>>> plt.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float64)
target[i] = np.asarray(d[-1], dtype=np.float64)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
Loads both, ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
def _pkl_filepath(*args, **kwargs):
"""Ensure different filenames for Python 2 and Python 3 pickles
An object pickled under Python 3 cannot be loaded under Python 2.
An object pickled under Python 2 can sometimes not be loaded
correctly under Python 3 because some Python 2 strings are decoded as
Python 3 strings which can be problematic for objects that use Python 2
strings as byte buffers for numerical data instead of "real" strings.
Therefore, dataset loaders in scikit-learn use different files for pickles
managed by Python 2 and Python 3 in the same SCIKIT_LEARN_DATA folder so
as to avoid conflicts.
args[-1] is expected to be the ".pkl" filename. Under Python 3, a
suffix is inserted before the extension so that the two versions do not collide.
_pkl_filepath('/path/to/folder', 'filename.pkl') returns:
- /path/to/folder/filename.pkl under Python 2
- /path/to/folder/filename_py3.pkl under Python 3+
"""
py3_suffix = kwargs.get("py3_suffix", "_py3")
basename, ext = splitext(args[-1])
if sys.version_info[0] >= 3:
basename += py3_suffix
new_args = args[:-1] + (basename + ext,)
return join(*new_args)
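# Minimal usage sketch (not part of the original module): under Python 3 the
# pickle name gets a version suffix so Python 2 and Python 3 caches never
# collide in the same data folder. The folder and file names below are
# illustrative assumptions; wrapped in a function to avoid import-time work.
def _demo_pkl_filepath():
    path = _pkl_filepath('/tmp/scikit_learn_data', 'california_housing.pkl')
    # -> '/tmp/scikit_learn_data/california_housing.pkl' under Python 2
    # -> '/tmp/scikit_learn_data/california_housing_py3.pkl' under Python 3+
    return path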
| bsd-3-clause |
sauliusl/seaborn | seaborn/tests/test_palettes.py | 3 | 11509 | import colorsys
import numpy as np
import matplotlib as mpl
import pytest
import nose.tools as nt
import numpy.testing as npt
import matplotlib.pyplot as plt
from .. import palettes, utils, rcmod
from ..external import husl
from ..colors import xkcd_rgb, crayons
from distutils.version import LooseVersion
mpl_ge_150 = LooseVersion(mpl.__version__) >= '1.5.0'
class TestColorPalettes(object):
def test_current_palette(self):
pal = palettes.color_palette(["red", "blue", "green"])
rcmod.set_palette(pal)
assert pal == utils.get_color_cycle()
rcmod.set()
def test_palette_context(self):
default_pal = palettes.color_palette()
context_pal = palettes.color_palette("muted")
with palettes.color_palette(context_pal):
nt.assert_equal(utils.get_color_cycle(), context_pal)
nt.assert_equal(utils.get_color_cycle(), default_pal)
def test_big_palette_context(self):
original_pal = palettes.color_palette("deep", n_colors=8)
context_pal = palettes.color_palette("husl", 10)
rcmod.set_palette(original_pal)
with palettes.color_palette(context_pal, 10):
nt.assert_equal(utils.get_color_cycle(), context_pal)
nt.assert_equal(utils.get_color_cycle(), original_pal)
# Reset default
rcmod.set()
def test_palette_size(self):
pal = palettes.color_palette("deep")
assert len(pal) == palettes.QUAL_PALETTE_SIZES["deep"]
pal = palettes.color_palette("pastel6")
assert len(pal) == palettes.QUAL_PALETTE_SIZES["pastel6"]
pal = palettes.color_palette("Set3")
assert len(pal) == palettes.QUAL_PALETTE_SIZES["Set3"]
pal = palettes.color_palette("husl")
assert len(pal) == 6
pal = palettes.color_palette("Greens")
assert len(pal) == 6
def test_seaborn_palettes(self):
pals = "deep", "muted", "pastel", "bright", "dark", "colorblind"
for name in pals:
full = palettes.color_palette(name, 10).as_hex()
short = palettes.color_palette(name + "6", 6).as_hex()
b, _, g, r, m, _, _, _, y, c = full
assert [b, g, r, m, y, c] == list(short)
def test_hls_palette(self):
hls_pal1 = palettes.hls_palette()
hls_pal2 = palettes.color_palette("hls")
npt.assert_array_equal(hls_pal1, hls_pal2)
def test_husl_palette(self):
husl_pal1 = palettes.husl_palette()
husl_pal2 = palettes.color_palette("husl")
npt.assert_array_equal(husl_pal1, husl_pal2)
def test_mpl_palette(self):
mpl_pal1 = palettes.mpl_palette("Reds")
mpl_pal2 = palettes.color_palette("Reds")
npt.assert_array_equal(mpl_pal1, mpl_pal2)
def test_mpl_dark_palette(self):
mpl_pal1 = palettes.mpl_palette("Blues_d")
mpl_pal2 = palettes.color_palette("Blues_d")
npt.assert_array_equal(mpl_pal1, mpl_pal2)
def test_bad_palette_name(self):
with nt.assert_raises(ValueError):
palettes.color_palette("IAmNotAPalette")
def test_terrible_palette_name(self):
with nt.assert_raises(ValueError):
palettes.color_palette("jet")
def test_bad_palette_colors(self):
pal = ["red", "blue", "iamnotacolor"]
with nt.assert_raises(ValueError):
palettes.color_palette(pal)
def test_palette_desat(self):
pal1 = palettes.husl_palette(6)
pal1 = [utils.desaturate(c, .5) for c in pal1]
pal2 = palettes.color_palette("husl", desat=.5)
npt.assert_array_equal(pal1, pal2)
def test_palette_is_list_of_tuples(self):
pal_in = np.array(["red", "blue", "green"])
pal_out = palettes.color_palette(pal_in, 3)
nt.assert_is_instance(pal_out, list)
nt.assert_is_instance(pal_out[0], tuple)
nt.assert_is_instance(pal_out[0][0], float)
nt.assert_equal(len(pal_out[0]), 3)
def test_palette_cycles(self):
deep = palettes.color_palette("deep6")
double_deep = palettes.color_palette("deep6", 12)
nt.assert_equal(double_deep, deep + deep)
def test_hls_values(self):
pal1 = palettes.hls_palette(6, h=0)
pal2 = palettes.hls_palette(6, h=.5)
pal2 = pal2[3:] + pal2[:3]
npt.assert_array_almost_equal(pal1, pal2)
pal_dark = palettes.hls_palette(5, l=.2) # noqa
pal_bright = palettes.hls_palette(5, l=.8) # noqa
npt.assert_array_less(list(map(sum, pal_dark)),
list(map(sum, pal_bright)))
pal_flat = palettes.hls_palette(5, s=.1)
pal_bold = palettes.hls_palette(5, s=.9)
npt.assert_array_less(list(map(np.std, pal_flat)),
list(map(np.std, pal_bold)))
def test_husl_values(self):
pal1 = palettes.husl_palette(6, h=0)
pal2 = palettes.husl_palette(6, h=.5)
pal2 = pal2[3:] + pal2[:3]
npt.assert_array_almost_equal(pal1, pal2)
pal_dark = palettes.husl_palette(5, l=.2) # noqa
pal_bright = palettes.husl_palette(5, l=.8) # noqa
npt.assert_array_less(list(map(sum, pal_dark)),
list(map(sum, pal_bright)))
pal_flat = palettes.husl_palette(5, s=.1)
pal_bold = palettes.husl_palette(5, s=.9)
npt.assert_array_less(list(map(np.std, pal_flat)),
list(map(np.std, pal_bold)))
def test_cbrewer_qual(self):
pal_short = palettes.mpl_palette("Set1", 4)
pal_long = palettes.mpl_palette("Set1", 6)
nt.assert_equal(pal_short, pal_long[:4])
pal_full = palettes.mpl_palette("Set2", 8)
pal_long = palettes.mpl_palette("Set2", 10)
nt.assert_equal(pal_full, pal_long[:8])
def test_mpl_reversal(self):
pal_forward = palettes.mpl_palette("BuPu", 6)
pal_reverse = palettes.mpl_palette("BuPu_r", 6)
npt.assert_array_almost_equal(pal_forward, pal_reverse[::-1])
def test_rgb_from_hls(self):
color = .5, .8, .4
rgb_got = palettes._color_to_rgb(color, "hls")
rgb_want = colorsys.hls_to_rgb(*color)
nt.assert_equal(rgb_got, rgb_want)
def test_rgb_from_husl(self):
color = 120, 50, 40
rgb_got = palettes._color_to_rgb(color, "husl")
rgb_want = husl.husl_to_rgb(*color)
nt.assert_equal(rgb_got, rgb_want)
def test_rgb_from_xkcd(self):
color = "dull red"
rgb_got = palettes._color_to_rgb(color, "xkcd")
rgb_want = xkcd_rgb[color]
nt.assert_equal(rgb_got, rgb_want)
def test_light_palette(self):
pal_forward = palettes.light_palette("red")
pal_reverse = palettes.light_palette("red", reverse=True)
npt.assert_array_almost_equal(pal_forward, pal_reverse[::-1])
red = tuple(mpl.colors.colorConverter.to_rgba("red"))
nt.assert_equal(tuple(pal_forward[-1]), red)
pal_cmap = palettes.light_palette("blue", as_cmap=True)
nt.assert_is_instance(pal_cmap, mpl.colors.LinearSegmentedColormap)
def test_dark_palette(self):
pal_forward = palettes.dark_palette("red")
pal_reverse = palettes.dark_palette("red", reverse=True)
npt.assert_array_almost_equal(pal_forward, pal_reverse[::-1])
red = tuple(mpl.colors.colorConverter.to_rgba("red"))
nt.assert_equal(tuple(pal_forward[-1]), red)
pal_cmap = palettes.dark_palette("blue", as_cmap=True)
nt.assert_is_instance(pal_cmap, mpl.colors.LinearSegmentedColormap)
def test_blend_palette(self):
colors = ["red", "yellow", "white"]
pal_cmap = palettes.blend_palette(colors, as_cmap=True)
nt.assert_is_instance(pal_cmap, mpl.colors.LinearSegmentedColormap)
def test_cubehelix_against_matplotlib(self):
x = np.linspace(0, 1, 8)
mpl_pal = mpl.cm.cubehelix(x)[:, :3].tolist()
sns_pal = palettes.cubehelix_palette(8, start=0.5, rot=-1.5, hue=1,
dark=0, light=1, reverse=True)
nt.assert_list_equal(sns_pal, mpl_pal)
def test_cubehelix_n_colors(self):
for n in [3, 5, 8]:
pal = palettes.cubehelix_palette(n)
nt.assert_equal(len(pal), n)
def test_cubehelix_reverse(self):
pal_forward = palettes.cubehelix_palette()
pal_reverse = palettes.cubehelix_palette(reverse=True)
nt.assert_list_equal(pal_forward, pal_reverse[::-1])
def test_cubehelix_cmap(self):
cmap = palettes.cubehelix_palette(as_cmap=True)
nt.assert_is_instance(cmap, mpl.colors.ListedColormap)
pal = palettes.cubehelix_palette()
x = np.linspace(0, 1, 6)
npt.assert_array_equal(cmap(x)[:, :3], pal)
cmap_rev = palettes.cubehelix_palette(as_cmap=True, reverse=True)
x = np.linspace(0, 1, 6)
pal_forward = cmap(x).tolist()
pal_reverse = cmap_rev(x[::-1]).tolist()
nt.assert_list_equal(pal_forward, pal_reverse)
def test_cubehelix_code(self):
color_palette = palettes.color_palette
cubehelix_palette = palettes.cubehelix_palette
pal1 = color_palette("ch:", 8)
pal2 = color_palette(cubehelix_palette(8))
assert pal1 == pal2
pal1 = color_palette("ch:.5, -.25,hue = .5,light=.75", 8)
pal2 = color_palette(cubehelix_palette(8, .5, -.25, hue=.5, light=.75))
assert pal1 == pal2
pal1 = color_palette("ch:h=1,r=.5", 9)
pal2 = color_palette(cubehelix_palette(9, hue=1, rot=.5))
assert pal1 == pal2
pal1 = color_palette("ch:_r", 6)
pal2 = color_palette(cubehelix_palette(6, reverse=True))
assert pal1 == pal2
def test_xkcd_palette(self):
names = list(xkcd_rgb.keys())[10:15]
colors = palettes.xkcd_palette(names)
for name, color in zip(names, colors):
as_hex = mpl.colors.rgb2hex(color)
nt.assert_equal(as_hex, xkcd_rgb[name])
def test_crayon_palette(self):
names = list(crayons.keys())[10:15]
colors = palettes.crayon_palette(names)
for name, color in zip(names, colors):
as_hex = mpl.colors.rgb2hex(color)
nt.assert_equal(as_hex, crayons[name].lower())
def test_color_codes(self):
palettes.set_color_codes("deep")
colors = palettes.color_palette("deep6") + [".1"]
for code, color in zip("bgrmyck", colors):
rgb_want = mpl.colors.colorConverter.to_rgb(color)
rgb_got = mpl.colors.colorConverter.to_rgb(code)
nt.assert_equal(rgb_want, rgb_got)
palettes.set_color_codes("reset")
with pytest.raises(ValueError):
palettes.set_color_codes("Set1")
def test_as_hex(self):
pal = palettes.color_palette("deep")
for rgb, hex in zip(pal, pal.as_hex()):
nt.assert_equal(mpl.colors.rgb2hex(rgb), hex)
def test_preserved_palette_length(self):
pal_in = palettes.color_palette("Set1", 10)
pal_out = palettes.color_palette(pal_in)
nt.assert_equal(pal_in, pal_out)
def test_get_color_cycle(self):
if mpl_ge_150:
colors = [(1., 0., 0.), (0, 1., 0.)]
prop_cycle = plt.cycler(color=colors)
with plt.rc_context({"axes.prop_cycle": prop_cycle}):
result = utils.get_color_cycle()
assert result == colors
| bsd-3-clause |
mxlei01/healthcareai-py | healthcareai/common/model_eval.py | 4 | 13696 | """Model evaluation tools."""
import os
import sklearn
import itertools
import numpy as np
import pandas as pd
import sklearn.metrics as skmetrics
from matplotlib import pyplot as plt
from healthcareai.common.healthcareai_error import HealthcareAIError
DIAGONAL_LINE_COLOR = '#bbbbbb'
DIAGONAL_LINE_STYLE = 'dotted'
def compute_roc(y_test, probability_predictions):
"""
Compute TPRs, FPRs, best cutoff, ROC auc, and raw thresholds.
Args:
y_test (list) : true label values corresponding to the predictions. Also length n.
probability_predictions (list) : predictions coming from an ML algorithm of length n.
Returns:
dict:
"""
_validate_predictions_and_labels_are_equal_length(probability_predictions, y_test)
# Calculate ROC
false_positive_rates, true_positive_rates, roc_thresholds = skmetrics.roc_curve(y_test, probability_predictions)
roc_auc = skmetrics.roc_auc_score(y_test, probability_predictions)
# get ROC ideal cutoffs (upper left, or 0,1)
roc_distances = (false_positive_rates - 0) ** 2 + (true_positive_rates - 1) ** 2
# To prevent the case where there are two points with the same minimum distance, return only the first
# np.where returns a tuple (we want the first element in the first array)
roc_index = np.where(roc_distances == np.min(roc_distances))[0][0]
best_tpr = true_positive_rates[roc_index]
best_fpr = false_positive_rates[roc_index]
ideal_roc_cutoff = roc_thresholds[roc_index]
return {'roc_auc': roc_auc,
'best_roc_cutoff': ideal_roc_cutoff,
'best_true_positive_rate': best_tpr,
'best_false_positive_rate': best_fpr,
'true_positive_rates': true_positive_rates,
'false_positive_rates': false_positive_rates,
'roc_thresholds': roc_thresholds}
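# Minimal usage sketch (not part of the original module): compute_roc() takes
# true labels and predicted probabilities and returns the ROC AUC together with
# the cutoff closest to the ideal (0, 1) corner. The toy numbers below are
# illustrative only; wrapped in a function to avoid import-time work.
def _demo_compute_roc():
    y_true = [0, 0, 1, 1]
    y_scores = [0.1, 0.4, 0.35, 0.8]
    metrics = compute_roc(y_true, y_scores)
    return metrics['roc_auc'], metrics['best_roc_cutoff']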
def compute_pr(y_test, probability_predictions):
"""
Compute Precision-Recall, thresholds and PR AUC.
Args:
y_test (list) : true label values corresponding to the predictions. Also length n.
probability_predictions (list) : predictions coming from an ML algorithm of length n.
Returns:
dict:
"""
_validate_predictions_and_labels_are_equal_length(probability_predictions, y_test)
# Calculate PR
precisions, recalls, pr_thresholds = skmetrics.precision_recall_curve(y_test, probability_predictions)
pr_auc = skmetrics.average_precision_score(y_test, probability_predictions)
# get ideal cutoffs for suggestions (upper right or 1,1)
pr_distances = (precisions - 1) ** 2 + (recalls - 1) ** 2
# To prevent the case where there are two points with the same minimum distance, return only the first
# np.where returns a tuple (we want the first element in the first array)
pr_index = np.where(pr_distances == np.min(pr_distances))[0][0]
best_precision = precisions[pr_index]
best_recall = recalls[pr_index]
ideal_pr_cutoff = pr_thresholds[pr_index]
return {'pr_auc': pr_auc,
'best_pr_cutoff': ideal_pr_cutoff,
'best_precision': best_precision,
'best_recall': best_recall,
'precisions': precisions,
'recalls': recalls,
'pr_thresholds': pr_thresholds}
def calculate_regression_metrics(trained_sklearn_estimator, x_test, y_test):
"""
Given a trained estimator, calculate metrics.
Args:
trained_sklearn_estimator (sklearn.base.BaseEstimator): a scikit-learn estimator that has been `.fit()`
y_test (numpy.ndarray): A 1d numpy array of the y_test set (true target values)
x_test (numpy.ndarray): A 2d numpy array of the x_test set (features)
Returns:
dict: A dictionary of metrics objects
"""
# Get predictions
predictions = trained_sklearn_estimator.predict(x_test)
# Calculate individual metrics
mean_squared_error = skmetrics.mean_squared_error(y_test, predictions)
mean_absolute_error = skmetrics.mean_absolute_error(y_test, predictions)
result = {'mean_squared_error': mean_squared_error, 'mean_absolute_error': mean_absolute_error}
return result
def calculate_binary_classification_metrics(trained_sklearn_estimator, x_test, y_test):
"""
Given a trained estimator, calculate metrics.
Args:
trained_sklearn_estimator (sklearn.base.BaseEstimator): a scikit-learn estimator that has been `.fit()`
x_test (numpy.ndarray): A 2d numpy array of the x_test set (features)
y_test (numpy.ndarray): A 1d numpy array of the y_test set (true labels)
Returns:
dict: A dictionary of metrics objects
"""
# Squeeze down y_test to 1D
y_test = np.squeeze(y_test)
_validate_predictions_and_labels_are_equal_length(x_test, y_test)
# Get binary and probability classification predictions
binary_predictions = np.squeeze(trained_sklearn_estimator.predict(x_test))
probability_predictions = np.squeeze(trained_sklearn_estimator.predict_proba(x_test)[:, 1])
# Calculate accuracy
accuracy = skmetrics.accuracy_score(y_test, binary_predictions)
roc = compute_roc(y_test, probability_predictions)
pr = compute_pr(y_test, probability_predictions)
# Unpack the roc and pr dictionaries so the metric lookup is easier for plot and ensemble methods
return {'accuracy': accuracy, **roc, **pr}
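# Minimal sketch (not part of the original module): any fitted scikit-learn
# classifier exposing predict() and predict_proba() can be scored here.
# LogisticRegression on synthetic data is an illustrative assumption, not a
# healthcareai requirement; wrapped in a function to avoid import-time work.
def _demo_binary_classification_metrics():
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    x, y = make_classification(n_samples=200, random_state=0)
    model = LogisticRegression().fit(x[:150], y[:150])
    metrics = calculate_binary_classification_metrics(model, x[150:], y[150:])
    return metrics['accuracy'], metrics['roc_auc'], metrics['pr_auc']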
def roc_plot_from_thresholds(roc_thresholds_by_model, save=False, debug=False):
"""
From a given dictionary of thresholds by model, create a ROC curve for each model.
Args:
roc_thresholds_by_model (dict): A dictionary of ROC thresholds by model name.
save (bool): False to display the image (default) or True to save it (but not display it)
debug (bool): verbose output.
"""
# TODO consolidate this and PR plotter into 1 function
# TODO make the colors randomly generated from rgb values
# Cycle through the colors list
color_iterator = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
# Initialize plot
plt.figure()
plt.xlabel('False Positive Rate (FPR)')
plt.ylabel('True Positive Rate (TPR)')
plt.title('Receiver Operating Characteristic (ROC)')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.plot([0, 1], [0, 1], linestyle=DIAGONAL_LINE_STYLE, color=DIAGONAL_LINE_COLOR)
# Calculate and plot for each model
for color, (model_name, metrics) in zip(color_iterator, roc_thresholds_by_model.items()):
# Extract model name and metrics from dictionary
roc_auc = metrics['roc_auc']
tpr = metrics['true_positive_rates']
fpr = metrics['false_positive_rates']
best_true_positive_rate = metrics['best_true_positive_rate']
best_false_positive_rate = metrics['best_false_positive_rate']
if debug:
print('{} model:'.format(model_name))
print(pd.DataFrame({'FPR': fpr, 'TPR': tpr}))
# plot the line
label = '{} (ROC AUC = {})'.format(model_name, round(roc_auc, 2))
plt.plot(fpr, tpr, color=color, label=label)
plt.plot([best_false_positive_rate], [best_true_positive_rate], marker='*', markersize=10, color=color)
plt.legend(loc="lower right")
if save:
plt.savefig('ROC.png')
source_path = os.path.dirname(os.path.abspath(__file__))
print('\nROC plot saved in: {}'.format(source_path))
plt.show()
def pr_plot_from_thresholds(pr_thresholds_by_model, save=False, debug=False):
"""
From a given dictionary of thresholds by model, create a PR curve for each model.
Args:
pr_thresholds_by_model (dict): A dictionary of PR thresholds by model name.
save (bool): False to display the image (default) or True to save it (but not display it)
debug (bool): verbose output.
"""
# TODO consolidate this and the ROC plotter into 1 function
# TODO make the colors randomly generated from rgb values
# Cycle through the colors list
color_iterator = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
# Initialize plot
plt.figure()
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision Recall (PR)')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.plot([0, 1], [1, 0], linestyle=DIAGONAL_LINE_STYLE, color=DIAGONAL_LINE_COLOR)
# Calculate and plot for each model
for color, (model_name, metrics) in zip(color_iterator, pr_thresholds_by_model.items()):
# Extract model name and metrics from dictionary
pr_auc = metrics['pr_auc']
precision = metrics['precisions']
recall = metrics['recalls']
best_recall = metrics['best_recall']
best_precision = metrics['best_precision']
if debug:
print('{} model:'.format(model_name))
print(pd.DataFrame({'Recall': recall, 'Precision': precision}))
# plot the line
label = '{} (PR AUC = {})'.format(model_name, round(pr_auc, 2))
plt.plot(recall, precision, color=color, label=label)
plt.plot([best_recall], [best_precision], marker='*', markersize=10, color=color)
plt.legend(loc="lower left")
if save:
plt.savefig('PR.png')
source_path = os.path.dirname(os.path.abspath(__file__))
print('\nPR plot saved in: {}'.format(source_path))
plt.show()
def plot_random_forest_feature_importance(trained_random_forest, x_train, feature_names, feature_limit=15, save=False):
"""
Given a random forest estimator, an x_train array, the feature names save or display a feature importance plot.
Args:
trained_random_forest (sklearn.ensemble.RandomForestClassifier or sklearn.ensemble.RandomForestRegressor):
x_train (numpy.array): A 2D numpy array that was used for training
feature_names (list): Column names in the x_train set
feature_limit (int): Number of features to display on graph
save (bool): True to save the plot, false to display it in a blocking thread
"""
_validate_random_forest_estimator(trained_random_forest)
# Sort the feature names and relative importances
# TODO this portion could probably be extracted and tested, since the plot is difficult to test
aggregate_features_importances = trained_random_forest.feature_importances_
indices = np.argsort(aggregate_features_importances)[::-1]
sorted_feature_names = [feature_names[i] for i in indices]
# limit the plot to the top n features so it stays legible on models with lots of features
subset_indices = indices[0:feature_limit]
number_of_features = x_train.shape[1]
# build a range using the lesser value
max_features = min(number_of_features, feature_limit)
x_axis_limit = range(max_features)
# Get the standard deviations for error bars
standard_deviations = _standard_deviations_of_importances(trained_random_forest)
# Turn off matplotlib interactive mode
plt.ioff()
# Set up the plot and axes
figure = plt.figure()
plt.title('Top {} (of {}) Important Features'.format(max_features, number_of_features))
plt.ylabel('Relative Importance')
# Plot each feature
plt.bar(
# this should go as far as the model or limit whichever is less
x_axis_limit,
aggregate_features_importances[subset_indices],
color="g",
yerr=standard_deviations[subset_indices],
align="center")
plt.xticks(x_axis_limit, sorted_feature_names, rotation=90)
# x axis scales by default
# set y axis min to zero
plt.ylim(ymin=0)
# plt.tight_layout() # Do not use tight_layout until https://github.com/matplotlib/matplotlib/issues/5456 is fixed
# Because long feature names cause this error
# Save or display the plot
if save:
plt.savefig('FeatureImportances.png')
source_path = os.path.dirname(os.path.abspath(__file__))
print('\nFeature importance plot saved in: {}'.format(source_path))
# Close the figure so it does not get displayed
plt.close(figure)
else:
plt.show()
def _validate_random_forest_estimator(trained_random_forest):
"""
Validate that an input is a random forest estimator and raise an error if it is not.
Args:
trained_random_forest: any input
"""
is_rf_classifier = isinstance(trained_random_forest, sklearn.ensemble.RandomForestClassifier)
is_rf_regressor = isinstance(trained_random_forest, sklearn.ensemble.RandomForestRegressor)
if not (is_rf_classifier or is_rf_regressor):
raise HealthcareAIError('Feature plotting only works with a scikit learn Random Forest estimator.')
def _standard_deviations_of_importances(trained_random_forest):
"""
Given a scikit-learn trained random forest estimator, return the standard deviations of all feature importances.
Args:
trained_random_forest (sklearn.ensemble.RandomForestClassifier or sklearn.ensemble.RandomForestRegressor): the
trained estimator
Returns:
list: A numeric list
"""
# Get the individual feature importances from each tree to find the standard deviation for plotting error bars
individual_feature_importances = [tree.feature_importances_ for tree in trained_random_forest.estimators_]
standard_deviations = np.std(individual_feature_importances, axis=0)
return standard_deviations
def _validate_predictions_and_labels_are_equal_length(predictions, true_values):
if len(predictions) == len(true_values):
return True
else:
raise HealthcareAIError('The number of predictions is not equal to the number of true_values.')
if __name__ == '__main__':
pass
| mit |
costypetrisor/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squares function. The penalising `shrinks` the
value of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.22/_downloads/52a5ebd4d6b8bcb7eccdf9bc2b0fcfcc/plot_cluster_stats_evoked.py | 18 | 3021 | """
=======================================================
Permutation F-test on sensor data with 1D cluster level
=======================================================
One tests if the evoked response is significantly different
between conditions. The multiple comparisons problem is addressed
with a cluster-level permutation test.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.stats import permutation_cluster_test
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
channel = 'MEG 1332' # include only this channel in analysis
include = [channel]
###############################################################################
# Read epochs for the channel of interest
picks = mne.pick_types(raw.info, meg=False, eog=True, include=include,
exclude='bads')
event_id = 1
reject = dict(grad=4000e-13, eog=150e-6)
epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition1 = epochs1.get_data() # as 3D matrix
event_id = 2
epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition2 = epochs2.get_data() # as 3D matrix
condition1 = condition1[:, 0, :] # take only one channel to get a 2D array
condition2 = condition2[:, 0, :] # take only one channel to get a 2D array
###############################################################################
# Compute statistic
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_test([condition1, condition2], n_permutations=1000,
threshold=threshold, tail=1, n_jobs=1,
out_type='mask')
###############################################################################
# Plot
times = epochs1.times
plt.close('all')
plt.subplot(211)
plt.title('Channel : ' + channel)
plt.plot(times, condition1.mean(axis=0) - condition2.mean(axis=0),
label="ERF Contrast (Event 1 - Event 2)")
plt.ylabel("MEG (T / m)")
plt.legend()
plt.subplot(212)
for i_c, c in enumerate(clusters):
c = c[0]
if cluster_p_values[i_c] <= 0.05:
h = plt.axvspan(times[c.start], times[c.stop - 1],
color='r', alpha=0.3)
else:
plt.axvspan(times[c.start], times[c.stop - 1], color=(0.3, 0.3, 0.3),
alpha=0.3)
hf = plt.plot(times, T_obs, 'g')
plt.legend((h, ), ('cluster p-value < 0.05', ))
plt.xlabel("time (ms)")
plt.ylabel("f-values")
plt.show()
| bsd-3-clause |
Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/traitlets/config/loader.py | 3 | 28215 | # encoding: utf-8
"""A simple configuration system."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import argparse
import copy
import logging
import os
import re
import sys
import json
from ast import literal_eval
from ipython_genutils.path import filefind
from ipython_genutils import py3compat
from ipython_genutils.encoding import DEFAULT_ENCODING
from ipython_genutils.py3compat import unicode_type, iteritems
from traitlets.traitlets import HasTraits, List, Any
#-----------------------------------------------------------------------------
# Exceptions
#-----------------------------------------------------------------------------
class ConfigError(Exception):
pass
class ConfigLoaderError(ConfigError):
pass
class ConfigFileNotFound(ConfigError):
pass
class ArgumentError(ConfigLoaderError):
pass
#-----------------------------------------------------------------------------
# Argparse fix
#-----------------------------------------------------------------------------
# Unfortunately argparse by default prints help messages to stderr instead of
# stdout. This makes it annoying to capture long help screens at the command
# line, since one must know how to pipe stderr, which many users don't know how
# to do. So we override the print_help method with one that defaults to
# stdout and use our class instead.
class ArgumentParser(argparse.ArgumentParser):
"""Simple argparse subclass that prints help to stdout by default."""
def print_help(self, file=None):
if file is None:
file = sys.stdout
return super(ArgumentParser, self).print_help(file)
print_help.__doc__ = argparse.ArgumentParser.print_help.__doc__
#-----------------------------------------------------------------------------
# Config class for holding config information
#-----------------------------------------------------------------------------
class LazyConfigValue(HasTraits):
"""Proxy object for exposing methods on configurable containers
Exposes:
- append, extend, insert on lists
- update on dicts
- update, add on sets
"""
_value = None
# list methods
_extend = List()
_prepend = List()
def append(self, obj):
self._extend.append(obj)
def extend(self, other):
self._extend.extend(other)
def prepend(self, other):
"""like list.extend, but for the front"""
self._prepend[:0] = other
_inserts = List()
def insert(self, index, other):
if not isinstance(index, int):
raise TypeError("An integer is required")
self._inserts.append((index, other))
# dict methods
# update is used for both dict and set
_update = Any()
def update(self, other):
if self._update is None:
if isinstance(other, dict):
self._update = {}
else:
self._update = set()
self._update.update(other)
# set methods
def add(self, obj):
self.update({obj})
def get_value(self, initial):
"""construct the value from the initial one
after applying any insert / extend / update changes
"""
if self._value is not None:
return self._value
value = copy.deepcopy(initial)
if isinstance(value, list):
for idx, obj in self._inserts:
value.insert(idx, obj)
value[:0] = self._prepend
value.extend(self._extend)
elif isinstance(value, dict):
if self._update:
value.update(self._update)
elif isinstance(value, set):
if self._update:
value.update(self._update)
self._value = value
return value
def to_dict(self):
"""return JSONable dict form of my data
Currently update as dict or set, extend, prepend as lists, and inserts as list of tuples.
"""
d = {}
if self._update:
d['update'] = self._update
if self._extend:
d['extend'] = self._extend
if self._prepend:
d['prepend'] = self._prepend
elif self._inserts:
d['inserts'] = self._inserts
return d
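# Minimal sketch (not part of the original module): LazyConfigValue records
# container mutations before the real value exists and applies them later via
# get_value(). Wrapped in a function to keep module import side-effect free.
def _demo_lazy_config_value():
    lazy = LazyConfigValue()
    lazy.append(3)                 # queued in _extend
    lazy.prepend([0])              # queued in _prepend
    lazy.insert(1, 99)             # queued in _inserts
    return lazy.get_value([1, 2])  # -> [0, 1, 99, 2, 3]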
def _is_section_key(key):
"""Is a Config key a section name (does it start with a capital)?"""
if key and key[0].upper()==key[0] and not key.startswith('_'):
return True
else:
return False
class Config(dict):
"""An attribute based dict that can do smart merges."""
def __init__(self, *args, **kwds):
dict.__init__(self, *args, **kwds)
self._ensure_subconfig()
def _ensure_subconfig(self):
"""ensure that sub-dicts that should be Config objects are
casts dicts that are under section keys to Config objects,
which is necessary for constructing Config objects from dict literals.
"""
for key in self:
obj = self[key]
if _is_section_key(key) \
and isinstance(obj, dict) \
and not isinstance(obj, Config):
setattr(self, key, Config(obj))
def _merge(self, other):
"""deprecated alias, use Config.merge()"""
self.merge(other)
def merge(self, other):
"""merge another config object into this one"""
to_update = {}
for k, v in iteritems(other):
if k not in self:
to_update[k] = copy.deepcopy(v)
else: # I have this key
if isinstance(v, Config) and isinstance(self[k], Config):
# Recursively merge common sub Configs
self[k].merge(v)
else:
# Plain updates for non-Configs
to_update[k] = copy.deepcopy(v)
self.update(to_update)
def collisions(self, other):
"""Check for collisions between two config objects.
Returns a dict of the form {"Class": {"trait": "collision message"}}`,
indicating which values have been ignored.
An empty dict indicates no collisions.
"""
collisions = {}
for section in self:
if section not in other:
continue
mine = self[section]
theirs = other[section]
for key in mine:
if key in theirs and mine[key] != theirs[key]:
collisions.setdefault(section, {})
collisions[section][key] = "%r ignored, using %r" % (mine[key], theirs[key])
return collisions
def __contains__(self, key):
# allow nested contains of the form `"Section.key" in config`
if '.' in key:
first, remainder = key.split('.', 1)
if first not in self:
return False
return remainder in self[first]
return super(Config, self).__contains__(key)
# .has_key is deprecated for dictionaries.
has_key = __contains__
def _has_section(self, key):
return _is_section_key(key) and key in self
def copy(self):
return type(self)(dict.copy(self))
def __copy__(self):
return self.copy()
def __deepcopy__(self, memo):
new_config = type(self)()
for key, value in self.items():
if isinstance(value, (Config, LazyConfigValue)):
# deep copy config objects
value = copy.deepcopy(value, memo)
elif type(value) in {dict, list, set, tuple}:
# shallow copy plain container traits
value = copy.copy(value)
new_config[key] = value
return new_config
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
if _is_section_key(key):
c = Config()
dict.__setitem__(self, key, c)
return c
elif not key.startswith('_'):
# undefined, create lazy value, used for container methods
v = LazyConfigValue()
dict.__setitem__(self, key, v)
return v
else:
raise KeyError
def __setitem__(self, key, value):
if _is_section_key(key):
if not isinstance(value, Config):
raise ValueError('values whose keys begin with an uppercase '
'char must be Config instances: %r, %r' % (key, value))
dict.__setitem__(self, key, value)
def __getattr__(self, key):
if key.startswith('__'):
return dict.__getattr__(self, key)
try:
return self.__getitem__(key)
except KeyError as e:
raise AttributeError(e)
def __setattr__(self, key, value):
if key.startswith('__'):
return dict.__setattr__(self, key, value)
try:
self.__setitem__(key, value)
except KeyError as e:
raise AttributeError(e)
def __delattr__(self, key):
if key.startswith('__'):
return dict.__delattr__(self, key)
try:
dict.__delitem__(self, key)
except KeyError as e:
raise AttributeError(e)
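# Minimal sketch (not part of the original module): Config.merge() defined
# above folds another Config into this one recursively, so nested sections are
# updated rather than replaced. Wrapped in a function to avoid import-time work.
def _demo_config_merge():
    c1 = Config({'InteractiveShell': {'autocall': False}})
    c2 = Config({'InteractiveShell': {'colors': 'NoColor'}})
    c1.merge(c2)
    # c1.InteractiveShell now holds both 'autocall' and 'colors'
    return c1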
#-----------------------------------------------------------------------------
# Config loading classes
#-----------------------------------------------------------------------------
class ConfigLoader(object):
"""A object for loading configurations from just about anywhere.
The resulting configuration is packaged as a :class:`Config`.
Notes
-----
A :class:`ConfigLoader` does one thing: load a config from a source
(file, command line arguments) and return the data as a :class:`Config` object.
There are lots of things that :class:`ConfigLoader` does not do. It does
not implement complex logic for finding config files. It does not handle
default values or merge multiple configs. These things need to be
handled elsewhere.
"""
def _log_default(self):
from traitlets.log import get_logger
return get_logger()
def __init__(self, log=None):
"""A base class for config loaders.
log : instance of :class:`logging.Logger` to use.
By default the logger of :meth:`traitlets.config.application.Application.instance()`
will be used.
Examples
--------
>>> cl = ConfigLoader()
>>> config = cl.load_config()
>>> config
{}
"""
self.clear()
if log is None:
self.log = self._log_default()
self.log.debug('Using default logger')
else:
self.log = log
def clear(self):
self.config = Config()
def load_config(self):
"""Load a config from somewhere, return a :class:`Config` instance.
Usually, this will cause self.config to be set and then returned.
However, in most cases, :meth:`ConfigLoader.clear` should be called
to erase any previous state.
"""
self.clear()
return self.config
class FileConfigLoader(ConfigLoader):
"""A base class for file based configurations.
As we add more file based config loaders, the common logic should go
here.
"""
def __init__(self, filename, path=None, **kw):
"""Build a config loader for a filename and path.
Parameters
----------
filename : str
The file name of the config file.
path : str, list, tuple
The path to search for the config file on, or a sequence of
paths to try in order.
"""
super(FileConfigLoader, self).__init__(**kw)
self.filename = filename
self.path = path
self.full_filename = ''
def _find_file(self):
"""Try to find the file by searching the paths."""
self.full_filename = filefind(self.filename, self.path)
class JSONFileConfigLoader(FileConfigLoader):
"""A JSON file loader for config"""
def load_config(self):
"""Load the config from a file and return it as a Config object."""
self.clear()
try:
self._find_file()
except IOError as e:
raise ConfigFileNotFound(str(e))
dct = self._read_file_as_dict()
self.config = self._convert_to_config(dct)
return self.config
def _read_file_as_dict(self):
with open(self.full_filename) as f:
return json.load(f)
def _convert_to_config(self, dictionary):
if 'version' in dictionary:
version = dictionary.pop('version')
else:
version = 1
self.log.warning("Unrecognized JSON config file version, assuming version {}".format(version))
if version == 1:
return Config(dictionary)
else:
raise ValueError('Unknown version of JSON config file: {version}'.format(version=version))
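# Minimal sketch (not part of the original module): given a JSON file such as
#     {"version": 1, "Application": {"log_level": 10}}
# the loader returns a Config object. The file name and path below are
# illustrative assumptions; load_config() raises ConfigFileNotFound if the
# file cannot be found.
def _demo_json_file_config_loader():
    loader = JSONFileConfigLoader('example_config.json', path='.')
    return loader.load_config()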
class PyFileConfigLoader(FileConfigLoader):
"""A config loader for pure python files.
This is responsible for locating a Python config file by filename and
path, then executing it to construct a Config object.
"""
def load_config(self):
"""Load the config from a file and return it as a Config object."""
self.clear()
try:
self._find_file()
except IOError as e:
raise ConfigFileNotFound(str(e))
self._read_file_as_dict()
return self.config
def load_subconfig(self, fname, path=None):
"""Injected into config file namespace as load_subconfig"""
if path is None:
path = self.path
loader = self.__class__(fname, path)
try:
sub_config = loader.load_config()
except ConfigFileNotFound:
# Pass silently if the sub config is not there,
# treat it as an empty config file.
pass
else:
self.config.merge(sub_config)
def _read_file_as_dict(self):
"""Load the config file into self.config, with recursive loading."""
def get_config():
"""Unnecessary now, but a deprecation warning is more trouble than it's worth."""
return self.config
namespace = dict(
c=self.config,
load_subconfig=self.load_subconfig,
get_config=get_config,
__file__=self.full_filename,
)
fs_encoding = sys.getfilesystemencoding() or 'ascii'
conf_filename = self.full_filename.encode(fs_encoding)
py3compat.execfile(conf_filename, namespace)
class CommandLineConfigLoader(ConfigLoader):
"""A config loader for command line arguments.
As we add more command line based loaders, the common logic should go
here.
"""
def _exec_config_str(self, lhs, rhs):
"""execute self.config.<lhs> = <rhs>
* expands ~ with expanduser
* tries to assign with literal_eval, otherwise assigns with just the string,
allowing `--C.a=foobar` and `--C.a="foobar"` to be equivalent. *Not*
equivalent are `--C.a=4` and `--C.a='4'`.
"""
rhs = os.path.expanduser(rhs)
try:
# Try to see if regular Python syntax will work. This
# won't handle strings as the quote marks are removed
# by the system shell.
value = literal_eval(rhs)
except (NameError, SyntaxError, ValueError):
# This case happens if the rhs is a string.
value = rhs
exec(u'self.config.%s = value' % lhs)
def _load_flag(self, cfg):
"""update self.config from a flag, which can be a dict or Config"""
if isinstance(cfg, (dict, Config)):
# don't clobber whole config sections, update
# each section from config:
for sec,c in iteritems(cfg):
self.config[sec].update(c)
else:
raise TypeError("Invalid flag: %r" % cfg)
# raw --identifier=value pattern
# but *also* accept '-' as wordsep, for aliases
# accepts: --foo=a
# --Class.trait=value
# --alias-name=value
# rejects: -foo=value
# --foo
# --Class.trait
kv_pattern = re.compile(r'\-\-[A-Za-z][\w\-]*(\.[\w\-]+)*\=.*')
# just flags, no assignments, with two *or one* leading '-'
# accepts: --foo
# -foo-bar-again
# rejects: --anything=anything
# --two.word
flag_pattern = re.compile(r'\-\-?\w+[\-\w]*$')
class KeyValueConfigLoader(CommandLineConfigLoader):
"""A config loader that loads key value pairs from the command line.
This allows command line options to be gives in the following form::
ipython --profile="foo" --InteractiveShell.autocall=False
"""
def __init__(self, argv=None, aliases=None, flags=None, **kw):
"""Create a key value pair config loader.
Parameters
----------
argv : list
A list that has the form of sys.argv[1:] which has unicode
elements of the form u"key=value". If this is None (default),
then sys.argv[1:] will be used.
aliases : dict
A dict of aliases for configurable traits.
Keys are the short aliases, Values are the resolved trait.
Of the form: `{'alias' : 'Configurable.trait'}`
flags : dict
A dict of flags, keyed by str name. Values can be Config objects,
dicts, or "key=value" strings. If Config or dict, when the flag
is triggered, the flag is loaded as ``self.config.update(m)``.
Returns
-------
config : Config
The resulting Config object.
Examples
--------
>>> from traitlets.config.loader import KeyValueConfigLoader
>>> cl = KeyValueConfigLoader()
>>> d = cl.load_config(["--A.name='brian'","--B.number=0"])
>>> sorted(d.items())
[('A', {'name': 'brian'}), ('B', {'number': 0})]
"""
super(KeyValueConfigLoader, self).__init__(**kw)
if argv is None:
argv = sys.argv[1:]
self.argv = argv
self.aliases = aliases or {}
self.flags = flags or {}
def clear(self):
super(KeyValueConfigLoader, self).clear()
self.extra_args = []
def _decode_argv(self, argv, enc=None):
"""decode argv if bytes, using stdin.encoding, falling back on default enc"""
uargv = []
if enc is None:
enc = DEFAULT_ENCODING
for arg in argv:
if not isinstance(arg, unicode_type):
# only decode if not already decoded
arg = arg.decode(enc)
uargv.append(arg)
return uargv
def load_config(self, argv=None, aliases=None, flags=None):
"""Parse the configuration and generate the Config object.
After loading, any arguments that are not key-value or
flags will be stored in self.extra_args - a list of
unparsed command-line arguments. This is used for
arguments such as input files or subcommands.
Parameters
----------
argv : list, optional
A list that has the form of sys.argv[1:] which has unicode
elements of the form u"key=value". If this is None (default),
then self.argv will be used.
aliases : dict
A dict of aliases for configurable traits.
Keys are the short aliases, Values are the resolved trait.
Of the form: `{'alias' : 'Configurable.trait'}`
flags : dict
A dict of flags, keyed by str name. Values can be Config objects
or dicts. When the flag is triggered, the config is loaded as
``self.config.update(cfg)``.
"""
self.clear()
if argv is None:
argv = self.argv
if aliases is None:
aliases = self.aliases
if flags is None:
flags = self.flags
# ensure argv is a list of unicode strings:
uargv = self._decode_argv(argv)
for idx,raw in enumerate(uargv):
# strip leading '-'
item = raw.lstrip('-')
if raw == '--':
# don't parse arguments after '--'
# this is useful for relaying arguments to scripts, e.g.
# ipython -i foo.py --matplotlib=qt -- args after '--' go-to-foo.py
self.extra_args.extend(uargv[idx+1:])
break
if kv_pattern.match(raw):
lhs,rhs = item.split('=',1)
# Substitute longnames for aliases.
if lhs in aliases:
lhs = aliases[lhs]
if '.' not in lhs:
# probably a mistyped alias, but not technically illegal
self.log.warning("Unrecognized alias: '%s', it will probably have no effect.", raw)
try:
self._exec_config_str(lhs, rhs)
except Exception:
raise ArgumentError("Invalid argument: '%s'" % raw)
elif flag_pattern.match(raw):
if item in flags:
cfg,help = flags[item]
self._load_flag(cfg)
else:
raise ArgumentError("Unrecognized flag: '%s'"%raw)
elif raw.startswith('-'):
kv = '--'+item
if kv_pattern.match(kv):
raise ArgumentError("Invalid argument: '%s', did you mean '%s'?"%(raw, kv))
else:
raise ArgumentError("Invalid argument: '%s'"%raw)
else:
# keep all args that aren't valid in a list,
# in case our parent knows what to do with them.
self.extra_args.append(item)
return self.config
class ArgParseConfigLoader(CommandLineConfigLoader):
"""A loader that uses the argparse module to load from the command line."""
def __init__(self, argv=None, aliases=None, flags=None, log=None, *parser_args, **parser_kw):
"""Create a config loader for use with argparse.
Parameters
----------
argv : optional, list
If given, used to read command-line arguments from, otherwise
sys.argv[1:] is used.
parser_args : tuple
A tuple of positional arguments that will be passed to the
constructor of :class:`argparse.ArgumentParser`.
parser_kw : dict
A dict of keyword arguments that will be passed to the
constructor of :class:`argparse.ArgumentParser`.
Returns
-------
config : Config
The resulting Config object.
"""
super(CommandLineConfigLoader, self).__init__(log=log)
self.clear()
if argv is None:
argv = sys.argv[1:]
self.argv = argv
self.aliases = aliases or {}
self.flags = flags or {}
self.parser_args = parser_args
self.version = parser_kw.pop("version", None)
kwargs = dict(argument_default=argparse.SUPPRESS)
kwargs.update(parser_kw)
self.parser_kw = kwargs
def load_config(self, argv=None, aliases=None, flags=None):
"""Parse command line arguments and return as a Config object.
Parameters
----------
argv : optional, list
If given, a list with the structure of sys.argv[1:] to parse
arguments from. If not given, the instance's self.argv attribute
(given at construction time) is used."""
self.clear()
if argv is None:
argv = self.argv
if aliases is None:
aliases = self.aliases
if flags is None:
flags = self.flags
self._create_parser(aliases, flags)
self._parse_args(argv)
self._convert_to_config()
return self.config
def get_extra_args(self):
if hasattr(self, 'extra_args'):
return self.extra_args
else:
return []
def _create_parser(self, aliases=None, flags=None):
self.parser = ArgumentParser(*self.parser_args, **self.parser_kw)
self._add_arguments(aliases, flags)
def _add_arguments(self, aliases=None, flags=None):
raise NotImplementedError("subclasses must implement _add_arguments")
def _parse_args(self, args):
"""self.parser->self.parsed_data"""
# decode sys.argv to support unicode command-line options
enc = DEFAULT_ENCODING
uargs = [py3compat.cast_unicode(a, enc) for a in args]
self.parsed_data, self.extra_args = self.parser.parse_known_args(uargs)
def _convert_to_config(self):
"""self.parsed_data->self.config"""
for k, v in iteritems(vars(self.parsed_data)):
exec("self.config.%s = v"%k, locals(), globals())
class KVArgParseConfigLoader(ArgParseConfigLoader):
"""A config loader that loads aliases and flags with argparse,
but will use KVLoader for the rest. This allows better parsing
of common args, such as `ipython -c 'print 5'`, but still gets
arbitrary config with `ipython --InteractiveShell.use_readline=False`"""
def _add_arguments(self, aliases=None, flags=None):
self.alias_flags = {}
# print aliases, flags
if aliases is None:
aliases = self.aliases
if flags is None:
flags = self.flags
paa = self.parser.add_argument
for key,value in iteritems(aliases):
if key in flags:
# flags
nargs = '?'
else:
nargs = None
if len(key) == 1:
paa('-'+key, '--'+key, type=unicode_type, dest=value, nargs=nargs)
else:
paa('--'+key, type=unicode_type, dest=value, nargs=nargs)
for key, (value, help) in iteritems(flags):
if key in self.aliases:
# a flag that shares its name with an alias: remember the flag config so it
# can be applied when the alias is used without a value
self.alias_flags[self.aliases[key]] = value
continue
if len(key) == 1:
paa('-'+key, '--'+key, action='append_const', dest='_flags', const=value)
else:
paa('--'+key, action='append_const', dest='_flags', const=value)
def _convert_to_config(self):
"""self.parsed_data->self.config, parse unrecognized extra args via KVLoader."""
# remove subconfigs list from namespace before transforming the Namespace
if '_flags' in self.parsed_data:
subcs = self.parsed_data._flags
del self.parsed_data._flags
else:
subcs = []
for k, v in iteritems(vars(self.parsed_data)):
if v is None:
# it was a flag that shares the name of an alias
subcs.append(self.alias_flags[k])
else:
# eval the KV assignment
self._exec_config_str(k, v)
for subc in subcs:
self._load_flag(subc)
if self.extra_args:
sub_parser = KeyValueConfigLoader(log=self.log)
sub_parser.load_config(self.extra_args)
self.config.merge(sub_parser.config)
self.extra_args = sub_parser.extra_args
def load_pyconfig_files(config_files, path):
"""Load multiple Python config files, merging each of them in turn.
Parameters
==========
config_files : list of str
List of config files names to load and merge into the config.
path : unicode
The full path to the location of the config files.
"""
config = Config()
for cf in config_files:
loader = PyFileConfigLoader(cf, path=path)
try:
next_config = loader.load_config()
except ConfigFileNotFound:
pass
except:
raise
else:
config.merge(next_config)
return config
| artistic-2.0 |
LennonLab/ScalingMicroBiodiversity | ExtraTests/lognormal/Prestons_a.py | 2 | 2188 | from __future__ import division
#from bigfloat import BigFloat, sqrt, exp, log, log2, erf, const_pi
import numpy as np
import math
from numpy import log, log2, exp, sqrt,log10
from scipy.optimize import fsolve
import scipy.optimize as opt
import matplotlib.pyplot as plt
from scipy.special import erf
import sys
pi = math.pi
GO = [3.6*(10**28), 10.1*(10**28)] # estimated open ocean bacteria; Whitman et al. 1998
Pm = [2.8*(10**27), 3.0*(10**27)] # estimated Prochlorococcus; Flombaum et al. 2013
Syn = [6.7*(10**26), 7.3*(10**26)] # estimated Synechococcus; Flombaum et al. 2013
Earth = [9.2*(10**29), 31.7*(10**29)] # estimated bacteria on Earth; Kallmeyer et al. 2012
SAR11 = [2.0*(10**28), 2.0*(10**28)] # estimated percent abundance of SAR11; Morris et al. (2002)
HGx = 10**14 # estimated bacteria in Human gut; add reference
HGy = 0.1169*HGx # estimated most abundant bacteria in Human gut; add reference
AvianN = 2.82*10**11
AvianNmax = 3*10**9
AvianS = 10500
def alpha1(a, Nmax, Nt):
return (sqrt(pi) * Nmax)/(2.0*a) * erf(log(2.0)/a) - Nt # find alpha
def s1(a):
return sqrt(pi)/a * exp( (log(2.0)/(2.0*a))**2.0 ) # Using equation 8
def alpha2(a, N, Nmax, Nmin):
y = sqrt(pi*Nmin*Nmax)/(2.0*a) * exp((a * log2(sqrt(Nmax/Nmin)))**2.0)
y = y * exp((log(2.0)/(2.0*a))**2.0)
y = y * erf(a * log2(sqrt(Nmax/Nmin)) - log(2.0)/(2.0*a)) + erf(a * log2(sqrt(Nmax/Nmin)) + log(2.0)/(2.0*a))
y -= N
return y # find alpha
def s2(a, Nmax, Nmin):
return sqrt(pi)/a * exp( (a * log2(sqrt(Nmax/Nmin)))**2) # Using equation 10
def getNmax(N):
return 10 ** (1.02*(log10(N)) - 0.71)
def empS(N, b=log10(3.92), slope=0.4): # macrobes: b = 0.86, slope = 0.23
return 10 ** (b + slope*(log10(N)))
#N = float(AvianN)
#Nmax = AvianNmax
#Nmin = 1.0
#Nmax = getNmax(AvianN)
N = float(max(GO))
Nmax = float(max(Syn))
Nmin = 1.0
#Nmax = getNmax(N)
############################################### Assuming Nmin = 1
guess = 0.099
guess = 0.1019
a = opt.fsolve(alpha2, guess, (N, Nmax, Nmin))[0]
print(guess, a)
S2 = s2(a, Nmax, Nmin)
print('S2:', '%.3e' % S2) # predicted from lognormal
S = empS(N)
print('empS:', '%.3e' % S) # predicted from scaling
| gpl-3.0 |
solarjoe/numpy | numpy/lib/npyio.py | 3 | 75574 | from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter, index as opindex
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like,
has_nested_fields, flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode, is_pathlib_path
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def __dir__(self):
"""
Enables dir(bagobj) to list the files in an NpzFile.
This also enables tab-completion in an interpreter or IPython.
"""
return object.__getattribute__(self, '_obj').keys()
def zipfile_factory(file, *args, **kwargs):
"""
Create a ZipFile.
Allows for Zip64, and the `file` argument can accept file, str, or
pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
constructor.
"""
if is_pathlib_path(file):
file = str(file)
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(file, *args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ``.npy`` extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
Allow loading pickled data. Default: True
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
Python 2 when using Python 3.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
    >>> isinstance(npz, np.lib.npyio.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False, allow_pickle=True,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
self.allow_pickle = allow_pickle
self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes,
allow_pickle=self.allow_pickle,
pickle_kwargs=self.pickle_kwargs)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ``.npy`` extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
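# Illustrative sketch (not part of the upstream module): NpzFile reads members
# lazily, so only the arrays that are actually accessed get parsed.
def _npzfile_lazy_access_demo():
    from io import BytesIO
    buf = BytesIO()
    np.savez(buf, big=np.zeros(1000), small=np.arange(3))
    buf.seek(0)
    npz = np.load(buf)                 # an NpzFile instance
    try:
        names = sorted(npz.files)      # ['big', 'small'] -- nothing parsed yet
        small = npz['small']           # only 'small.npy' is read here
        also_small = npz.f.small       # same array via attribute lookup
    finally:
        npz.close()
    return names, small, also_small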
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
Parameters
----------
file : file-like object, string, or pathlib.Path
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
allow_pickle : bool, optional
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
arrays will fail.
Default: True
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
is True, pickle will try to map the old Python 2 names to the new names
used in Python 3.
encoding : str, optional
What encoding to use when reading Python 2 strings. Only useful when
loading Python 2 generated pickled files on Python 3, which includes
npy/npz files containing object arrays. Values other than 'latin1',
'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
data. Default: 'ASCII'
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
ValueError
The file contains an object array, but allow_pickle=False given.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
elif is_pathlib_path(file):
fid = file.open("rb")
own_fid = True
else:
fid = file
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of NumPy arrays is loaded
# in. Pickle does not pass on the encoding information to
# NumPy. The unpickling code in numpy.core.multiarray is
# written to assume that unicode data appearing where binary
# should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
#
# Other encoding values can corrupt binary data, and we
# purposefully disallow them. For the same reason, the errors=
# argument is not exposed, as values other than 'strict'
# result can similarly silently corrupt numerical data.
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
if sys.version_info[0] >= 3:
pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = {}
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = b'PK\x03\x04'
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
# If the file size is less than N, we need to make sure not
# to seek past the beginning of the file
fid.seek(-min(N, len(magic)), 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Try a pickle
if not allow_pickle:
raise ValueError("allow_pickle=False, but file does not contain "
"non-pickled data")
try:
return pickle.load(fid, **pickle_kwargs)
except Exception:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
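# Illustrative sketch (not part of the upstream module): the three ways np.load
# is typically used -- an eager .npy read, a memory-mapped read, and an .npz
# archive opened as a context manager. The temporary paths are hypothetical.
def _load_usage_demo():
    import os
    import tempfile
    d = tempfile.mkdtemp()
    npy = os.path.join(d, 'a.npy')
    npz = os.path.join(d, 'ab.npz')
    np.save(npy, np.arange(5))
    np.savez(npz, a=np.arange(5), b=np.ones(2))
    plain = np.load(npy)                  # ordinary ndarray, fully in memory
    mapped = np.load(npy, mmap_mode='r')  # numpy.memmap, data stays on disk
    with np.load(npz) as data:            # NpzFile closes itself on exit
        a = data['a']
    return plain, mapped[1:3], a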
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file, str, or pathlib.Path
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string or Path, a ``.npy``
extension will be appended to the file name if it does not already
have one.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for disallowing
pickles include security (loading pickled data can execute arbitrary
code) and portability (pickled objects may not be loadable on different
Python installations, for example if the stored objects require libraries
that are not available, and not all pickled data is compatible between
Python 2 and Python 3).
Default: True
fix_imports : bool, optional
Only useful in forcing objects in object arrays on Python 3 to be
pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
will try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see the module docstring
of `numpy.lib.format` or the NumPy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
elif is_pathlib_path(file):
if not file.name.endswith('.npy'):
file = file.parent / (file.name + '.npy')
fid = file.open("wb")
own_fid = True
else:
fid = file
if sys.version_info[0] >= 3:
pickle_kwargs = dict(fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = None
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
finally:
if own_fid:
fid.close()
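# Illustrative sketch (not part of the upstream module): np.save appends '.npy'
# to string and pathlib.Path targets that do not already carry the extension.
def _save_extension_demo():
    import os
    import tempfile
    target = os.path.join(tempfile.mkdtemp(), 'weights')  # no extension given
    np.save(target, np.eye(2))
    return os.path.exists(target + '.npy')                # True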
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string or a Path, the
``.npz`` extension will be appended to the file name if it is not
already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `numpy.lib.format` or the
NumPy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string or a Path, the
``.npz`` extension will be appended to the file name if it is not
already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
numpy.save : Save a single array to a binary file in NumPy format.
numpy.savetxt : Save an array to a file as plain text.
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is compressed with
``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
in ``.npy`` format. For a description of the ``.npy`` format, see
`numpy.lib.format` or the NumPy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> test_array = np.random.rand(3, 2)
>>> test_vector = np.random.rand(4)
>>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
>>> loaded = np.load('/tmp/123.npz')
>>> print(np.array_equal(test_array, loaded['a']))
True
>>> print(np.array_equal(test_vector, loaded['b']))
True
"""
_savez(file, args, kwds, True)
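# Illustrative sketch (not part of the upstream module): savez and
# savez_compressed produce the same archive layout; only the zip compression
# (ZIP_STORED vs ZIP_DEFLATED) differs, which matters for compressible data.
def _savez_size_demo():
    from io import BytesIO
    data = np.zeros(10000)                 # highly compressible payload
    raw, packed = BytesIO(), BytesIO()
    np.savez(raw, x=data)
    np.savez_compressed(packed, x=data)
    # the compressed archive is much smaller for data like this
    return len(raw.getvalue()), len(packed.getvalue())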
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
elif is_pathlib_path(file):
if not file.name.endswith('.npz'):
file = file.parent / (file.name + '.npz')
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
# Since target file might be big enough to exceed capacity of a global
# temporary directory, create temp file side-by-side with the target file.
file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp')
fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir, suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val),
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
except IOError as exc:
raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
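# Illustrative sketch (not part of the upstream module): positional arrays are
# stored as 'arr_0', 'arr_1', ..., and reusing one of those names as a keyword
# triggers the ValueError raised in _savez above.
def _savez_naming_demo():
    from io import BytesIO
    buf = BytesIO()
    np.savez(buf, np.arange(3), np.ones(2), extra=np.zeros(1))
    buf.seek(0)
    npz = np.load(buf)
    names = sorted(npz.files)              # ['arr_0', 'arr_1', 'extra']
    npz.close()
    collision = False
    try:
        np.savez(BytesIO(), np.arange(3), arr_0=np.ones(2))
    except ValueError:
        collision = True                   # positional name clashes with keyword
    return names, collision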
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
def floatconv(x):
        x = x.lower()  # keep the lowered value so the '0x' check below also catches '0X'
if b'0x' in x:
return float.fromhex(asstr(x))
return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.longdouble):
return np.longdouble
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, complex):
return lambda x: complex(asstr(x))
elif issubclass(typ, np.bytes_):
return asbytes
else:
return asstr
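# Illustrative sketch (not part of the upstream module): the converters that
# _getconv hands back for a few dtypes, including the float.fromhex path taken
# when a field starts with '0x'. Inputs are bytes, as loadtxt supplies them.
def _getconv_demo():
    to_float = _getconv(np.dtype(float))
    to_complex = _getconv(np.dtype(complex))
    to_bool = _getconv(np.dtype(bool))
    # (3.5, 3.0 via float.fromhex, (1+2j), False)
    return to_float(b'3.5'), to_float(b'0x1.8p1'), to_complex(b'1+2j'), to_bool(b'0')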
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file, str, or pathlib.Path
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str or sequence, optional
The characters or list of characters used to indicate the start of a
comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : int or sequence, optional
Which columns to read, with 0 being the first. For example,
usecols = (1,4,5) will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
.. versionadded:: 1.11.0
        Also when a single column has to be read it is possible to use
        an integer instead of a tuple. E.g. ``usecols = 3`` reads the
        fourth column the same way as ``usecols = (3,)`` would.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
.. versionadded:: 1.10.0
The strings produced by the Python float.hex method can be used as
input for floats.
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
if comments is not None:
if isinstance(comments, (basestring, bytes)):
comments = [asbytes(comments)]
else:
comments = [asbytes(comment) for comment in comments]
# Compile regex for comments beforehand
comments = (re.escape(comment) for comment in comments)
regex_comments = re.compile(b'|'.join(comments))
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
# Allow usecols to be a single int or a sequence of ints
try:
usecols_as_list = list(usecols)
except TypeError:
usecols_as_list = [usecols]
for col_idx in usecols_as_list:
try:
opindex(col_idx)
except TypeError as e:
e.args = (
"usecols must be an int or a sequence of ints but "
"it contains at least one element of type %s" %
type(col_idx),
)
raise
# Fall back to existing code
usecols = usecols_as_list
fown = False
try:
if is_pathlib_path(fname):
fname = str(fname)
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
import gzip
fh = iter(gzip.GzipFile(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
elif sys.version_info[0] == 2:
fh = iter(open(fname, 'U'))
else:
fh = iter(open(fname))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
# not to be confused with the flatten_dtype we import...
def flatten_dtype_internal(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype_internal(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if tp.ndim > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
def split_line(line):
"""Chop off comments, strip, and split at delimiter.
Note that although the file is opened as text, this function
returns bytes.
"""
line = asbytes(line)
if comments is not None:
            line = regex_comments.split(line, maxsplit=1)[0]  # line is already bytes here
line = line.strip(b'\r\n')
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname, stacklevel=2)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype_internal(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[j] for j in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
% line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
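# Illustrative sketch (not part of the upstream module): selecting columns and
# attaching a per-column converter; note that converter inputs arrive as bytes.
def _loadtxt_converters_demo():
    from io import StringIO
    text = StringIO(u"1 10 50%\n2 20 75%\n")
    def frac(s):
        return float(s.strip(b'%')) / 100.0
    x, p = np.loadtxt(text, usecols=(0, 2), converters={2: frac}, unpack=True)
    return x, p      # array([ 1.,  2.]), array([ 0.5 ,  0.75])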
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if is_pathlib_path(fname):
fname = str(fname)
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'write'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
        # list of formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
try:
fh.write(asbytes(format % tuple(row) + newline))
except TypeError:
raise TypeError("Mismatch between array dtype ('%s') and "
"format specifier ('%s')"
% (str(X.dtype), format))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
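# Illustrative sketch (not part of the upstream module): per-column formats plus
# a commented header, which loadtxt skips transparently when reading back.
def _savetxt_roundtrip_demo():
    from io import BytesIO
    buf = BytesIO()
    data = np.column_stack([np.arange(3), np.linspace(0.0, 1.0, 3)])
    np.savetxt(buf, data, fmt=['%d', '%.3f'], delimiter=' ',
               header='index value', comments='# ')
    buf.seek(0)
    return np.loadtxt(buf)      # the '# index value' line is treated as a comment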
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
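# Illustrative sketch (not part of the upstream module): fromregex with a
# pre-compiled bytes pattern; each regex group becomes a field of the result.
def _fromregex_demo():
    from io import BytesIO
    buf = BytesIO(b"cpu0: 1.50 GHz\ncpu1: 2.25 GHz\n")
    pattern = re.compile(br"cpu(\d+):\s+([0-9.]+)")
    out = np.fromregex(buf, pattern, [('core', np.int64), ('ghz', np.float64)])
    return out['core'], out['ghz']      # array([0, 1]), array([ 1.5 ,  2.25])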
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None, deletechars=None,
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file, str, pathlib.Path, list of str, generator
File, filename, list, or generator to read. If the filename
extension is `.gz` or `.bz2`, the file is first decompressed. Note
that generators must return byte strings in Python 3k. The strings
in a list or produced by a generator are treated as lines.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. Excluded names are appended an underscore:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
      there must not be any header in the file (else a ValueError
      exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] NumPy User Guide, section `I/O with NumPy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
    --------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if is_pathlib_path(fname):
fname = str(fname)
if isinstance(fname, basestring):
if sys.version_info[0] == 2:
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
else:
fhd = iter(np.lib._datasource.open(fname, 'rb'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, list of strings, "
"or generator. Got %s instead." % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = (
b''.join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = b''
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = b''
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
    # Make sure `names` is a list (for Python 2.5 compatibility)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([b'']) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(b",")
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
#miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning, stacklevel=2)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
mdtype = list(zip(names, [bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, bool) for _ in dtype.names]
else:
mdtype = bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != b'']
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
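# Illustrative sketch (not part of the upstream module): filling or masking
# missing fields with genfromtxt. Byte input is used, as this version expects
# byte strings on Python 3.
def _genfromtxt_missing_demo():
    from io import BytesIO
    buf = BytesIO(b"1,,3\n4,5,\n")
    filled = np.genfromtxt(buf, delimiter=',', filling_values=-1)
    buf.seek(0)
    masked = np.genfromtxt(buf, delimiter=',', usemask=True)
    # filled has -1 where fields were empty; masked.mask is True there instead
    return filled, masked.mask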
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
# Set default kwargs for genfromtxt as relevant to csv import.
kwargs.setdefault("case_sensitive", "lower")
kwargs.setdefault("names", True)
kwargs.setdefault("delimiter", ",")
kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
| bsd-3-clause |
woodscn/scipy | scipy/special/c_misc/struve_convergence.py | 76 | 3725 | """
Convergence regions of the expansions used in ``struve.c``
Note that for v >> z both functions tend rapidly to 0,
and for v << -z, they tend to infinity.
The floating-point functions over/underflow in the lower left and right
corners of the figure.
Figure legend
=============
Red region
Power series is close (1e-12) to the mpmath result
Blue region
Asymptotic series is close to the mpmath result
Green region
Bessel series is close to the mpmath result
Dotted colored lines
Boundaries of the regions
Solid colored lines
Boundaries estimated by the routine itself. These will be used
for determining which of the results to use.
Black dashed line
The line z = 0.7*|v| + 12
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
try:
import mpmath
except ImportError:
from sympy import mpmath
def err_metric(a, b, atol=1e-290):
m = abs(a - b) / (atol + abs(b))
m[np.isinf(b) & (a == b)] = 0
return m
def do_plot(is_h=True):
from scipy.special._ufuncs import \
_struve_power_series, _struve_asymp_large_z, _struve_bessel_series
vs = np.linspace(-1000, 1000, 91)
zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]])
rp = _struve_power_series(vs[:,None], zs[None,:], is_h)
ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h)
rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h)
mpmath.mp.dps = 50
if is_h:
sh = lambda v, z: float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z)))
else:
sh = lambda v, z: float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z)))
ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:])
err_a = err_metric(ra[0], ex) + 1e-300
err_p = err_metric(rp[0], ex) + 1e-300
err_b = err_metric(rb[0], ex) + 1e-300
err_est_a = abs(ra[1]/ra[0])
err_est_p = abs(rp[1]/rp[0])
err_est_b = abs(rb[1]/rb[0])
z_cutoff = 0.7*abs(vs) + 12
levels = [-1000, -12]
plt.cla()
plt.hold(1)
plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1)
plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':'])
lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-'])
la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-'])
lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-'])
plt.clabel(lp, fmt={-1000: 'P', -12: 'P'})
plt.clabel(la, fmt={-1000: 'A', -12: 'A'})
plt.clabel(lb, fmt={-1000: 'B', -12: 'B'})
plt.plot(vs, z_cutoff, 'k--')
plt.xlim(vs.min(), vs.max())
plt.ylim(zs.min(), zs.max())
plt.xlabel('v')
plt.ylabel('z')
def main():
plt.clf()
plt.subplot(121)
do_plot(True)
plt.title('Struve H')
plt.subplot(122)
do_plot(False)
plt.title('Struve L')
plt.savefig('struve_convergence.png')
plt.show()
if __name__ == "__main__":
import os
import sys
if '--main' in sys.argv:
main()
else:
import subprocess
subprocess.call([sys.executable, os.path.join('..', '..', '..', 'runtests.py'),
'-g', '--python', __file__, '--main'])
| bsd-3-clause |
ashhher3/seaborn | seaborn/tests/test_axisgrid.py | 11 | 41072 | import warnings
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib as mpl
import matplotlib.pyplot as plt
from distutils.version import LooseVersion
import nose.tools as nt
import numpy.testing as npt
from numpy.testing.decorators import skipif
import pandas.util.testing as tm
from . import PlotTestCase
from .. import axisgrid as ag
from .. import rcmod
from ..palettes import color_palette
from ..distributions import kdeplot
from ..categorical import pointplot
from ..linearmodels import pairplot
from ..utils import categorical_order
rs = np.random.RandomState(0)
old_matplotlib = LooseVersion(mpl.__version__) < "1.4"
class TestFacetGrid(PlotTestCase):
df = pd.DataFrame(dict(x=rs.normal(size=60),
y=rs.gamma(4, size=60),
a=np.repeat(list("abc"), 20),
b=np.tile(list("mn"), 30),
c=np.tile(list("tuv"), 20),
d=np.tile(list("abcdefghij"), 6)))
def test_self_data(self):
g = ag.FacetGrid(self.df)
nt.assert_is(g.data, self.df)
def test_self_fig(self):
g = ag.FacetGrid(self.df)
nt.assert_is_instance(g.fig, plt.Figure)
def test_self_axes(self):
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
for ax in g.axes.flat:
nt.assert_is_instance(ax, plt.Axes)
def test_axes_array_size(self):
g1 = ag.FacetGrid(self.df)
nt.assert_equal(g1.axes.shape, (1, 1))
g2 = ag.FacetGrid(self.df, row="a")
nt.assert_equal(g2.axes.shape, (3, 1))
g3 = ag.FacetGrid(self.df, col="b")
nt.assert_equal(g3.axes.shape, (1, 2))
g4 = ag.FacetGrid(self.df, hue="c")
nt.assert_equal(g4.axes.shape, (1, 1))
g5 = ag.FacetGrid(self.df, row="a", col="b", hue="c")
nt.assert_equal(g5.axes.shape, (3, 2))
for ax in g5.axes.flat:
nt.assert_is_instance(ax, plt.Axes)
def test_single_axes(self):
g1 = ag.FacetGrid(self.df)
nt.assert_is_instance(g1.ax, plt.Axes)
g2 = ag.FacetGrid(self.df, row="a")
with nt.assert_raises(AttributeError):
g2.ax
g3 = ag.FacetGrid(self.df, col="a")
with nt.assert_raises(AttributeError):
g3.ax
g4 = ag.FacetGrid(self.df, col="a", row="b")
with nt.assert_raises(AttributeError):
g4.ax
def test_col_wrap(self):
g = ag.FacetGrid(self.df, col="d")
nt.assert_equal(g.axes.shape, (1, 10))
nt.assert_is(g.facet_axis(0, 8), g.axes[0, 8])
g_wrap = ag.FacetGrid(self.df, col="d", col_wrap=4)
nt.assert_equal(g_wrap.axes.shape, (10,))
nt.assert_is(g_wrap.facet_axis(0, 8), g_wrap.axes[8])
nt.assert_equal(g_wrap._ncol, 4)
nt.assert_equal(g_wrap._nrow, 3)
with nt.assert_raises(ValueError):
g = ag.FacetGrid(self.df, row="b", col="d", col_wrap=4)
df = self.df.copy()
df.loc[df.d == "j"] = np.nan
g_missing = ag.FacetGrid(df, col="d")
nt.assert_equal(g_missing.axes.shape, (1, 9))
g_missing_wrap = ag.FacetGrid(df, col="d", col_wrap=4)
nt.assert_equal(g_missing_wrap.axes.shape, (9,))
def test_normal_axes(self):
null = np.empty(0, object).flat
g = ag.FacetGrid(self.df)
npt.assert_array_equal(g._bottom_axes, g.axes.flat)
npt.assert_array_equal(g._not_bottom_axes, null)
npt.assert_array_equal(g._left_axes, g.axes.flat)
npt.assert_array_equal(g._not_left_axes, null)
npt.assert_array_equal(g._inner_axes, null)
g = ag.FacetGrid(self.df, col="c")
npt.assert_array_equal(g._bottom_axes, g.axes.flat)
npt.assert_array_equal(g._not_bottom_axes, null)
npt.assert_array_equal(g._left_axes, g.axes[:, 0].flat)
npt.assert_array_equal(g._not_left_axes, g.axes[:, 1:].flat)
npt.assert_array_equal(g._inner_axes, null)
g = ag.FacetGrid(self.df, row="c")
npt.assert_array_equal(g._bottom_axes, g.axes[-1, :].flat)
npt.assert_array_equal(g._not_bottom_axes, g.axes[:-1, :].flat)
npt.assert_array_equal(g._left_axes, g.axes.flat)
npt.assert_array_equal(g._not_left_axes, null)
npt.assert_array_equal(g._inner_axes, null)
g = ag.FacetGrid(self.df, col="a", row="c")
npt.assert_array_equal(g._bottom_axes, g.axes[-1, :].flat)
npt.assert_array_equal(g._not_bottom_axes, g.axes[:-1, :].flat)
npt.assert_array_equal(g._left_axes, g.axes[:, 0].flat)
npt.assert_array_equal(g._not_left_axes, g.axes[:, 1:].flat)
npt.assert_array_equal(g._inner_axes, g.axes[:-1, 1:].flat)
def test_wrapped_axes(self):
null = np.empty(0, object).flat
g = ag.FacetGrid(self.df, col="a", col_wrap=2)
npt.assert_array_equal(g._bottom_axes,
g.axes[np.array([1, 2])].flat)
npt.assert_array_equal(g._not_bottom_axes, g.axes[:1].flat)
npt.assert_array_equal(g._left_axes, g.axes[np.array([0, 2])].flat)
npt.assert_array_equal(g._not_left_axes, g.axes[np.array([1])].flat)
npt.assert_array_equal(g._inner_axes, null)
def test_figure_size(self):
g = ag.FacetGrid(self.df, row="a", col="b")
npt.assert_array_equal(g.fig.get_size_inches(), (6, 9))
g = ag.FacetGrid(self.df, row="a", col="b", size=6)
npt.assert_array_equal(g.fig.get_size_inches(), (12, 18))
g = ag.FacetGrid(self.df, col="c", size=4, aspect=.5)
npt.assert_array_equal(g.fig.get_size_inches(), (6, 4))
def test_figure_size_with_legend(self):
g1 = ag.FacetGrid(self.df, col="a", hue="c", size=4, aspect=.5)
npt.assert_array_equal(g1.fig.get_size_inches(), (6, 4))
g1.add_legend()
nt.assert_greater(g1.fig.get_size_inches()[0], 6)
g2 = ag.FacetGrid(self.df, col="a", hue="c", size=4, aspect=.5,
legend_out=False)
npt.assert_array_equal(g2.fig.get_size_inches(), (6, 4))
g2.add_legend()
npt.assert_array_equal(g2.fig.get_size_inches(), (6, 4))
def test_legend_data(self):
g1 = ag.FacetGrid(self.df, hue="a")
g1.map(plt.plot, "x", "y")
g1.add_legend()
palette = color_palette(n_colors=3)
nt.assert_equal(g1._legend.get_title().get_text(), "a")
a_levels = sorted(self.df.a.unique())
lines = g1._legend.get_lines()
nt.assert_equal(len(lines), len(a_levels))
for line, hue in zip(lines, palette):
nt.assert_equal(line.get_color(), hue)
labels = g1._legend.get_texts()
nt.assert_equal(len(labels), len(a_levels))
for label, level in zip(labels, a_levels):
nt.assert_equal(label.get_text(), level)
def test_legend_data_missing_level(self):
g1 = ag.FacetGrid(self.df, hue="a", hue_order=list("azbc"))
g1.map(plt.plot, "x", "y")
g1.add_legend()
b, g, r, p = color_palette(n_colors=4)
palette = [b, r, p]
nt.assert_equal(g1._legend.get_title().get_text(), "a")
a_levels = sorted(self.df.a.unique())
lines = g1._legend.get_lines()
nt.assert_equal(len(lines), len(a_levels))
for line, hue in zip(lines, palette):
nt.assert_equal(line.get_color(), hue)
labels = g1._legend.get_texts()
nt.assert_equal(len(labels), 4)
for label, level in zip(labels, list("azbc")):
nt.assert_equal(label.get_text(), level)
def test_get_boolean_legend_data(self):
self.df["b_bool"] = self.df.b == "m"
g1 = ag.FacetGrid(self.df, hue="b_bool")
g1.map(plt.plot, "x", "y")
g1.add_legend()
palette = color_palette(n_colors=2)
nt.assert_equal(g1._legend.get_title().get_text(), "b_bool")
b_levels = list(map(str, categorical_order(self.df.b_bool)))
lines = g1._legend.get_lines()
nt.assert_equal(len(lines), len(b_levels))
for line, hue in zip(lines, palette):
nt.assert_equal(line.get_color(), hue)
labels = g1._legend.get_texts()
nt.assert_equal(len(labels), len(b_levels))
for label, level in zip(labels, b_levels):
nt.assert_equal(label.get_text(), level)
def test_legend_options(self):
g1 = ag.FacetGrid(self.df, hue="b")
g1.map(plt.plot, "x", "y")
g1.add_legend()
def test_legendout_with_colwrap(self):
g = ag.FacetGrid(self.df, col="d", hue='b',
col_wrap=4, legend_out=False)
g.map(plt.plot, "x", "y", linewidth=3)
g.add_legend()
def test_subplot_kws(self):
g = ag.FacetGrid(self.df, subplot_kws=dict(axisbg="blue"))
for ax in g.axes.flat:
nt.assert_equal(ax.get_axis_bgcolor(), "blue")
@skipif(old_matplotlib)
def test_gridspec_kws(self):
ratios = [3, 1, 2]
sizes = [0.46, 0.15, 0.31]
gskws = dict(width_ratios=ratios, height_ratios=ratios)
g = ag.FacetGrid(self.df, col='c', row='a', gridspec_kws=gskws)
# clear out all ticks
for ax in g.axes.flat:
ax.set_xticks([])
ax.set_yticks([])
g.fig.tight_layout()
widths, heights = np.meshgrid(sizes, sizes)
for n, ax in enumerate(g.axes.flat):
npt.assert_almost_equal(
ax.get_position().width,
widths.flatten()[n],
decimal=2
)
npt.assert_almost_equal(
ax.get_position().height,
heights.flatten()[n],
decimal=2
)
@skipif(old_matplotlib)
def test_gridspec_kws_col_wrap(self):
ratios = [3, 1, 2, 1, 1]
sizes = [0.46, 0.15, 0.31]
gskws = dict(width_ratios=ratios)
with warnings.catch_warnings():
warnings.resetwarnings()
warnings.simplefilter("always")
npt.assert_warns(UserWarning, ag.FacetGrid, self.df, col='d',
col_wrap=5, gridspec_kws=gskws)
@skipif(not old_matplotlib)
    def test_gridspec_kws_old_mpl(self):
ratios = [3, 1, 2]
sizes = [0.46, 0.15, 0.31]
gskws = dict(width_ratios=ratios, height_ratios=ratios)
with warnings.catch_warnings():
warnings.resetwarnings()
warnings.simplefilter("always")
npt.assert_warns(UserWarning, ag.FacetGrid, self.df, col='c',
row='a', gridspec_kws=gskws)
def test_data_generator(self):
g = ag.FacetGrid(self.df, row="a")
d = list(g.facet_data())
nt.assert_equal(len(d), 3)
tup, data = d[0]
nt.assert_equal(tup, (0, 0, 0))
nt.assert_true((data["a"] == "a").all())
tup, data = d[1]
nt.assert_equal(tup, (1, 0, 0))
nt.assert_true((data["a"] == "b").all())
g = ag.FacetGrid(self.df, row="a", col="b")
d = list(g.facet_data())
nt.assert_equal(len(d), 6)
tup, data = d[0]
nt.assert_equal(tup, (0, 0, 0))
nt.assert_true((data["a"] == "a").all())
nt.assert_true((data["b"] == "m").all())
tup, data = d[1]
nt.assert_equal(tup, (0, 1, 0))
nt.assert_true((data["a"] == "a").all())
nt.assert_true((data["b"] == "n").all())
tup, data = d[2]
nt.assert_equal(tup, (1, 0, 0))
nt.assert_true((data["a"] == "b").all())
nt.assert_true((data["b"] == "m").all())
g = ag.FacetGrid(self.df, hue="c")
d = list(g.facet_data())
nt.assert_equal(len(d), 3)
tup, data = d[1]
nt.assert_equal(tup, (0, 0, 1))
nt.assert_true((data["c"] == "u").all())
def test_map(self):
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
g.map(plt.plot, "x", "y", linewidth=3)
lines = g.axes[0, 0].lines
nt.assert_equal(len(lines), 3)
line1, _, _ = lines
nt.assert_equal(line1.get_linewidth(), 3)
x, y = line1.get_data()
mask = (self.df.a == "a") & (self.df.b == "m") & (self.df.c == "t")
npt.assert_array_equal(x, self.df.x[mask])
npt.assert_array_equal(y, self.df.y[mask])
def test_map_dataframe(self):
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
plot = lambda x, y, data=None, **kws: plt.plot(data[x], data[y], **kws)
g.map_dataframe(plot, "x", "y", linestyle="--")
lines = g.axes[0, 0].lines
nt.assert_equal(len(lines), 3)
line1, _, _ = lines
nt.assert_equal(line1.get_linestyle(), "--")
x, y = line1.get_data()
mask = (self.df.a == "a") & (self.df.b == "m") & (self.df.c == "t")
npt.assert_array_equal(x, self.df.x[mask])
npt.assert_array_equal(y, self.df.y[mask])
def test_set(self):
g = ag.FacetGrid(self.df, row="a", col="b")
xlim = (-2, 5)
ylim = (3, 6)
xticks = [-2, 0, 3, 5]
yticks = [3, 4.5, 6]
g.set(xlim=xlim, ylim=ylim, xticks=xticks, yticks=yticks)
for ax in g.axes.flat:
npt.assert_array_equal(ax.get_xlim(), xlim)
npt.assert_array_equal(ax.get_ylim(), ylim)
npt.assert_array_equal(ax.get_xticks(), xticks)
npt.assert_array_equal(ax.get_yticks(), yticks)
def test_set_titles(self):
g = ag.FacetGrid(self.df, row="a", col="b")
g.map(plt.plot, "x", "y")
# Test the default titles
nt.assert_equal(g.axes[0, 0].get_title(), "a = a | b = m")
nt.assert_equal(g.axes[0, 1].get_title(), "a = a | b = n")
nt.assert_equal(g.axes[1, 0].get_title(), "a = b | b = m")
# Test a provided title
g.set_titles("{row_var} == {row_name} \/ {col_var} == {col_name}")
nt.assert_equal(g.axes[0, 0].get_title(), "a == a \/ b == m")
nt.assert_equal(g.axes[0, 1].get_title(), "a == a \/ b == n")
nt.assert_equal(g.axes[1, 0].get_title(), "a == b \/ b == m")
# Test a single row
g = ag.FacetGrid(self.df, col="b")
g.map(plt.plot, "x", "y")
# Test the default titles
nt.assert_equal(g.axes[0, 0].get_title(), "b = m")
nt.assert_equal(g.axes[0, 1].get_title(), "b = n")
# test with dropna=False
g = ag.FacetGrid(self.df, col="b", hue="b", dropna=False)
g.map(plt.plot, 'x', 'y')
def test_set_titles_margin_titles(self):
g = ag.FacetGrid(self.df, row="a", col="b", margin_titles=True)
g.map(plt.plot, "x", "y")
# Test the default titles
nt.assert_equal(g.axes[0, 0].get_title(), "b = m")
nt.assert_equal(g.axes[0, 1].get_title(), "b = n")
nt.assert_equal(g.axes[1, 0].get_title(), "")
# Test the row "titles"
nt.assert_equal(g.axes[0, 1].texts[0].get_text(), "a = a")
nt.assert_equal(g.axes[1, 1].texts[0].get_text(), "a = b")
# Test a provided title
g.set_titles(col_template="{col_var} == {col_name}")
nt.assert_equal(g.axes[0, 0].get_title(), "b == m")
nt.assert_equal(g.axes[0, 1].get_title(), "b == n")
nt.assert_equal(g.axes[1, 0].get_title(), "")
def test_set_ticklabels(self):
g = ag.FacetGrid(self.df, row="a", col="b")
g.map(plt.plot, "x", "y")
xlab = [l.get_text() + "h" for l in g.axes[1, 0].get_xticklabels()]
ylab = [l.get_text() for l in g.axes[1, 0].get_yticklabels()]
g.set_xticklabels(xlab)
g.set_yticklabels(rotation=90)
got_x = [l.get_text() + "h" for l in g.axes[1, 1].get_xticklabels()]
got_y = [l.get_text() for l in g.axes[0, 0].get_yticklabels()]
npt.assert_array_equal(got_x, xlab)
npt.assert_array_equal(got_y, ylab)
x, y = np.arange(10), np.arange(10)
df = pd.DataFrame(np.c_[x, y], columns=["x", "y"])
g = ag.FacetGrid(df).map(pointplot, "x", "y")
g.set_xticklabels(step=2)
got_x = [int(l.get_text()) for l in g.axes[0, 0].get_xticklabels()]
npt.assert_array_equal(x[::2], got_x)
g = ag.FacetGrid(self.df, col="d", col_wrap=5)
g.map(plt.plot, "x", "y")
g.set_xticklabels(rotation=45)
g.set_yticklabels(rotation=75)
for ax in g._bottom_axes:
for l in ax.get_xticklabels():
nt.assert_equal(l.get_rotation(), 45)
for ax in g._left_axes:
for l in ax.get_yticklabels():
nt.assert_equal(l.get_rotation(), 75)
def test_set_axis_labels(self):
g = ag.FacetGrid(self.df, row="a", col="b")
g.map(plt.plot, "x", "y")
xlab = 'xx'
ylab = 'yy'
g.set_axis_labels(xlab, ylab)
got_x = [ax.get_xlabel() for ax in g.axes[-1, :]]
got_y = [ax.get_ylabel() for ax in g.axes[:, 0]]
npt.assert_array_equal(got_x, xlab)
npt.assert_array_equal(got_y, ylab)
def test_axis_lims(self):
g = ag.FacetGrid(self.df, row="a", col="b", xlim=(0, 4), ylim=(-2, 3))
nt.assert_equal(g.axes[0, 0].get_xlim(), (0, 4))
nt.assert_equal(g.axes[0, 0].get_ylim(), (-2, 3))
def test_data_orders(self):
g = ag.FacetGrid(self.df, row="a", col="b", hue="c")
nt.assert_equal(g.row_names, list("abc"))
nt.assert_equal(g.col_names, list("mn"))
nt.assert_equal(g.hue_names, list("tuv"))
nt.assert_equal(g.axes.shape, (3, 2))
g = ag.FacetGrid(self.df, row="a", col="b", hue="c",
row_order=list("bca"),
col_order=list("nm"),
hue_order=list("vtu"))
nt.assert_equal(g.row_names, list("bca"))
nt.assert_equal(g.col_names, list("nm"))
nt.assert_equal(g.hue_names, list("vtu"))
nt.assert_equal(g.axes.shape, (3, 2))
g = ag.FacetGrid(self.df, row="a", col="b", hue="c",
row_order=list("bcda"),
col_order=list("nom"),
hue_order=list("qvtu"))
nt.assert_equal(g.row_names, list("bcda"))
nt.assert_equal(g.col_names, list("nom"))
nt.assert_equal(g.hue_names, list("qvtu"))
nt.assert_equal(g.axes.shape, (4, 3))
def test_palette(self):
rcmod.set()
g = ag.FacetGrid(self.df, hue="c")
nt.assert_equal(g._colors, color_palette(n_colors=3))
g = ag.FacetGrid(self.df, hue="d")
nt.assert_equal(g._colors, color_palette("husl", 10))
g = ag.FacetGrid(self.df, hue="c", palette="Set2")
nt.assert_equal(g._colors, color_palette("Set2", 3))
dict_pal = dict(t="red", u="green", v="blue")
list_pal = color_palette(["red", "green", "blue"], 3)
g = ag.FacetGrid(self.df, hue="c", palette=dict_pal)
nt.assert_equal(g._colors, list_pal)
list_pal = color_palette(["green", "blue", "red"], 3)
g = ag.FacetGrid(self.df, hue="c", hue_order=list("uvt"),
palette=dict_pal)
nt.assert_equal(g._colors, list_pal)
def test_hue_kws(self):
kws = dict(marker=["o", "s", "D"])
g = ag.FacetGrid(self.df, hue="c", hue_kws=kws)
g.map(plt.plot, "x", "y")
for line, marker in zip(g.axes[0, 0].lines, kws["marker"]):
nt.assert_equal(line.get_marker(), marker)
def test_dropna(self):
df = self.df.copy()
hasna = pd.Series(np.tile(np.arange(6), 10), dtype=np.float)
hasna[hasna == 5] = np.nan
df["hasna"] = hasna
g = ag.FacetGrid(df, dropna=False, row="hasna")
nt.assert_equal(g._not_na.sum(), 60)
g = ag.FacetGrid(df, dropna=True, row="hasna")
nt.assert_equal(g._not_na.sum(), 50)
class TestPairGrid(PlotTestCase):
rs = np.random.RandomState(sum(map(ord, "PairGrid")))
df = pd.DataFrame(dict(x=rs.normal(size=80),
y=rs.randint(0, 4, size=(80)),
z=rs.gamma(3, size=80),
a=np.repeat(list("abcd"), 20),
b=np.repeat(list("abcdefgh"), 10)))
def test_self_data(self):
g = ag.PairGrid(self.df)
nt.assert_is(g.data, self.df)
def test_ignore_datelike_data(self):
df = self.df.copy()
df['date'] = pd.date_range('2010-01-01', periods=len(df), freq='d')
result = ag.PairGrid(self.df).data
expected = df.drop('date', axis=1)
tm.assert_frame_equal(result, expected)
def test_self_fig(self):
g = ag.PairGrid(self.df)
nt.assert_is_instance(g.fig, plt.Figure)
def test_self_axes(self):
g = ag.PairGrid(self.df)
for ax in g.axes.flat:
nt.assert_is_instance(ax, plt.Axes)
def test_default_axes(self):
g = ag.PairGrid(self.df)
nt.assert_equal(g.axes.shape, (3, 3))
nt.assert_equal(g.x_vars, ["x", "y", "z"])
nt.assert_equal(g.y_vars, ["x", "y", "z"])
nt.assert_true(g.square_grid)
def test_specific_square_axes(self):
vars = ["z", "x"]
g = ag.PairGrid(self.df, vars=vars)
nt.assert_equal(g.axes.shape, (len(vars), len(vars)))
nt.assert_equal(g.x_vars, vars)
nt.assert_equal(g.y_vars, vars)
nt.assert_true(g.square_grid)
def test_specific_nonsquare_axes(self):
x_vars = ["x", "y"]
y_vars = ["z", "y", "x"]
g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)
nt.assert_equal(g.axes.shape, (len(y_vars), len(x_vars)))
nt.assert_equal(g.x_vars, x_vars)
nt.assert_equal(g.y_vars, y_vars)
nt.assert_true(not g.square_grid)
x_vars = ["x", "y"]
y_vars = "z"
g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)
nt.assert_equal(g.axes.shape, (len(y_vars), len(x_vars)))
nt.assert_equal(g.x_vars, list(x_vars))
nt.assert_equal(g.y_vars, list(y_vars))
nt.assert_true(not g.square_grid)
def test_specific_square_axes_with_array(self):
vars = np.array(["z", "x"])
g = ag.PairGrid(self.df, vars=vars)
nt.assert_equal(g.axes.shape, (len(vars), len(vars)))
nt.assert_equal(g.x_vars, list(vars))
nt.assert_equal(g.y_vars, list(vars))
nt.assert_true(g.square_grid)
def test_specific_nonsquare_axes_with_array(self):
x_vars = np.array(["x", "y"])
y_vars = np.array(["z", "y", "x"])
g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)
nt.assert_equal(g.axes.shape, (len(y_vars), len(x_vars)))
nt.assert_equal(g.x_vars, list(x_vars))
nt.assert_equal(g.y_vars, list(y_vars))
nt.assert_true(not g.square_grid)
def test_size(self):
g1 = ag.PairGrid(self.df, size=3)
npt.assert_array_equal(g1.fig.get_size_inches(), (9, 9))
g2 = ag.PairGrid(self.df, size=4, aspect=.5)
npt.assert_array_equal(g2.fig.get_size_inches(), (6, 12))
g3 = ag.PairGrid(self.df, y_vars=["z"], x_vars=["x", "y"],
size=2, aspect=2)
npt.assert_array_equal(g3.fig.get_size_inches(), (8, 2))
def test_map(self):
vars = ["x", "y", "z"]
g1 = ag.PairGrid(self.df)
g1.map(plt.scatter)
for i, axes_i in enumerate(g1.axes):
for j, ax in enumerate(axes_i):
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
g2 = ag.PairGrid(self.df, "a")
g2.map(plt.scatter)
for i, axes_i in enumerate(g2.axes):
for j, ax in enumerate(axes_i):
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
for k, k_level in enumerate("abcd"):
x_in_k = x_in[self.df.a == k_level]
y_in_k = y_in[self.df.a == k_level]
x_out, y_out = ax.collections[k].get_offsets().T
npt.assert_array_equal(x_in_k, x_out)
npt.assert_array_equal(y_in_k, y_out)
def test_map_nonsquare(self):
x_vars = ["x"]
y_vars = ["y", "z"]
g = ag.PairGrid(self.df, x_vars=x_vars, y_vars=y_vars)
g.map(plt.scatter)
x_in = self.df.x
for i, i_var in enumerate(y_vars):
ax = g.axes[i, 0]
y_in = self.df[i_var]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
def test_map_lower(self):
vars = ["x", "y", "z"]
g = ag.PairGrid(self.df)
g.map_lower(plt.scatter)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.triu_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
def test_map_upper(self):
vars = ["x", "y", "z"]
g = ag.PairGrid(self.df)
g.map_upper(plt.scatter)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.tril_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
@skipif(old_matplotlib)
def test_map_diag(self):
g1 = ag.PairGrid(self.df)
g1.map_diag(plt.hist)
for ax in g1.diag_axes:
nt.assert_equal(len(ax.patches), 10)
g2 = ag.PairGrid(self.df)
g2.map_diag(plt.hist, bins=15)
for ax in g2.diag_axes:
nt.assert_equal(len(ax.patches), 15)
g3 = ag.PairGrid(self.df, hue="a")
g3.map_diag(plt.hist)
for ax in g3.diag_axes:
nt.assert_equal(len(ax.patches), 40)
@skipif(old_matplotlib)
def test_map_diag_and_offdiag(self):
vars = ["x", "y", "z"]
g = ag.PairGrid(self.df)
g.map_offdiag(plt.scatter)
g.map_diag(plt.hist)
for ax in g.diag_axes:
nt.assert_equal(len(ax.patches), 10)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.diag_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
def test_palette(self):
rcmod.set()
g = ag.PairGrid(self.df, hue="a")
nt.assert_equal(g.palette, color_palette(n_colors=4))
g = ag.PairGrid(self.df, hue="b")
nt.assert_equal(g.palette, color_palette("husl", 8))
g = ag.PairGrid(self.df, hue="a", palette="Set2")
nt.assert_equal(g.palette, color_palette("Set2", 4))
dict_pal = dict(a="red", b="green", c="blue", d="purple")
list_pal = color_palette(["red", "green", "blue", "purple"], 4)
g = ag.PairGrid(self.df, hue="a", palette=dict_pal)
nt.assert_equal(g.palette, list_pal)
list_pal = color_palette(["purple", "blue", "red", "green"], 4)
g = ag.PairGrid(self.df, hue="a", hue_order=list("dcab"),
palette=dict_pal)
nt.assert_equal(g.palette, list_pal)
def test_hue_kws(self):
kws = dict(marker=["o", "s", "d", "+"])
g = ag.PairGrid(self.df, hue="a", hue_kws=kws)
g.map(plt.plot)
for line, marker in zip(g.axes[0, 0].lines, kws["marker"]):
nt.assert_equal(line.get_marker(), marker)
g = ag.PairGrid(self.df, hue="a", hue_kws=kws,
hue_order=list("dcab"))
g.map(plt.plot)
for line, marker in zip(g.axes[0, 0].lines, kws["marker"]):
nt.assert_equal(line.get_marker(), marker)
@skipif(old_matplotlib)
def test_hue_order(self):
order = list("dcab")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map(plt.plot)
for line, level in zip(g.axes[1, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "y"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_diag(plt.plot)
for line, level in zip(g.axes[0, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "x"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_lower(plt.plot)
for line, level in zip(g.axes[1, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "y"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_upper(plt.plot)
for line, level in zip(g.axes[0, 1].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "y"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "x"])
plt.close("all")
@skipif(old_matplotlib)
def test_hue_order_missing_level(self):
order = list("dcaeb")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map(plt.plot)
for line, level in zip(g.axes[1, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "y"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_diag(plt.plot)
for line, level in zip(g.axes[0, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "x"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_lower(plt.plot)
for line, level in zip(g.axes[1, 0].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "x"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "y"])
plt.close("all")
g = ag.PairGrid(self.df, hue="a", hue_order=order)
g.map_upper(plt.plot)
for line, level in zip(g.axes[0, 1].lines, order):
x, y = line.get_xydata().T
npt.assert_array_equal(x, self.df.loc[self.df.a == level, "y"])
npt.assert_array_equal(y, self.df.loc[self.df.a == level, "x"])
plt.close("all")
def test_nondefault_index(self):
df = self.df.copy().set_index("b")
vars = ["x", "y", "z"]
g1 = ag.PairGrid(df)
g1.map(plt.scatter)
for i, axes_i in enumerate(g1.axes):
for j, ax in enumerate(axes_i):
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
g2 = ag.PairGrid(df, "a")
g2.map(plt.scatter)
for i, axes_i in enumerate(g2.axes):
for j, ax in enumerate(axes_i):
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
for k, k_level in enumerate("abcd"):
x_in_k = x_in[self.df.a == k_level]
y_in_k = y_in[self.df.a == k_level]
x_out, y_out = ax.collections[k].get_offsets().T
npt.assert_array_equal(x_in_k, x_out)
npt.assert_array_equal(y_in_k, y_out)
@skipif(old_matplotlib)
def test_pairplot(self):
vars = ["x", "y", "z"]
g = pairplot(self.df)
for ax in g.diag_axes:
nt.assert_equal(len(ax.patches), 10)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.diag_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
@skipif(old_matplotlib)
def test_pairplot_reg(self):
vars = ["x", "y", "z"]
g = pairplot(self.df, kind="reg")
for ax in g.diag_axes:
nt.assert_equal(len(ax.patches), 10)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 2)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
nt.assert_equal(len(ax.lines), 1)
nt.assert_equal(len(ax.collections), 2)
for i, j in zip(*np.diag_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
@skipif(old_matplotlib)
def test_pairplot_kde(self):
vars = ["x", "y", "z"]
g = pairplot(self.df, diag_kind="kde")
for ax in g.diag_axes:
nt.assert_equal(len(ax.lines), 1)
for i, j in zip(*np.triu_indices_from(g.axes, 1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.tril_indices_from(g.axes, -1)):
ax = g.axes[i, j]
x_in = self.df[vars[j]]
y_in = self.df[vars[i]]
x_out, y_out = ax.collections[0].get_offsets().T
npt.assert_array_equal(x_in, x_out)
npt.assert_array_equal(y_in, y_out)
for i, j in zip(*np.diag_indices_from(g.axes)):
ax = g.axes[i, j]
nt.assert_equal(len(ax.collections), 0)
@skipif(old_matplotlib)
def test_pairplot_markers(self):
vars = ["x", "y", "z"]
markers = ["o", "x", "s", "d"]
g = pairplot(self.df, hue="a", vars=vars, markers=markers)
nt.assert_equal(g.hue_kws["marker"], markers)
plt.close("all")
with nt.assert_raises(ValueError):
g = pairplot(self.df, hue="a", vars=vars, markers=markers[:-2])
class TestJointGrid(PlotTestCase):
rs = np.random.RandomState(sum(map(ord, "JointGrid")))
x = rs.randn(100)
y = rs.randn(100)
x_na = x.copy()
x_na[10] = np.nan
x_na[20] = np.nan
data = pd.DataFrame(dict(x=x, y=y, x_na=x_na))
def test_margin_grid_from_arrays(self):
g = ag.JointGrid(self.x, self.y)
npt.assert_array_equal(g.x, self.x)
npt.assert_array_equal(g.y, self.y)
def test_margin_grid_from_series(self):
g = ag.JointGrid(self.data.x, self.data.y)
npt.assert_array_equal(g.x, self.x)
npt.assert_array_equal(g.y, self.y)
def test_margin_grid_from_dataframe(self):
g = ag.JointGrid("x", "y", self.data)
npt.assert_array_equal(g.x, self.x)
npt.assert_array_equal(g.y, self.y)
def test_margin_grid_axis_labels(self):
g = ag.JointGrid("x", "y", self.data)
xlabel, ylabel = g.ax_joint.get_xlabel(), g.ax_joint.get_ylabel()
nt.assert_equal(xlabel, "x")
nt.assert_equal(ylabel, "y")
g.set_axis_labels("x variable", "y variable")
xlabel, ylabel = g.ax_joint.get_xlabel(), g.ax_joint.get_ylabel()
nt.assert_equal(xlabel, "x variable")
nt.assert_equal(ylabel, "y variable")
def test_dropna(self):
g = ag.JointGrid("x_na", "y", self.data, dropna=False)
nt.assert_equal(len(g.x), len(self.x_na))
g = ag.JointGrid("x_na", "y", self.data, dropna=True)
nt.assert_equal(len(g.x), pd.notnull(self.x_na).sum())
def test_axlims(self):
lim = (-3, 3)
g = ag.JointGrid("x", "y", self.data, xlim=lim, ylim=lim)
nt.assert_equal(g.ax_joint.get_xlim(), lim)
nt.assert_equal(g.ax_joint.get_ylim(), lim)
nt.assert_equal(g.ax_marg_x.get_xlim(), lim)
nt.assert_equal(g.ax_marg_y.get_ylim(), lim)
def test_marginal_ticks(self):
g = ag.JointGrid("x", "y", self.data)
nt.assert_true(~len(g.ax_marg_x.get_xticks()))
nt.assert_true(~len(g.ax_marg_y.get_yticks()))
def test_bivariate_plot(self):
g = ag.JointGrid("x", "y", self.data)
g.plot_joint(plt.plot)
x, y = g.ax_joint.lines[0].get_xydata().T
npt.assert_array_equal(x, self.x)
npt.assert_array_equal(y, self.y)
def test_univariate_plot(self):
g = ag.JointGrid("x", "x", self.data)
g.plot_marginals(kdeplot)
_, y1 = g.ax_marg_x.lines[0].get_xydata().T
y2, _ = g.ax_marg_y.lines[0].get_xydata().T
npt.assert_array_equal(y1, y2)
def test_plot(self):
g = ag.JointGrid("x", "x", self.data)
g.plot(plt.plot, kdeplot)
x, y = g.ax_joint.lines[0].get_xydata().T
npt.assert_array_equal(x, self.x)
npt.assert_array_equal(y, self.x)
_, y1 = g.ax_marg_x.lines[0].get_xydata().T
y2, _ = g.ax_marg_y.lines[0].get_xydata().T
npt.assert_array_equal(y1, y2)
def test_annotate(self):
g = ag.JointGrid("x", "y", self.data)
rp = stats.pearsonr(self.x, self.y)
g.annotate(stats.pearsonr)
annotation = g.ax_joint.legend_.texts[0].get_text()
nt.assert_equal(annotation, "pearsonr = %.2g; p = %.2g" % rp)
g.annotate(stats.pearsonr, stat="correlation")
annotation = g.ax_joint.legend_.texts[0].get_text()
nt.assert_equal(annotation, "correlation = %.2g; p = %.2g" % rp)
def rsquared(x, y):
return stats.pearsonr(x, y)[0] ** 2
r2 = rsquared(self.x, self.y)
g.annotate(rsquared)
annotation = g.ax_joint.legend_.texts[0].get_text()
nt.assert_equal(annotation, "rsquared = %.2g" % r2)
template = "{stat} = {val:.3g} (p = {p:.3g})"
g.annotate(stats.pearsonr, template=template)
annotation = g.ax_joint.legend_.texts[0].get_text()
nt.assert_equal(annotation, template.format(stat="pearsonr",
val=rp[0], p=rp[1]))
def test_space(self):
g = ag.JointGrid("x", "y", self.data, space=0)
joint_bounds = g.ax_joint.bbox.bounds
marg_x_bounds = g.ax_marg_x.bbox.bounds
marg_y_bounds = g.ax_marg_y.bbox.bounds
nt.assert_equal(joint_bounds[2], marg_x_bounds[2])
nt.assert_equal(joint_bounds[3], marg_y_bounds[3])
| bsd-3-clause |
rseubert/scikit-learn | examples/applications/plot_out_of_core_classification.py | 255 | 13919 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
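# The out-of-core pattern exercised in the main loop further below, reduced to
# a minimal sketch (``text_minibatches`` and ``all_labels`` are placeholders
# for the mini-batch iterator and label set that the real script builds from
# the Reuters stream):
#
#     vectorizer = HashingVectorizer(n_features=2 ** 18)
#     classifier = SGDClassifier()
#     for X_text, y in text_minibatches:
#         X = vectorizer.transform(X_text)  # stateless, so no fit() needed
#         classifier.partial_fit(X, y, classes=all_labels)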
def _not_in_sphinx():
    # Hack to detect whether we are being run by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop: iterate over mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
#fig = plt.gcf()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
andyr0id/PyGFNN | examples/gfnn/example1F.py | 1 | 1657 | #!/usr/bin/env python
__author__ = 'Andrew J. Lambert, andy@andyroid.co.uk'
"""
example1F
A one-layer network with fixed internal connections
"""
from pygfnn.tools.plotting.gfnn import *
import pygfnn.tools.shortcuts as gfnn
import numpy as np
import timeit
import matplotlib.pyplot as plt
import scipy.io as sio
if __name__ == '__main__':
# Network parameters
oscParams = { 'a': 1, 'b1': -1, 'b2': -1000, 'd1': 0, 'd2': 0, 'e': 1 } # Limit cycle
learnParams = gfnn.NOLEARN_ALLFREQ
freqDist = { 'fspac': 'log', 'min': 0.5, 'max': 8 }
# Make network
n = gfnn.buildGFNN(196, oscParams = oscParams, freqDist = freqDist,
learnParams = learnParams)
n.recurrentConns[0].c0[:] = gfnn.getInitC(n, n, [(1,1), (1,2), (1,3), (1,4), (1,6), (1,8), (2,3), (3,4), (3,8)], thresh=0.01)
n.reset()
# First plots, showing initial connection state
ampFig1, phaseFig1 = plotConns(n.recurrentConns[0].c, freqDist['min'], freqDist['max'])
# Stimulus - 50 seconds of 1Hz sin
t = np.arange(0, 50, n['h'].dt)
x = np.sin(2 * np.pi * 1 * t) * 0.1
# Run the network
timer = timeit.default_timer
start = timer()
for i in range(len(t)):
out = n.activate(x[i])
end = timer()
print('Elapsed time is %f seconds' % (end - start))
if learnParams is not None:
# Second plots, showing final connection state
ampFig2, phaseFig2 = plotConns(n.recurrentConns[0].c, freqDist['min'], freqDist['max'])
Z = n['h'].outputbuffer[:n.offset]
fig1 = ampx(Z, n.dt, freqDist['min'], freqDist['max'])
fig2 = phasex(Z, n.dt, freqDist['min'], freqDist['max'])
plt.show()
| gpl-2.0 |
jereze/scikit-learn | examples/datasets/plot_iris_dataset.py | 283 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows are the samples and the columns are:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
danforthcenter/plantcv | tests/tests.py | 1 | 288502 | #!/usr/bin/env python
import pytest
import os
import shutil
import json
import numpy as np
import cv2
import sys
import pandas as pd
from plotnine import ggplot
from plantcv import plantcv as pcv
import plantcv.learn
import plantcv.parallel
import plantcv.utils
# Import matplotlib and use a null Template to block plotting to screen
# This will let us test debug = "plot"
import matplotlib
import matplotlib
matplotlib.use("Template")
import dask
from dask.distributed import Client
from skimage import img_as_ubyte
PARALLEL_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parallel_data")
TEST_TMPDIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", ".cache")
TEST_IMG_DIR = "images"
TEST_IMG_DIR2 = "images_w_date"
TEST_SNAPSHOT_DIR = "snapshots"
TEST_PIPELINE = os.path.join(PARALLEL_TEST_DATA, "plantcv-script.py")
META_FIELDS = {"imgtype": 0, "camera": 1, "frame": 2, "zoom": 3, "lifter": 4, "gain": 5, "exposure": 6, "id": 7}
VALID_META = {
# Camera settings
"camera": {
"label": "camera identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"imgtype": {
"label": "image type",
"datatype": "<class 'str'>",
"value": "none"
},
"zoom": {
"label": "camera zoom setting",
"datatype": "<class 'str'>",
"value": "none"
},
"exposure": {
"label": "camera exposure setting",
"datatype": "<class 'str'>",
"value": "none"
},
"gain": {
"label": "camera gain setting",
"datatype": "<class 'str'>",
"value": "none"
},
"frame": {
"label": "image series frame identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"lifter": {
"label": "imaging platform height setting",
"datatype": "<class 'str'>",
"value": "none"
},
# Date-Time
"timestamp": {
"label": "datetime of image",
"datatype": "<class 'datetime.datetime'>",
"value": None
},
# Sample attributes
"id": {
"label": "image identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"plantbarcode": {
"label": "plant barcode identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"treatment": {
"label": "treatment identifier",
"datatype": "<class 'str'>",
"value": "none"
},
"cartag": {
"label": "plant carrier identifier",
"datatype": "<class 'str'>",
"value": "none"
},
# Experiment attributes
"measurementlabel": {
"label": "experiment identifier",
"datatype": "<class 'str'>",
"value": "none"
},
# Other
"other": {
"label": "other identifier",
"datatype": "<class 'str'>",
"value": "none"
}
}
METADATA_COPROCESS = {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none',
'coimg': 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'
},
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'SV',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
METADATA_VIS_ONLY = {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
METADATA_NIR_ONLY = {
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'SV',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
# Set the temp directory for dask
dask.config.set(temporary_directory=TEST_TMPDIR)
# ##########################
# Tests setup function
# ##########################
def setup_function():
if not os.path.exists(TEST_TMPDIR):
os.mkdir(TEST_TMPDIR)
# ##############################
# Tests for the parallel subpackage
# ##############################
def test_plantcv_parallel_workflowconfig_save_config_file():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_save_config_file")
os.mkdir(cache_dir)
# Define output path/filename
template_file = os.path.join(cache_dir, "config.json")
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Save template file
config.save_config(config_file=template_file)
assert os.path.exists(template_file)
def test_plantcv_parallel_workflowconfig_import_config_file():
# Define input path/filename
config_file = os.path.join(PARALLEL_TEST_DATA, "workflow_config_template.json")
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# import config file
config.import_config(config_file=config_file)
assert config.cluster == "LocalCluster"
def test_plantcv_parallel_workflowconfig_validate_config():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_validate_config")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set valid values in config
config.input_dir = os.path.join(PARALLEL_TEST_DATA, "images")
config.json = os.path.join(cache_dir, "valid_config.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
# Validate config
assert config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_startdate():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_startdate")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set valid values in config
config.input_dir = os.path.join(PARALLEL_TEST_DATA, "images")
config.json = os.path.join(cache_dir, "valid_config.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
config.start_date = "2020-05-10"
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_enddate():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_enddate")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set valid values in config
config.input_dir = os.path.join(PARALLEL_TEST_DATA, "images")
config.json = os.path.join(cache_dir, "valid_config.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
config.end_date = "2020-05-10"
config.timestampformat = "%Y%m%d"
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_metadata_terms():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_metadata_terms")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set invalid values in config
# input_dir and json are not defined by default, but are required
# Set an incorrect metadata term
config.filename_metadata.append("invalid")
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_filename_metadata():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_filename_metadata")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set invalid values in config
# input_dir and json are not defined by default, but are required
# Do not set required filename_metadata
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_cluster():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_cluster")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
# Set invalid values in config
# input_dir and json are not defined by default, but are required
# Set invalid cluster type
config.cluster = "MyCluster"
# Validate config
assert not config.validate_config()
def test_plantcv_parallel_metadata_parser_snapshots():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_snapshots", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS", "camera": "SV"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == METADATA_VIS_ONLY
def test_plantcv_parallel_metadata_parser_snapshots_coimg():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_snapshots_coimg", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "FAKE"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == METADATA_VIS_ONLY
def test_plantcv_parallel_metadata_parser_images():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014"
config.end_date = "2014"
    config.timestampformat = '%Y'  # filenames contain no date, so the date range check and date_format are effectively ignored
config.imgformat = "jpg"
meta = plantcv.parallel.metadata_parser(config=config)
expected = {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'images', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': None,
'id': '117770',
'plantbarcode': 'none',
'treatment': 'none',
'cartag': 'none',
'measurementlabel': 'none',
'other': 'none'}
}
assert meta == expected
config.include_all_subdirs = False
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == expected
def test_plantcv_parallel_metadata_parser_multivalue_filter():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": ["VIS", "NIR"]}
config.imgformat = "jpg"
meta = plantcv.parallel.metadata_parser(config=config)
expected = {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR, 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': None,
'id': '117770',
'plantbarcode': 'none',
'treatment': 'none',
'cartag': 'none',
'measurementlabel': 'none',
'other': 'none'
},
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR, 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'SV',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': None,
'id': '117779',
'plantbarcode': 'none',
'treatment': 'none',
'cartag': 'none',
'measurementlabel': 'none',
'other': 'none'
}
}
assert meta == expected
def test_plantcv_parallel_metadata_parser_multivalue_filter_nomatch():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": ["VIS", "PSII"]}
config.imgformat = "jpg"
meta = plantcv.parallel.metadata_parser(config=config)
expected = {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR, 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': None,
'id': '117770',
'plantbarcode': 'none',
'treatment': 'none',
'cartag': 'none',
'measurementlabel': 'none',
'other': 'none'
}
}
assert meta == expected
def test_plantcv_parallel_metadata_parser_regex():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.delimiter = r'(VIS)_(SV)_(\d+)_(z1)_(h1)_(g0)_(e82)_(\d+)'
meta = plantcv.parallel.metadata_parser(config=config)
expected = {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'images', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': None,
'id': '117770',
'plantbarcode': 'none',
'treatment': 'none',
'cartag': 'none',
'measurementlabel': 'none',
'other': 'none'}
}
assert meta == expected
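# Illustrative sketch (not plantcv.parallel's parser): when config.delimiter is a regular
# expression, each capture group is expected to line up positionally with an entry in
# config.filename_metadata. The helper below is hypothetical and only demonstrates that
# group-to-term mapping for the filename used above.
def _example_regex_delimiter_mapping():
    """Map regex capture groups onto metadata terms (illustration only)."""
    import re
    fields = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
    pattern = r'(VIS)_(SV)_(\d+)_(z1)_(h1)_(g0)_(e82)_(\d+)'
    match = re.search(pattern, "VIS_SV_0_z1_h1_g0_e82_117770.jpg")
    return dict(zip(fields, match.groups())) if match else {}
    # e.g. {'imgtype': 'VIS', 'camera': 'SV', 'frame': '0', ..., 'id': '117770'}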
def test_plantcv_parallel_metadata_parser_images_outside_daterange():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR2)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images_outside_daterange",
"output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "timestamp"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "NIR"}
config.start_date = "1970-01-01 00_00_00"
config.end_date = "1970-01-01 00_00_00"
config.timestampformat = "%Y-%m-%d %H_%M_%S"
config.imgformat = "jpg"
config.delimiter = r"(NIR)_(SV)_(\d)_(z1)_(h1)_(g0)_(e65)_(\d{4}-\d{2}-\d{2} \d{2}_\d{2}_\d{2})"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {}
def test_plantcv_parallel_metadata_parser_no_default_dates():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_no_default_dates", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS", "camera": "SV", "id": "117770"}
config.start_date = None
config.end_date = None
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == METADATA_VIS_ONLY
def test_plantcv_parallel_workflowconfig_subdaily_timestampformat():
'''
    Timestamp formats with only hours and smaller units of time were failing if the script
    was run earlier in the day than the images were taken. This was fixed by defaulting
    end_date to 23-59-59 when no year-month-day directives are detected in the format.
'''
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR2)
    config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_subdaily_timestampformat", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "timestamp"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "NIR", "camera": "SV"}
config.start_date = None
config.end_date = None
config.timestampformat = "%H_%M_%S"
config.imgformat = "jpg"
config.delimiter = r"(NIR)_(SV)_(\d)_(z1)_(h1)_(g0)_(e65)_(\d{2}_\d{2}_\d{2})"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {
'NIR_SV_0_z1_h1_g0_e65_23_59_59.jpg': {
            'path': os.path.join(PARALLEL_TEST_DATA, 'images_w_date', 'NIR_SV_0_z1_h1_g0_e65_23_59_59.jpg'),
'imgtype': 'NIR',
'camera': 'SV',
'frame': '0',
'zoom': 'z1',
'lifter': 'h1',
'gain': 'g0',
'exposure': 'e65',
'timestamp': '23_59_59',
'measurementlabel': 'none',
            'cartag': 'none',
'id': 'none',
'treatment': 'none',
'plantbarcode': 'none',
'other': 'none'
}
}
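# Illustrative sketch of the behavior described in the docstring above (an assumption,
# not the actual PlantCV code): when the timestamp format carries no year/month/day
# directives, an end date derived from "now" earlier in the day would exclude later
# images, so the upper bound can default to the end of the day. The helper name is
# hypothetical.
def _example_default_subdaily_end_date(timestampformat="%H_%M_%S"):
    """Return a default end-of-day timestamp when the format is sub-daily (illustration only)."""
    from datetime import datetime
    if any(directive in timestampformat for directive in ("%Y", "%y", "%m", "%d", "%j")):
        return None  # the format includes a date, so no sub-daily default is needed
    return datetime(1970, 1, 1, 23, 59, 59).strftime(timestampformat)  # -> '23_59_59'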
def test_plantcv_parallel_check_date_range_wrongdateformat():
start_date = 10
end_date = 10
img_time = '2010-10-10'
with pytest.raises(SystemExit, match=r'does not match format'):
date_format = '%Y%m%d'
_ = plantcv.parallel.check_date_range(
start_date, end_date, img_time, date_format)
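# Illustrative sketch (an assumption, not plantcv.parallel.check_date_range itself): a
# date-range check can parse the image timestamp with the configured format and compare
# it against unix-time bounds; a format mismatch raises and can be reported to the user.
def _example_check_date_range(start_date, end_date, img_time, date_format):
    """Return True if img_time falls inside [start_date, end_date] in unix time (illustration only)."""
    from datetime import datetime, timezone
    timestamp = datetime.strptime(img_time, date_format).replace(tzinfo=timezone.utc).timestamp()
    return start_date <= timestamp <= end_date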
def test_plantcv_parallel_metadata_parser_snapshot_outside_daterange():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_snapshot_outside_daterange",
"output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "1970-01-01 00:00:00.0"
config.end_date = "1970-01-01 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {}
def test_plantcv_parallel_metadata_parser_fail_images():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_fail_images", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"cartag": "VIS"}
config.start_date = "1970-01-01 00:00:00.0"
config.end_date = "1970-01-01 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "NIR"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == METADATA_NIR_ONLY
def test_plantcv_parallel_metadata_parser_images_with_frame():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images_with_frame", "output.json")
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "NIR"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none',
'coimg': 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'
},
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'SV',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
def test_plantcv_parallel_metadata_parser_images_no_frame():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images_no_frame",
"output.json")
config.filename_metadata = ["imgtype", "camera", "X", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "NIR"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'SV',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': 'none',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none',
'coimg': 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'
},
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'SV',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': 'none',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
def test_plantcv_parallel_metadata_parser_images_no_camera():
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images_no_frame", "output.json")
config.filename_metadata = ["imgtype", "X", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.metadata_filters = {"imgtype": "VIS"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.coprocess = "NIR"
meta = plantcv.parallel.metadata_parser(config=config)
assert meta == {
'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
'camera': 'none',
'imgtype': 'VIS',
'zoom': 'z1',
'exposure': 'e82',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117770',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none',
'coimg': 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'
},
'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
'camera': 'none',
'imgtype': 'NIR',
'zoom': 'z1',
'exposure': 'e65',
'gain': 'g0',
'frame': '0',
'lifter': 'h1',
'timestamp': '2014-10-22 17:49:35.187',
'id': '117779',
'plantbarcode': 'Ca031AA010564',
'treatment': 'none',
'cartag': '2143',
'measurementlabel': 'C002ch_092214_biomass',
'other': 'none'
}
}
def test_plantcv_parallel_job_builder_single_image():
# Create cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_job_builder_single_image")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(cache_dir, "output.json")
config.tmp_dir = cache_dir
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
config.metadata_filters = {"imgtype": "VIS", "camera": "SV"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.other_args = ["--other", "on"]
config.writeimg = True
jobs = plantcv.parallel.job_builder(meta=METADATA_VIS_ONLY, config=config)
image_name = list(METADATA_VIS_ONLY.keys())[0]
result_file = os.path.join(cache_dir, image_name + '.txt')
expected = ['python', TEST_PIPELINE, '--image', METADATA_VIS_ONLY[image_name]['path'], '--outdir',
cache_dir, '--result', result_file, '--writeimg', '--other', 'on']
if len(expected) != len(jobs[0]):
assert False
else:
        assert all(i == j for i, j in zip(jobs[0], expected))
def test_plantcv_parallel_job_builder_coprocess():
# Create cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_job_builder_coprocess")
os.mkdir(cache_dir)
# Create config instance
config = plantcv.parallel.WorkflowConfig()
config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
config.json = os.path.join(cache_dir, "output.json")
config.tmp_dir = cache_dir
config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
config.workflow = TEST_PIPELINE
config.img_outdir = cache_dir
config.metadata_filters = {"imgtype": "VIS", "camera": "SV"}
config.start_date = "2014-10-21 00:00:00.0"
config.end_date = "2014-10-23 00:00:00.0"
config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
config.imgformat = "jpg"
config.other_args = ["--other", "on"]
config.writeimg = True
config.coprocess = "NIR"
jobs = plantcv.parallel.job_builder(meta=METADATA_COPROCESS, config=config)
img_names = list(METADATA_COPROCESS.keys())
vis_name = img_names[0]
vis_path = METADATA_COPROCESS[vis_name]['path']
result_file = os.path.join(cache_dir, vis_name + '.txt')
nir_name = img_names[1]
coresult_file = os.path.join(cache_dir, nir_name + '.txt')
expected = ['python', TEST_PIPELINE, '--image', vis_path, '--outdir', cache_dir, '--result', result_file,
'--coresult', coresult_file, '--writeimg', '--other', 'on']
if len(expected) != len(jobs[0]):
assert False
else:
        assert all(i == j for i, j in zip(jobs[0], expected))
def test_plantcv_parallel_multiprocess_create_dask_cluster_local():
client = plantcv.parallel.create_dask_cluster(cluster="LocalCluster", cluster_config={})
status = client.status
client.shutdown()
assert status == "running"
def test_plantcv_parallel_multiprocess_create_dask_cluster():
client = plantcv.parallel.create_dask_cluster(cluster="HTCondorCluster", cluster_config={"cores": 1,
"memory": "1GB",
"disk": "1GB"})
status = client.status
client.shutdown()
assert status == "running"
def test_plantcv_parallel_multiprocess_create_dask_cluster_invalid_cluster():
with pytest.raises(ValueError):
_ = plantcv.parallel.create_dask_cluster(cluster="Skynet", cluster_config={})
def test_plantcv_parallel_convert_datetime_to_unixtime():
unix_time = plantcv.parallel.convert_datetime_to_unixtime(timestamp_str="1970-01-01", date_format="%Y-%m-%d")
assert unix_time == 0
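# Illustrative sketch (an assumption, not the library call tested above): epoch
# arithmetic like this is why "1970-01-01" converts to unix time 0.
def _example_convert_datetime_to_unixtime(timestamp_str="1970-01-01", date_format="%Y-%m-%d"):
    """Return whole seconds since the unix epoch for a timestamp string (illustration only)."""
    from datetime import datetime
    delta = datetime.strptime(timestamp_str, date_format) - datetime(1970, 1, 1)
    return int(delta.total_seconds())  # -> 0 for the default arguments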
def test_plantcv_parallel_convert_datetime_to_unixtime_bad_strptime():
with pytest.raises(SystemExit):
_ = plantcv.parallel.convert_datetime_to_unixtime(timestamp_str="1970-01-01", date_format="%Y-%m")
def test_plantcv_parallel_multiprocess():
image_name = list(METADATA_VIS_ONLY.keys())[0]
    image_path = METADATA_VIS_ONLY[image_name]['path']  # 'path' already includes the image filename
result_file = os.path.join(TEST_TMPDIR, image_name + '.txt')
jobs = [['python', TEST_PIPELINE, '--image', image_path, '--outdir', TEST_TMPDIR, '--result', result_file,
'--writeimg', '--other', 'on']]
# Create a dask LocalCluster client
client = Client(n_workers=1)
plantcv.parallel.multiprocess(jobs, client=client)
assert os.path.exists(result_file)
def test_plantcv_parallel_process_results():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_process_results")
os.mkdir(cache_dir)
plantcv.parallel.process_results(job_dir=os.path.join(PARALLEL_TEST_DATA, "results"),
json_file=os.path.join(cache_dir, 'appended_results.json'))
plantcv.parallel.process_results(job_dir=os.path.join(PARALLEL_TEST_DATA, "results"),
json_file=os.path.join(cache_dir, 'appended_results.json'))
# Assert that the output JSON file matches the expected output JSON file
result_file = open(os.path.join(cache_dir, "appended_results.json"), "r")
results = json.load(result_file)
result_file.close()
expected_file = open(os.path.join(PARALLEL_TEST_DATA, "appended_results.json"))
expected = json.load(expected_file)
expected_file.close()
assert results == expected
def test_plantcv_parallel_process_results_new_output():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_process_results_new_output")
os.mkdir(cache_dir)
plantcv.parallel.process_results(job_dir=os.path.join(PARALLEL_TEST_DATA, "results"),
json_file=os.path.join(cache_dir, 'new_result.json'))
# Assert output matches expected values
result_file = open(os.path.join(cache_dir, "new_result.json"), "r")
results = json.load(result_file)
result_file.close()
expected_file = open(os.path.join(PARALLEL_TEST_DATA, "new_result.json"))
expected = json.load(expected_file)
expected_file.close()
assert results == expected
def test_plantcv_parallel_process_results_valid_json():
# Test when the file is a valid json file but doesn't contain expected keys
with pytest.raises(RuntimeError):
plantcv.parallel.process_results(job_dir=os.path.join(PARALLEL_TEST_DATA, "results"),
json_file=os.path.join(PARALLEL_TEST_DATA, "valid.json"))
def test_plantcv_parallel_process_results_invalid_json():
# Create a test tmp directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_process_results_invalid_json")
os.mkdir(cache_dir)
# Move the test data to the tmp directory
shutil.copytree(os.path.join(PARALLEL_TEST_DATA, "bad_results"), os.path.join(cache_dir, "bad_results"))
with pytest.raises(RuntimeError):
plantcv.parallel.process_results(job_dir=os.path.join(cache_dir, "bad_results"),
json_file=os.path.join(cache_dir, "bad_results", "invalid.txt"))
# ####################################################################################################################
# ########################################### PLANTCV MAIN PACKAGE ###################################################
matplotlib.use('Template')
TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
HYPERSPECTRAL_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "hyperspectral_data")
HYPERSPECTRAL_DATA = "darkReference"
HYPERSPECTRAL_WHITE = "darkReference_whiteReference"
HYPERSPECTRAL_DARK = "darkReference_darkReference"
HYPERSPECTRAL_HDR = "darkReference.hdr"
HYPERSPECTRAL_MASK = "darkReference_mask.png"
HYPERSPECTRAL_DATA_NO_DEFAULT = "darkReference2"
HYPERSPECTRAL_HDR_NO_DEFAULT = "darkReference2.hdr"
HYPERSPECTRAL_DATA_APPROX_PSEUDO = "darkReference3"
HYPERSPECTRAL_HDR_APPROX_PSEUDO = "darkReference3.hdr"
HYPERSPECTRAL_DATA_BAD_INTERLEAVE = "darkReference4"
HYPERSPECTRAL_HDR_BAD_INTERLEAVE = "darkReference4.hdr"
HYPERSPECTRAL_HDR_SMALL_RANGE = {'description': '{[HEADWALL Hyperspec III]}', 'samples': '800', 'lines': '1',
'bands': '978', 'header offset': '0', 'file type': 'ENVI Standard',
'interleave': 'bil', 'sensor type': 'Unknown', 'byte order': '0',
'default bands': '159,253,520', 'wavelength units': 'nm',
'wavelength': ['379.027', '379.663', '380.3', '380.936', '381.573', '382.209']}
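# Illustrative sketch (not PlantCV's ENVI reader): header values such as the wavelength
# list arrive as strings, so they typically need casting before any band lookup. The
# helper name is hypothetical.
def _example_wavelengths_as_floats(header=HYPERSPECTRAL_HDR_SMALL_RANGE):
    """Return the header's wavelength list as floats (illustration only)."""
    return [float(wavelength) for wavelength in header["wavelength"]]
    # -> [379.027, 379.663, 380.3, 380.936, 381.573, 382.209]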
FLUOR_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "photosynthesis_data")
FLUOR_IMG = "PSII_PSD_supopt_temp_btx623_22_rep1.DAT"
TEST_COLOR_DIM = (2056, 2454, 3)
TEST_GRAY_DIM = (2056, 2454)
TEST_BINARY_DIM = TEST_GRAY_DIM
TEST_INPUT_COLOR = "input_color_img.jpg"
TEST_INPUT_GRAY = "input_gray_img.jpg"
TEST_INPUT_GRAY_SMALL = "input_gray_img_small.jpg"
TEST_INPUT_BINARY = "input_binary_img.png"
# Image from http://www.libpng.org/pub/png/png-OwlAlpha.html
# This image may be used, edited and reproduced freely.
TEST_INPUT_RGBA = "input_rgba.png"
TEST_INPUT_BAYER = "bayer_img.png"
TEST_INPUT_ROI_CONTOUR = "input_roi_contour.npz"
TEST_INPUT_ROI_HIERARCHY = "input_roi_hierarchy.npz"
TEST_INPUT_CONTOURS = "input_contours.npz"
TEST_INPUT_OBJECT_CONTOURS = "input_object_contours.npz"
TEST_INPUT_OBJECT_HIERARCHY = "input_object_hierarchy.npz"
TEST_VIS = "VIS_SV_0_z300_h1_g0_e85_v500_93054.png"
TEST_NIR = "NIR_SV_0_z300_h1_g0_e15000_v500_93059.png"
TEST_VIS_TV = "VIS_TV_0_z300_h1_g0_e85_v500_93054.png"
TEST_NIR_TV = "NIR_TV_0_z300_h1_g0_e15000_v500_93059.png"
TEST_INPUT_MASK = "input_mask_binary.png"
TEST_INPUT_MASK_OOB = "mask_outbounds.png"
TEST_INPUT_MASK_RESIZE = "input_mask_resize.png"
TEST_INPUT_NIR_MASK = "input_nir.png"
TEST_INPUT_FDARK = "FLUO_TV_dark.png"
TEST_INPUT_FDARK_LARGE = "FLUO_TV_DARK_large"
TEST_INPUT_FMIN = "FLUO_TV_min.png"
TEST_INPUT_FMAX = "FLUO_TV_max.png"
TEST_INPUT_FMASK = "FLUO_TV_MASK.png"
TEST_INPUT_GREENMAG = "input_green-magenta.jpg"
TEST_INPUT_MULTI = "multi_ori_image.jpg"
TEST_INPUT_MULTI_MASK = "multi_ori_mask.jpg"
TEST_INPUT_MULTI_OBJECT = "roi_objects.npz"
TEST_INPUT_MULTI_CONTOUR = "multi_contours.npz"
TEST_INPUT_ClUSTER_CONTOUR = "clusters_i.npz"
TEST_INPUT_MULTI_HIERARCHY = "multi_hierarchy.npz"
TEST_INPUT_VISUALIZE_CONTOUR = "roi_objects_visualize.npz"
TEST_INPUT_VISUALIZE_HIERARCHY = "roi_obj_hierarchy_visualize.npz"
TEST_INPUT_VISUALIZE_CLUSTERS = "clusters_i_visualize.npz"
TEST_INPUT_VISUALIZE_BACKGROUND = "visualize_background_img.png"
TEST_INPUT_GENOTXT = "cluster_names.txt"
TEST_INPUT_GENOTXT_TOO_MANY = "cluster_names_too_many.txt"
TEST_INPUT_CROPPED = 'cropped_img.jpg'
TEST_INPUT_CROPPED_MASK = 'cropped-mask.png'
TEST_INPUT_MARKER = 'seed-image.jpg'
TEST_INPUT_SKELETON = 'input_skeleton.png'
TEST_INPUT_SKELETON_PRUNED = 'input_pruned_skeleton.png'
TEST_FOREGROUND = "TEST_FOREGROUND.jpg"
TEST_BACKGROUND = "TEST_BACKGROUND.jpg"
TEST_PDFS = "naive_bayes_pdfs.txt"
TEST_PDFS_BAD = "naive_bayes_pdfs_bad.txt"
TEST_VIS_SMALL = "setaria_small_vis.png"
TEST_MASK_SMALL = "setaria_small_mask.png"
TEST_VIS_COMP_CONTOUR = "setaria_composed_contours.npz"
TEST_ACUTE_RESULT = np.asarray([[[119, 285]], [[151, 280]], [[168, 267]], [[168, 262]], [[171, 261]], [[224, 269]],
[[246, 271]], [[260, 277]], [[141, 248]], [[183, 194]], [[188, 237]], [[173, 240]],
[[186, 260]], [[147, 244]], [[163, 246]], [[173, 268]], [[170, 272]], [[151, 320]],
[[195, 289]], [[228, 272]], [[210, 272]], [[209, 247]], [[210, 232]]])
TEST_VIS_SMALL_PLANT = "setaria_small_plant_vis.png"
TEST_MASK_SMALL_PLANT = "setaria_small_plant_mask.png"
TEST_VIS_COMP_CONTOUR_SMALL_PLANT = "setaria_small_plant_composed_contours.npz"
TEST_SAMPLED_RGB_POINTS = "sampled_rgb_points.txt"
TEST_TARGET_IMG = "target_img.png"
TEST_TARGET_IMG_WITH_HEXAGON = "target_img_w_hexagon.png"
TEST_TARGET_IMG_TRIANGLE = "target_img copy.png"
TEST_SOURCE1_IMG = "source1_img.png"
TEST_SOURCE2_IMG = "source2_img.png"
TEST_TARGET_MASK = "mask_img.png"
TEST_TARGET_IMG_COLOR_CARD = "color_card_target.png"
TEST_SOURCE2_MASK = "mask2_img.png"
TEST_TARGET_MATRIX = "target_matrix.npz"
TEST_SOURCE1_MATRIX = "source1_matrix.npz"
TEST_SOURCE2_MATRIX = "source2_matrix.npz"
TEST_MATRIX_B1 = "matrix_b1.npz"
TEST_MATRIX_B2 = "matrix_b2.npz"
TEST_TRANSFORM1 = "transformation_matrix1.npz"
TEST_MATRIX_M1 = "matrix_m1.npz"
TEST_MATRIX_M2 = "matrix_m2.npz"
TEST_S1_CORRECTED = "source_corrected.png"
TEST_SKELETON_OBJECTS = "skeleton_objects.npz"
TEST_SKELETON_HIERARCHIES = "skeleton_hierarchies.npz"
TEST_THERMAL_ARRAY = "thermal_img.npz"
TEST_THERMAL_IMG_MASK = "thermal_img_mask.png"
TEST_INPUT_THERMAL_CSV = "FLIR2600.csv"
# TEST_BAD_MASK = "bad_mask_test.pkl"
# TEST_IM_BAD_NONE = "bad_mask_none.pkl"
# TEST_IM_BAD_BOTH = "bad_mask_both.pkl"
# TEST_IM_BAD_NAN = "bad_mask_nan.pkl"
# TEST_IM_BAD_INF = "bad_mask_inf.pkl"
PIXEL_VALUES = "pixel_inspector_rgb_values.txt"
# ##########################
# Tests for the main package
# ##########################
@pytest.mark.parametrize("debug", ["print", "plot"])
def test_plantcv_debug(debug, tmpdir):
from plantcv.plantcv._debug import _debug
# Create a test tmp directory
img_outdir = tmpdir.mkdir("sub")
pcv.params.debug = debug
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
_debug(visual=img, filename=os.path.join(img_outdir, TEST_INPUT_COLOR))
assert True
@pytest.mark.parametrize("datatype,value", [[list, []], [int, 2], [float, 2.2], [bool, True], [str, "2"], [dict, {}],
[tuple, ()], [None, None]])
def test_plantcv_outputs_add_observation(datatype, value):
# Create output instance
outputs = pcv.Outputs()
outputs.add_observation(sample='default', variable='test', trait='test variable', method='type', scale='none',
datatype=datatype, value=value, label=[])
assert outputs.observations["default"]["test"]["value"] == value
def test_plantcv_outputs_add_observation_invalid_type():
# Create output instance
outputs = pcv.Outputs()
with pytest.raises(RuntimeError):
outputs.add_observation(sample='default', variable='test', trait='test variable', method='type', scale='none',
datatype=list, value=np.array([2]), label=[])
def test_plantcv_outputs_save_results_json_newfile(tmpdir):
# Create a test tmp directory
cache_dir = tmpdir.mkdir("sub")
outfile = os.path.join(cache_dir, "results.json")
# Create output instance
outputs = pcv.Outputs()
outputs.add_observation(sample='default', variable='test', trait='test variable', method='test', scale='none',
datatype=str, value="test", label="none")
outputs.save_results(filename=outfile, outformat="json")
with open(outfile, "r") as fp:
results = json.load(fp)
assert results["observations"]["default"]["test"]["value"] == "test"
def test_plantcv_outputs_save_results_json_existing_file(tmpdir):
# Create a test tmp directory
cache_dir = tmpdir.mkdir("sub")
outfile = os.path.join(cache_dir, "data_results.txt")
shutil.copyfile(os.path.join(TEST_DATA, "data_results.txt"), outfile)
# Create output instance
outputs = pcv.Outputs()
outputs.add_observation(sample='default', variable='test', trait='test variable', method='test', scale='none',
datatype=str, value="test", label="none")
outputs.save_results(filename=outfile, outformat="json")
with open(outfile, "r") as fp:
results = json.load(fp)
assert results["observations"]["default"]["test"]["value"] == "test"
def test_plantcv_outputs_save_results_csv(tmpdir):
# Create a test tmp directory
cache_dir = tmpdir.mkdir("sub")
outfile = os.path.join(cache_dir, "results.csv")
testfile = os.path.join(TEST_DATA, "data_results.csv")
# Create output instance
outputs = pcv.Outputs()
outputs.add_observation(sample='default', variable='string', trait='string variable', method='string', scale='none',
datatype=str, value="string", label="none")
outputs.add_observation(sample='default', variable='boolean', trait='boolean variable', method='boolean',
scale='none', datatype=bool, value=True, label="none")
outputs.add_observation(sample='default', variable='list', trait='list variable', method='list',
scale='none', datatype=list, value=[1, 2, 3], label=[1, 2, 3])
outputs.add_observation(sample='default', variable='tuple', trait='tuple variable', method='tuple',
scale='none', datatype=tuple, value=(1, 2), label=(1, 2))
outputs.add_observation(sample='default', variable='tuple_list', trait='list of tuples variable',
method='tuple_list', scale='none', datatype=list, value=[(1, 2), (3, 4)], label=[1, 2])
outputs.save_results(filename=outfile, outformat="csv")
with open(outfile, "r") as fp:
results = fp.read()
with open(testfile, "r") as fp:
test_results = fp.read()
assert results == test_results
def test_plantcv_acute():
# Read in test data
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.acute(obj=obj_contour, win=5, thresh=15, mask=mask)
_ = pcv.acute(obj=obj_contour, win=0, thresh=15, mask=mask)
_ = pcv.acute(obj=np.array(([[213, 190]], [[83, 61]], [[149, 246]])), win=84, thresh=192, mask=mask)
_ = pcv.acute(obj=np.array(([[3, 29]], [[31, 102]], [[161, 63]])), win=148, thresh=56, mask=mask)
_ = pcv.acute(obj=np.array(([[103, 154]], [[27, 227]], [[152, 83]])), win=35, thresh=0, mask=mask)
# Test with debug = None
pcv.params.debug = None
_ = pcv.acute(obj=np.array(([[103, 154]], [[27, 227]], [[152, 83]])), win=35, thresh=0, mask=mask)
_ = pcv.acute(obj=obj_contour, win=0, thresh=15, mask=mask)
homology_pts = pcv.acute(obj=obj_contour, win=5, thresh=15, mask=mask)
    assert all(i == j for i, j in zip(np.shape(homology_pts), (29, 1, 2)))
def test_plantcv_acute_vertex():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_acute_vertex")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img, label="prefix")
_ = pcv.acute_vertex(obj=[], win=5, thresh=15, sep=5, img=img)
_ = pcv.acute_vertex(obj=[], win=.01, thresh=.01, sep=1, img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img)
# Test with debug = None
pcv.params.debug = None
acute = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img)
    assert all(i == j for i, j in zip(np.shape(acute), np.shape(TEST_ACUTE_RESULT)))
pcv.outputs.clear()
def test_plantcv_acute_vertex_bad_obj():
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
obj_contour = np.array([])
pcv.params.debug = None
result = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img)
    assert all(i == j for i, j in zip(result, [0, ("NA", "NA")]))
pcv.outputs.clear()
def test_plantcv_analyze_bound_horizontal():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_horizontal")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img_above_bound_only = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL_PLANT))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=300, label="prefix")
pcv.outputs.clear()
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=100)
_ = pcv.analyze_bound_horizontal(img=img_above_bound_only, obj=object_contours, mask=mask, line_position=1756)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=1756)
# Test with debug = None
pcv.params.debug = None
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=1756)
assert len(pcv.outputs.observations["default"]) == 7
def test_plantcv_analyze_bound_horizontal_grayscale_image():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with a grayscale reference image and debug="plot"
pcv.params.debug = "plot"
boundary_img1 = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=1756)
assert len(np.shape(boundary_img1)) == 3
def test_plantcv_analyze_bound_horizontal_neg_y():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_horizontal_neg_y")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
    # Test with debug="plot", line positions that will trigger a negative y
pcv.params.debug = "plot"
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=-1000)
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=0)
_ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=2056)
assert pcv.outputs.observations['default']['height_above_reference']['value'] == 713
def test_plantcv_analyze_bound_vertical():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_vertical")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1000, label="prefix")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1000)
# Test with debug = None
pcv.params.debug = None
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1000)
assert pcv.outputs.observations['default']['width_left_reference']['value'] == 94
def test_plantcv_analyze_bound_vertical_grayscale_image():
# Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_vertical_grayscale_image")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with a grayscale reference image and debug="plot"
pcv.params.debug = "plot"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1000)
assert pcv.outputs.observations['default']['width_left_reference']['value'] == 94
pcv.outputs.clear()
def test_plantcv_analyze_bound_vertical_neg_x():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_vertical_neg_x")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with debug="plot", line position that will trigger -x
pcv.params.debug = "plot"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=2454)
assert pcv.outputs.observations['default']['width_left_reference']['value'] == 441
def test_plantcv_analyze_bound_vertical_small_x():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_vertical_small_x")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
object_contours = contours_npz['arr_0']
# Test with debug='plot', line position that will trigger -x, and two channel object
pcv.params.debug = "plot"
_ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1)
assert pcv.outputs.observations['default']['width_right_reference']['value'] == 441
def test_plantcv_analyze_color():
# Clear previous outputs
pcv.outputs.clear()
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type="all")
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None, label="prefix")
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None)
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='lab')
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='hsv')
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None)
# Test with debug = "print"
# pcv.params.debug = "print"
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type="all")
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None, label="prefix")
# Test with debug = "plot"
# pcv.params.debug = "plot"
# _ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None)
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='lab')
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='hsv')
# _ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type=None)
# Test with debug = None
# pcv.params.debug = None
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='rgb')
assert pcv.outputs.observations['default']['hue_median']['value'] == 84.0
def test_plantcv_analyze_color_incorrect_image():
img_binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
with pytest.raises(RuntimeError):
_ = pcv.analyze_color(rgb_img=img_binary, mask=mask, hist_plot_type=None)
def test_plantcv_analyze_color_bad_hist_type():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
pcv.params.debug = "plot"
with pytest.raises(RuntimeError):
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type='bgr')
def test_plantcv_analyze_color_incorrect_hist_plot_type():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = "plot"
_ = pcv.analyze_color(rgb_img=img, mask=mask, hist_plot_type="bgr")
def test_plantcv_analyze_nir():
# Clear previous outputs
pcv.outputs.clear()
# Test with debug=None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
_ = pcv.analyze_nir_intensity(gray_img=img, mask=mask, bins=256, histplot=True)
result = len(pcv.outputs.observations['default']['nir_frequencies']['value'])
assert result == 256
def test_plantcv_analyze_nir_16bit():
# Clear previous outputs
pcv.outputs.clear()
# Test with debug=None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
_ = pcv.analyze_nir_intensity(gray_img=np.uint16(img), mask=mask, bins=256, histplot=True)
result = len(pcv.outputs.observations['default']['nir_frequencies']['value'])
assert result == 256
def test_plantcv_analyze_object():
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
obj_contour = contours_npz['arr_0']
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
pcv.outputs.clear()
assert len(obj_images) != 0
def test_plantcv_analyze_object_grayscale_input():
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
obj_contour = contours_npz['arr_0']
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert len(obj_images) != 1
def test_plantcv_analyze_object_zero_slope():
# Test with debug = None
pcv.params.debug = None
# Create a test image
img = np.zeros((50, 50, 3), dtype=np.uint8)
img[10:11, 10:40, 0] = 255
mask = img[:, :, 0]
obj_contour = np.array([[[10, 10]], [[11, 10]], [[12, 10]], [[13, 10]], [[14, 10]], [[15, 10]], [[16, 10]],
[[17, 10]], [[18, 10]], [[19, 10]], [[20, 10]], [[21, 10]], [[22, 10]], [[23, 10]],
[[24, 10]], [[25, 10]], [[26, 10]], [[27, 10]], [[28, 10]], [[29, 10]], [[30, 10]],
[[31, 10]], [[32, 10]], [[33, 10]], [[34, 10]], [[35, 10]], [[36, 10]], [[37, 10]],
[[38, 10]], [[39, 10]], [[38, 10]], [[37, 10]], [[36, 10]], [[35, 10]], [[34, 10]],
[[33, 10]], [[32, 10]], [[31, 10]], [[30, 10]], [[29, 10]], [[28, 10]], [[27, 10]],
[[26, 10]], [[25, 10]], [[24, 10]], [[23, 10]], [[22, 10]], [[21, 10]], [[20, 10]],
[[19, 10]], [[18, 10]], [[17, 10]], [[16, 10]], [[15, 10]], [[14, 10]], [[13, 10]],
[[12, 10]], [[11, 10]]], dtype=np.int32)
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert len(obj_images) != 0
def test_plantcv_analyze_object_longest_axis_2d():
# Test with debug = None
pcv.params.debug = None
# Create a test image
img = np.zeros((50, 50, 3), dtype=np.uint8)
img[0:5, 45:49, 0] = 255
img[0:5, 0:5, 0] = 255
mask = img[:, :, 0]
obj_contour = np.array([[[45, 1]], [[45, 2]], [[45, 3]], [[45, 4]], [[46, 4]], [[47, 4]], [[48, 4]],
[[48, 3]], [[48, 2]], [[48, 1]], [[47, 1]], [[46, 1]], [[1, 1]], [[1, 2]],
[[1, 3]], [[1, 4]], [[2, 4]], [[3, 4]], [[4, 4]], [[4, 3]], [[4, 2]],
[[4, 1]], [[3, 1]], [[2, 1]]], dtype=np.int32)
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert len(obj_images) != 0
def test_plantcv_analyze_object_longest_axis_2e():
# Test with debug = None
pcv.params.debug = None
# Create a test image
img = np.zeros((50, 50, 3), dtype=np.uint8)
img[10:15, 10:40, 0] = 255
mask = img[:, :, 0]
obj_contour = np.array([[[10, 10]], [[10, 11]], [[10, 12]], [[10, 13]], [[10, 14]], [[11, 14]], [[12, 14]],
[[13, 14]], [[14, 14]], [[15, 14]], [[16, 14]], [[17, 14]], [[18, 14]], [[19, 14]],
[[20, 14]], [[21, 14]], [[22, 14]], [[23, 14]], [[24, 14]], [[25, 14]], [[26, 14]],
[[27, 14]], [[28, 14]], [[29, 14]], [[30, 14]], [[31, 14]], [[32, 14]], [[33, 14]],
[[34, 14]], [[35, 14]], [[36, 14]], [[37, 14]], [[38, 14]], [[39, 14]], [[39, 13]],
[[39, 12]], [[39, 11]], [[39, 10]], [[38, 10]], [[37, 10]], [[36, 10]], [[35, 10]],
[[34, 10]], [[33, 10]], [[32, 10]], [[31, 10]], [[30, 10]], [[29, 10]], [[28, 10]],
[[27, 10]], [[26, 10]], [[25, 10]], [[24, 10]], [[23, 10]], [[22, 10]], [[21, 10]],
[[20, 10]], [[19, 10]], [[18, 10]], [[17, 10]], [[16, 10]], [[15, 10]], [[14, 10]],
[[13, 10]], [[12, 10]], [[11, 10]]], dtype=np.int32)
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert len(obj_images) != 0
def test_plantcv_analyze_object_small_contour():
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
obj_contour = [np.array([[[0, 0]], [[0, 50]], [[50, 50]], [[50, 0]]], dtype=np.int32)]
obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
assert obj_images is None
def test_plantcv_analyze_thermal_values():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_thermal_values")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
# img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_THERMAL_IMG_MASK), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_THERMAL_ARRAY), encoding="latin1")
img = contours_npz['arr_0']
pcv.params.debug = None
thermal_hist = pcv.analyze_thermal_values(thermal_array=img, mask=mask, histplot=True)
assert thermal_hist is not None and pcv.outputs.observations['default']['median_temp']['value'] == 33.20922
def test_plantcv_apply_mask_white():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_apply_mask_white")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="white")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="white")
# Test with debug = None
pcv.params.debug = None
masked_img = pcv.apply_mask(img=img, mask=mask, mask_color="white")
    assert all(i == j for i, j in zip(np.shape(masked_img), TEST_COLOR_DIM))
def test_plantcv_apply_mask_black():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_apply_mask_black")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="black")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="black")
# Test with debug = None
pcv.params.debug = None
masked_img = pcv.apply_mask(img=img, mask=mask, mask_color="black")
    assert all(i == j for i, j in zip(np.shape(masked_img), TEST_COLOR_DIM))
def test_plantcv_apply_mask_hyperspectral():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_apply_mask_hyperspectral")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
hyper_array = pcv.hyperspectral.read_data(filename=spectral_filename)
img = np.ones((2056, 2454))
img_stacked = cv2.merge((img, img, img, img))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.apply_mask(img=img_stacked, mask=img, mask_color="black")
# Test with debug = "plot"
pcv.params.debug = "plot"
masked_array = pcv.apply_mask(img=hyper_array.array_data, mask=img, mask_color="black")
assert np.mean(masked_array) == 13.97111260224949
def test_plantcv_apply_mask_bad_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = "plot"
_ = pcv.apply_mask(img=img, mask=mask, mask_color="wite")
def test_plantcv_auto_crop():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_auto_crop")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
roi_contours = [contours[arr_n] for arr_n in contours]
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.auto_crop(img=img1, obj=roi_contours[1], padding_x=(20, 10), padding_y=(20, 10), color='black')
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.auto_crop(img=img1, obj=roi_contours[1], color='image')
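    # Also test with very large padding values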
_ = pcv.auto_crop(img=img1, obj=roi_contours[1], padding_x=2000, padding_y=2000, color='image')
# Test with debug = None
pcv.params.debug = None
cropped = pcv.auto_crop(img=img1, obj=roi_contours[1], padding_x=20, padding_y=20, color='black')
x, y, z = np.shape(img1)
x1, y1, z1 = np.shape(cropped)
assert x > x1
def test_plantcv_auto_crop_grayscale_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_auto_crop_grayscale_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
roi_contours = [contours[arr_n] for arr_n in contours]
# Test with debug = "plot"
pcv.params.debug = "plot"
cropped = pcv.auto_crop(img=gray_img, obj=roi_contours[1], padding_x=20, padding_y=20, color='white')
x, y = np.shape(gray_img)
x1, y1 = np.shape(cropped)
assert x > x1
def test_plantcv_auto_crop_bad_color_input():
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
roi_contours = [contours[arr_n] for arr_n in contours]
with pytest.raises(RuntimeError):
_ = pcv.auto_crop(img=gray_img, obj=roi_contours[1], padding_x=20, padding_y=20, color='wite')
def test_plantcv_auto_crop_bad_padding_input():
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
roi_contours = [contours[arr_n] for arr_n in contours]
with pytest.raises(RuntimeError):
_ = pcv.auto_crop(img=gray_img, obj=roi_contours[1], padding_x="one", padding_y=20, color='white')
def test_plantcv_canny_edge_detect():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_canny_edge_detect")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.canny_edge_detect(img=rgb_img, mask=mask, mask_color='white')
_ = pcv.canny_edge_detect(img=img, mask=mask, mask_color='black')
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.canny_edge_detect(img=img, thickness=2)
_ = pcv.canny_edge_detect(img=img)
# Test with debug = None
pcv.params.debug = None
edge_img = pcv.canny_edge_detect(img=img)
# Assert that the output image has the dimensions of the input image
    if all([i == j for i, j in zip(np.shape(edge_img), TEST_BINARY_DIM)]):
        # Assert that the image is binary
        if all([i == j for i, j in zip(np.unique(edge_img), [0, 255])]):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_canny_edge_detect_bad_input():
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_canny_edge_detect_bad_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
with pytest.raises(RuntimeError):
_ = pcv.canny_edge_detect(img=img, mask=mask, mask_color="gray")
def test_plantcv_closing():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_closing")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
bin_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug=None
pcv.params.debug = None
_ = pcv.closing(gray_img)
# Test with debug='plot'
pcv.params.debug = 'plot'
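    # Closing with an explicitly supplied 4x4 structuring element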
_ = pcv.closing(bin_img, np.ones((4, 4), np.uint8))
# Test with debug='print'
pcv.params.debug = 'print'
filtered_img = pcv.closing(bin_img)
assert np.sum(filtered_img) == 16261860
def test_plantcv_closing_bad_input():
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
with pytest.raises(RuntimeError):
_ = pcv.closing(rgb_img)
def test_plantcv_cluster_contours():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_cluster_contours")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
roi_objects = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
hierarchy = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_HIERARCHY), encoding="latin1")
objs = [roi_objects[arr_n] for arr_n in roi_objects]
obj_hierarchy = hierarchy['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, show_grid=True)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
# Test with debug = None
pcv.params.debug = None
clusters_i, contours, hierarchy = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy,
nrow=4, ncol=6)
lenori = len(objs)
lenclust = len(clusters_i)
assert lenori > lenclust
def test_plantcv_cluster_contours_grayscale_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_cluster_contours_grayscale_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), 0)
roi_objects = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
    hierarchy = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_HIERARCHY), encoding="latin1")
objs = [roi_objects[arr_n] for arr_n in roi_objects]
    obj_hierarchy = hierarchy['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
# Test with debug = None
pcv.params.debug = None
    clusters_i, contours, hierarchy = pcv.cluster_contours(img=img1, roi_objects=objs, roi_obj_hierarchy=obj_hierarchy,
nrow=4, ncol=6)
lenori = len(objs)
lenclust = len(clusters_i)
assert lenori > lenclust
def test_plantcv_cluster_contours_splitimg():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_cluster_contours_splitimg")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_CONTOUR), encoding="latin1")
clusters = np.load(os.path.join(TEST_DATA, TEST_INPUT_ClUSTER_CONTOUR), encoding="latin1")
    hierarchy = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_HIERARCHY), encoding="latin1")
cluster_names = os.path.join(TEST_DATA, TEST_INPUT_GENOTXT)
cluster_names_too_many = os.path.join(TEST_DATA, TEST_INPUT_GENOTXT_TOO_MANY)
roi_contours = [contours[arr_n] for arr_n in contours]
cluster_contours = [clusters[arr_n] for arr_n in clusters]
    obj_hierarchy = hierarchy['arr_0']
# Test with debug = None
pcv.params.debug = None
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours,
hierarchy=obj_hierarchy, outdir=cache_dir, file=None, filenames=None)
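    # Also exercise a degenerate case: a single grouping index with an empty contour list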
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=[[0]], contours=[],
hierarchy=np.array([[[1, -1, -1, -1]]]))
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours,
hierarchy=obj_hierarchy, outdir=cache_dir, file='multi', filenames=None)
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours,
hierarchy=obj_hierarchy, outdir=None, file=None, filenames=cluster_names)
_, _, _ = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours,
hierarchy=obj_hierarchy, outdir=None, file=None,
filenames=cluster_names_too_many)
output_path, imgs, masks = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours, hierarchy=obj_hierarchy, outdir=None,
file=None,
filenames=None)
assert len(output_path) != 0
def test_plantcv_cluster_contours_splitimg_grayscale():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_cluster_contours_splitimg_grayscale")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), 0)
contours = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_CONTOUR), encoding="latin1")
clusters = np.load(os.path.join(TEST_DATA, TEST_INPUT_ClUSTER_CONTOUR), encoding="latin1")
    hierarchy = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_HIERARCHY), encoding="latin1")
cluster_names = os.path.join(TEST_DATA, TEST_INPUT_GENOTXT)
cluster_names_too_many = os.path.join(TEST_DATA, TEST_INPUT_GENOTXT_TOO_MANY)
roi_contours = [contours[arr_n] for arr_n in contours]
cluster_contours = [clusters[arr_n] for arr_n in clusters]
    obj_hierarchy = hierarchy['arr_0']
pcv.params.debug = None
output_path, imgs, masks = pcv.cluster_contour_splitimg(img=img1, grouped_contour_indexes=cluster_contours,
contours=roi_contours, hierarchy=obj_hierarchy, outdir=None,
file=None,
filenames=None)
assert len(output_path) != 0
def test_plantcv_color_palette():
# Return a color palette
colors = pcv.color_palette(num=10, saved=False)
assert np.shape(colors) == (10, 3)
def test_plantcv_color_palette_random():
# Return a color palette in random order
pcv.params.color_sequence = "random"
colors = pcv.color_palette(num=10, saved=False)
assert np.shape(colors) == (10, 3)
def test_plantcv_color_palette_saved():
# Return a color palette that was saved
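    # Pre-populate the saved color scale so color_palette returns it unchanged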
pcv.params.saved_color_scale = [[0, 0, 0], [255, 255, 255]]
colors = pcv.color_palette(num=2, saved=True)
assert colors == [[0, 0, 0], [255, 255, 255]]
def test_plantcv_crop():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img, _, _ = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), 'gray')
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.crop(img=img, x=10, y=10, h=50, w=50)
# Test with debug = "plot"
pcv.params.debug = "plot"
cropped = pcv.crop(img=img, x=10, y=10, h=50, w=50)
assert np.shape(cropped) == (50, 50)
def test_plantcv_crop_hyperspectral():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_hyperspectral")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = np.ones((2056, 2454))
img_stacked = cv2.merge((img, img, img, img))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.crop(img=img_stacked, x=10, y=10, h=50, w=50)
# Test with debug = "plot"
pcv.params.debug = "plot"
cropped = pcv.crop(img=img_stacked, x=10, y=10, h=50, w=50)
assert np.shape(cropped) == (50, 50, 4)
def test_plantcv_crop_position_mask():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), 'gray')
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
    mask_three_channel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK))
mask_resize = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK_RESIZE), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
_ = pcv.crop_position_mask(nir, mask_resize, x=40, y=3, v_pos="top", h_pos="right")
_ = pcv.crop_position_mask(nir, mask_three_channel, x=40, y=3, v_pos="top", h_pos="right")
# Test with debug = "print" with bottom
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="bottom", h_pos="left")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
# Test with debug = "plot" with bottom
_ = pcv.crop_position_mask(nir, mask, x=45, y=2, v_pos="bottom", h_pos="left")
# Test with debug = None
pcv.params.debug = None
newmask = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
assert np.sum(newmask) == 707115
def test_plantcv_crop_position_mask_color():
# Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask_color")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_COLOR), mode='native')
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
mask_resize = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK_RESIZE))
mask_non_binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
# Test with debug = "print" with bottom
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="bottom", h_pos="left")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
# Test with debug = "plot" with bottom
_ = pcv.crop_position_mask(nir, mask, x=45, y=2, v_pos="bottom", h_pos="left")
_ = pcv.crop_position_mask(nir, mask_non_binary, x=45, y=2, v_pos="bottom", h_pos="left")
_ = pcv.crop_position_mask(nir, mask_non_binary, x=45, y=2, v_pos="top", h_pos="left")
_ = pcv.crop_position_mask(nir, mask_non_binary, x=45, y=2, v_pos="bottom", h_pos="right")
_ = pcv.crop_position_mask(nir, mask_resize, x=45, y=2, v_pos="top", h_pos="left")
# Test with debug = None
pcv.params.debug = None
newmask = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
assert np.sum(newmask) == 707115
def test_plantcv_crop_position_mask_bad_input_x():
# Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask_bad_input_x")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.crop_position_mask(nir, mask, x=-1, y=-1, v_pos="top", h_pos="right")
def test_plantcv_crop_position_mask_bad_input_vpos():
# Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask_bad_input_vpos")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="below", h_pos="right")
def test_plantcv_crop_position_mask_bad_input_hpos():
# Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask_bad_input_hpos")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
# Read in test data
nir, path1, filename1 = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="starboard")
def test_plantcv_dilate():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_dilate")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.dilate(gray_img=img, ksize=5, i=1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.dilate(gray_img=img, ksize=5, i=1)
# Test with debug = None
pcv.params.debug = None
dilate_img = pcv.dilate(gray_img=img, ksize=5, i=1)
# Assert that the output image has the dimensions of the input image
    if all([i == j for i, j in zip(np.shape(dilate_img), TEST_BINARY_DIM)]):
        # Assert that the image is binary
        if all([i == j for i, j in zip(np.unique(dilate_img), [0, 255])]):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_dilate_small_k():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = None
pcv.params.debug = None
with pytest.raises(ValueError):
_ = pcv.dilate(img, 1, 1)
def test_plantcv_erode():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_erode")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.erode(gray_img=img, ksize=5, i=1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.erode(gray_img=img, ksize=5, i=1)
# Test with debug = None
pcv.params.debug = None
erode_img = pcv.erode(gray_img=img, ksize=5, i=1)
# Assert that the output image has the dimensions of the input image
    if all([i == j for i, j in zip(np.shape(erode_img), TEST_BINARY_DIM)]):
        # Assert that the image is binary
        if all([i == j for i, j in zip(np.unique(erode_img), [0, 255])]):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_erode_small_k():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = None
pcv.params.debug = None
with pytest.raises(ValueError):
_ = pcv.erode(img, 1, 1)
def test_plantcv_distance_transform():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_distance_transform")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_CROPPED_MASK), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.distance_transform(bin_img=mask, distance_type=1, mask_size=3)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.distance_transform(bin_img=mask, distance_type=1, mask_size=3)
# Test with debug = None
pcv.params.debug = None
distance_transform_img = pcv.distance_transform(bin_img=mask, distance_type=1, mask_size=3)
# Assert that the output image has the dimensions of the input image
    assert all([i == j for i, j in zip(np.shape(distance_transform_img), np.shape(mask))])
def test_plantcv_fatal_error():
# Verify that the fatal_error function raises a RuntimeError
with pytest.raises(RuntimeError):
pcv.fatal_error("Test error")
def test_plantcv_fill():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = None
pcv.params.debug = None
fill_img = pcv.fill(bin_img=img, size=63632)
# Assert that the output image has the dimensions of the input image
# assert all([i == j] for i, j in zip(np.shape(fill_img), TEST_BINARY_DIM))
assert np.sum(fill_img) == 0
def test_plantcv_fill_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_fill_bad_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
_ = pcv.fill(bin_img=img, size=1)
def test_plantcv_fill_holes():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_fill_holes")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.fill_holes(bin_img=img)
pcv.params.debug = "plot"
_ = pcv.fill_holes(bin_img=img)
# Test with debug = None
pcv.params.debug = None
fill_img = pcv.fill_holes(bin_img=img)
assert np.sum(fill_img) > np.sum(img)
def test_plantcv_fill_holes_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_fill_holes_bad_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
_ = pcv.fill_holes(bin_img=img)
def test_plantcv_find_objects():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_find_objects")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.find_objects(img=img, mask=mask)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.find_objects(img=img, mask=mask)
# Test with debug = None
pcv.params.debug = None
contours, hierarchy = pcv.find_objects(img=img, mask=mask)
# Assert the correct number of contours are found
assert len(contours) == 2
def test_plantcv_find_objects_grayscale_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_find_objects_grayscale_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "plot"
pcv.params.debug = "plot"
contours, hierarchy = pcv.find_objects(img=img, mask=mask)
# Assert the correct number of contours are found
assert len(contours) == 2
def test_plantcv_flip():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_flip")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img_binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.flip(img=img, direction="horizontal")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.flip(img=img, direction="vertical")
_ = pcv.flip(img=img_binary, direction="vertical")
# Test with debug = None
pcv.params.debug = None
flipped_img = pcv.flip(img=img, direction="horizontal")
    assert all([i == j for i, j in zip(np.shape(flipped_img), TEST_COLOR_DIM)])
def test_plantcv_flip_bad_input():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.flip(img=img, direction="vert")
def test_plantcv_gaussian_blur():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_gaussian_blur")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img_color = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.gaussian_blur(img=img, ksize=(51, 51), sigma_x=0, sigma_y=None)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.gaussian_blur(img=img, ksize=(51, 51), sigma_x=0, sigma_y=None)
_ = pcv.gaussian_blur(img=img_color, ksize=(51, 51), sigma_x=0, sigma_y=None)
# Test with debug = None
pcv.params.debug = None
gaussian_img = pcv.gaussian_blur(img=img, ksize=(51, 51), sigma_x=0, sigma_y=None)
imgavg = np.average(img)
gavg = np.average(gaussian_img)
assert gavg != imgavg
def test_plantcv_get_kernel_cross():
kernel = pcv.get_kernel(size=(3, 3), shape="cross")
assert (kernel == np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])).all()
def test_plantcv_get_kernel_rectangle():
kernel = pcv.get_kernel(size=(3, 3), shape="rectangle")
assert (kernel == np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])).all()
def test_plantcv_get_kernel_ellipse():
kernel = pcv.get_kernel(size=(3, 3), shape="ellipse")
assert (kernel == np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])).all()
def test_plantcv_get_kernel_bad_input_size():
with pytest.raises(ValueError):
_ = pcv.get_kernel(size=(1, 1), shape="ellipse")
def test_plantcv_get_kernel_bad_input_shape():
with pytest.raises(RuntimeError):
_ = pcv.get_kernel(size=(3, 1), shape="square")
def test_plantcv_get_nir_sv():
nirpath = pcv.get_nir(TEST_DATA, TEST_VIS)
nirpath1 = os.path.join(TEST_DATA, TEST_NIR)
assert nirpath == nirpath1
def test_plantcv_get_nir_tv():
nirpath = pcv.get_nir(TEST_DATA, TEST_VIS_TV)
nirpath1 = os.path.join(TEST_DATA, TEST_NIR_TV)
assert nirpath == nirpath1
def test_plantcv_hist_equalization():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hist_equalization")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.hist_equalization(gray_img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.hist_equalization(gray_img=img)
# Test with debug = None
pcv.params.debug = None
hist = pcv.hist_equalization(gray_img=img)
histavg = np.average(hist)
imgavg = np.average(img)
assert histavg != imgavg
def test_plantcv_hist_equalization_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hist_equalization_bad_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), 1)
# Test with debug = None
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.hist_equalization(gray_img=img)
def test_plantcv_image_add():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_image_add")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = np.copy(img1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.image_add(gray_img1=img1, gray_img2=img2)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.image_add(gray_img1=img1, gray_img2=img2)
# Test with debug = None
pcv.params.debug = None
added_img = pcv.image_add(gray_img1=img1, gray_img2=img2)
    assert all([i == j for i, j in zip(np.shape(added_img), TEST_BINARY_DIM)])
def test_plantcv_image_fusion():
# Read in test data
# 16-bit image
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMAX), -1)
img2 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMIN))
# 8-bit image
img2 = img_as_ubyte(img2)
fused_img = pcv.image_fusion(img1, img2, [480.0], [550.0, 640.0, 800.0])
assert str(type(fused_img)) == "<class 'plantcv.plantcv.classes.Spectral_data'>"
def test_plantcv_image_fusion_size_diff():
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), 0)
img2 = np.copy(img1)
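    # Crop the second image so its dimensions no longer match img1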
img2 = img2[0:10, 0:10]
with pytest.raises(RuntimeError):
_ = pcv.image_fusion(img1, img2, [480.0, 550.0, 670.0], [480.0, 550.0, 670.0])
def test_plantcv_image_subtract():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_image_sub")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# read in images
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = np.copy(img1)
# Test with debug = "print"
pcv.params.debug = 'print'
_ = pcv.image_subtract(img1, img2)
# Test with debug = "plot"
pcv.params.debug = 'plot'
_ = pcv.image_subtract(img1, img2)
# Test with debug = None
pcv.params.debug = None
new_img = pcv.image_subtract(img1, img2)
assert np.array_equal(new_img, np.zeros(np.shape(new_img), np.uint8))
def test_plantcv_image_subtract_fail():
# read in images
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY))
# test
with pytest.raises(RuntimeError):
_ = pcv.image_subtract(img1, img2)
def test_plantcv_invert():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_invert")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.invert(gray_img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.invert(gray_img=img)
# Test with debug = None
pcv.params.debug = None
inverted_img = pcv.invert(gray_img=img)
# Assert that the output image has the dimensions of the input image
    if all([i == j for i, j in zip(np.shape(inverted_img), TEST_BINARY_DIM)]):
        # Assert that the image is binary
        if all([i == j for i, j in zip(np.unique(inverted_img), [0, 255])]):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_landmark_reference_pt_dist():
# Clear previous outputs
pcv.outputs.clear()
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_landmark_reference")
os.mkdir(cache_dir)
points_rescaled = [(0.0139, 0.2569), (0.2361, 0.2917), (0.3542, 0.3819), (0.3542, 0.4167), (0.375, 0.4236),
(0.7431, 0.3681), (0.8958, 0.3542), (0.9931, 0.3125), (0.1667, 0.5139), (0.4583, 0.8889),
(0.4931, 0.5903), (0.3889, 0.5694), (0.4792, 0.4306), (0.2083, 0.5417), (0.3194, 0.5278),
(0.3889, 0.375), (0.3681, 0.3472), (0.2361, 0.0139), (0.5417, 0.2292), (0.7708, 0.3472),
(0.6458, 0.3472), (0.6389, 0.5208), (0.6458, 0.625)]
centroid_rescaled = (0.4685, 0.4945)
bottomline_rescaled = (0.4685, 0.2569)
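    # Exercise edge cases: empty point lists, a non-numeric centroid, and a single far-off point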
_ = pcv.landmark_reference_pt_dist(points_r=[], centroid_r=('a', 'b'), bline_r=(0, 0))
_ = pcv.landmark_reference_pt_dist(points_r=[(10, 1000)], centroid_r=(10, 10), bline_r=(10, 10))
_ = pcv.landmark_reference_pt_dist(points_r=[], centroid_r=(0, 0), bline_r=(0, 0))
_ = pcv.landmark_reference_pt_dist(points_r=points_rescaled, centroid_r=centroid_rescaled,
bline_r=bottomline_rescaled, label="prefix")
assert len(pcv.outputs.observations['prefix'].keys()) == 8
def test_plantcv_laplace_filter():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_laplace_filter")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.laplace_filter(gray_img=img, ksize=1, scale=1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.laplace_filter(gray_img=img, ksize=1, scale=1)
# Test with debug = None
pcv.params.debug = None
lp_img = pcv.laplace_filter(gray_img=img, ksize=1, scale=1)
# Assert that the output image has the dimensions of the input image
    assert all([i == j for i, j in zip(np.shape(lp_img), TEST_GRAY_DIM)])
def test_plantcv_logical_and():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_logical_and")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = np.copy(img1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.logical_and(bin_img1=img1, bin_img2=img2)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.logical_and(bin_img1=img1, bin_img2=img2)
# Test with debug = None
pcv.params.debug = None
and_img = pcv.logical_and(bin_img1=img1, bin_img2=img2)
    assert all([i == j for i, j in zip(np.shape(and_img), TEST_BINARY_DIM)])
def test_plantcv_logical_or():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_logical_or")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = np.copy(img1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.logical_or(bin_img1=img1, bin_img2=img2)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.logical_or(bin_img1=img1, bin_img2=img2)
# Test with debug = None
pcv.params.debug = None
or_img = pcv.logical_or(bin_img1=img1, bin_img2=img2)
    assert all([i == j for i, j in zip(np.shape(or_img), TEST_BINARY_DIM)])
def test_plantcv_logical_xor():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_logical_xor")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = np.copy(img1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.logical_xor(bin_img1=img1, bin_img2=img2)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.logical_xor(bin_img1=img1, bin_img2=img2)
# Test with debug = None
pcv.params.debug = None
xor_img = pcv.logical_xor(bin_img1=img1, bin_img2=img2)
    assert all([i == j for i, j in zip(np.shape(xor_img), TEST_BINARY_DIM)])
def test_plantcv_median_blur():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_median_blur")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.median_blur(gray_img=img, ksize=5)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.median_blur(gray_img=img, ksize=5)
# Test with debug = None
pcv.params.debug = None
blur_img = pcv.median_blur(gray_img=img, ksize=5)
# Assert that the output image has the dimensions of the input image
    if all([i == j for i, j in zip(np.shape(blur_img), TEST_BINARY_DIM)]):
        # Assert that the image is binary
        if all([i == j for i, j in zip(np.unique(blur_img), [0, 255])]):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_median_blur_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_median_blur_bad_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
_ = pcv.median_blur(img, 5.)
def test_plantcv_naive_bayes_classifier():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_naive_bayes_classifier")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS))
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS))
# Test with debug = None
pcv.params.debug = None
mask = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS))
# Assert that the output image has the dimensions of the input image
    if all([i == j for i, j in zip(np.shape(mask), TEST_GRAY_DIM)]):
        # Assert that the image is binary
        if all([i == j for i, j in zip(np.unique(mask), [0, 255])]):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_naive_bayes_classifier_bad_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS_BAD))
def test_plantcv_object_composition():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_object_composition")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
object_contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_CONTOURS), encoding="latin1")
object_contours = [object_contours_npz[arr_n] for arr_n in object_contours_npz]
object_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_HIERARCHY), encoding="latin1")
object_hierarchy = object_hierarchy_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.object_composition(img=img, contours=object_contours, hierarchy=object_hierarchy)
_ = pcv.object_composition(img=img, contours=[], hierarchy=object_hierarchy)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.object_composition(img=img, contours=object_contours, hierarchy=object_hierarchy)
# Test with debug = None
pcv.params.debug = None
contours, mask = pcv.object_composition(img=img, contours=object_contours, hierarchy=object_hierarchy)
# Assert that the objects have been combined
contour_shape = np.shape(contours) # type: tuple
assert contour_shape[1] == 1
def test_plantcv_object_composition_grayscale_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_object_composition_grayscale_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
object_contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_CONTOURS), encoding="latin1")
object_contours = [object_contours_npz[arr_n] for arr_n in object_contours_npz]
object_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_HIERARCHY), encoding="latin1")
object_hierarchy = object_hierarchy_npz['arr_0']
# Test with debug = "plot"
pcv.params.debug = "plot"
contours, mask = pcv.object_composition(img=img, contours=object_contours, hierarchy=object_hierarchy)
# Assert that the objects have been combined
contour_shape = np.shape(contours) # type: tuple
assert contour_shape[1] == 1
def test_plantcv_within_frame():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_within_frame")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
mask_ib = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
mask_oob = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK_OOB), -1)
in_bounds_ib = pcv.within_frame(mask=mask_ib, border_width=1, label="prefix")
in_bounds_oob = pcv.within_frame(mask=mask_oob, border_width=1)
assert (in_bounds_ib is True and in_bounds_oob is False)
def test_plantcv_within_frame_bad_input():
# Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_within_frame_bad_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
grayscale_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
with pytest.raises(RuntimeError):
_ = pcv.within_frame(grayscale_img)
def test_plantcv_opening():
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_opening")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
bin_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug=None
pcv.params.debug = None
_ = pcv.opening(gray_img)
# Test with debug='plot'
pcv.params.debug = 'plot'
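    # Opening with an explicitly supplied 4x4 structuring element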
_ = pcv.opening(bin_img, np.ones((4, 4), np.uint8))
# Test with debug='print'
pcv.params.debug = 'print'
filtered_img = pcv.opening(bin_img)
assert np.sum(filtered_img) == 16184595
def test_plantcv_opening_bad_input():
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
with pytest.raises(RuntimeError):
_ = pcv.opening(rgb_img)
def test_plantcv_output_mask():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_output_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
img_color = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.output_mask(img=img, mask=mask, filename='test.png', outdir=None, mask_only=False)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.output_mask(img=img, mask=mask, filename='test.png', outdir=cache_dir, mask_only=False)
_ = pcv.output_mask(img=img_color, mask=mask, filename='test.png', outdir=None, mask_only=False)
    # Remove tmp files in working directory
shutil.rmtree("ori-images")
shutil.rmtree("mask-images")
# Test with debug = None
pcv.params.debug = None
imgpath, maskpath, analysis_images = pcv.output_mask(img=img, mask=mask, filename='test.png',
outdir=cache_dir, mask_only=False)
assert all([os.path.exists(imgpath) is True, os.path.exists(maskpath) is True])
def test_plantcv_output_mask_true():
# Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_output_mask_true")
pcv.params.debug_outdir = cache_dir
os.mkdir(cache_dir)
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
img_color = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.output_mask(img=img, mask=mask, filename='test.png', outdir=cache_dir, mask_only=True)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.output_mask(img=img_color, mask=mask, filename='test.png', outdir=cache_dir, mask_only=True)
pcv.params.debug = None
imgpath, maskpath, analysis_images = pcv.output_mask(img=img, mask=mask, filename='test.png', outdir=cache_dir,
mask_only=False)
assert all([os.path.exists(imgpath) is True, os.path.exists(maskpath) is True])
def test_plantcv_plot_image_matplotlib_input():
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_plot_image_matplotlib_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
pimg = pcv.visualize.pseudocolor(gray_img=img, mask=mask, min_value=10, max_value=200)
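    # plot_image should reject the matplotlib figure returned by pseudocolor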
with pytest.raises(RuntimeError):
pcv.plot_image(pimg)
def test_plantcv_plot_image_plotnine():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_plot_image_plotnine")
os.mkdir(cache_dir)
dataset = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 2, 3, 4]})
img = ggplot(data=dataset)
try:
pcv.plot_image(img=img)
except RuntimeError:
assert False
# Assert that the image was plotted without error
assert True
def test_plantcv_print_image():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_print_image")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_COLOR))
filename = os.path.join(cache_dir, 'plantcv_print_image.png')
pcv.print_image(img=img, filename=filename)
# Assert that the file was created
assert os.path.exists(filename) is True
def test_plantcv_print_image_bad_type():
with pytest.raises(RuntimeError):
pcv.print_image(img=[], filename="/dev/null")
def test_plantcv_print_image_plotnine():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_print_image_plotnine")
os.mkdir(cache_dir)
dataset = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 2, 3, 4]})
img = ggplot(data=dataset)
filename = os.path.join(cache_dir, 'plantcv_print_image.png')
pcv.print_image(img=img, filename=filename)
# Assert that the file was created
assert os.path.exists(filename) is True
def test_plantcv_print_image_matplotlib():
# Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_print_image_matplotlib")
os.mkdir(cache_dir)
# Input data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
plt.figure()
plt.imshow(img)
plot = plt.gcf()
filename = os.path.join(cache_dir, 'plantcv_print_image.png')
pcv.print_image(img=plot, filename=filename)
# Assert that the file was created
assert os.path.exists(filename) is True
def test_plantcv_print_results(tmpdir):
# Create a tmp directory
cache_dir = tmpdir.mkdir("sub")
outfile = os.path.join(cache_dir, "results.json")
pcv.print_results(filename=outfile)
assert os.path.exists(outfile)
def test_plantcv_readimage_native():
# Test with debug = None
pcv.params.debug = None
_ = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_COLOR), mode='rgba')
_ = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_COLOR), mode='native')
# Assert that the image name returned equals the name of the input image
# Assert that the path of the image returned equals the path of the input image
# Assert that the dimensions of the returned image equals the expected dimensions
if img_name == TEST_INPUT_COLOR and path == TEST_DATA:
        if all([i == j for i, j in zip(np.shape(img), TEST_COLOR_DIM)]):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_readimage_grayscale():
# Test with debug = None
pcv.params.debug = None
_, _, _ = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_GRAY), mode="grey")
img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_GRAY), mode="gray")
assert len(np.shape(img)) == 2
def test_plantcv_readimage_rgb():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_GRAY), mode="rgb")
assert len(np.shape(img)) == 3
def test_plantcv_readimage_rgba_as_rgb():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_RGBA), mode="native")
assert np.shape(img)[2] == 3
def test_plantcv_readimage_csv():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_THERMAL_CSV), mode="csv")
assert len(np.shape(img)) == 2
def test_plantcv_readimage_envi():
# Test with debug = None
pcv.params.debug = None
array_data = pcv.readimage(filename=os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA), mode="envi")
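    # The array_type length check below is only evaluated under Python 2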
if sys.version_info[0] < 3:
assert len(array_data.array_type) == 8
def test_plantcv_readimage_bad_file():
with pytest.raises(RuntimeError):
_ = pcv.readimage(filename=TEST_INPUT_COLOR)
def test_plantcv_readbayer_default_bg():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_readbayer_default_bg")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Test with debug = "print"
pcv.params.debug = "print"
_, _, _ = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="BG", alg="default")
# Test with debug = "plot"
pcv.params.debug = "plot"
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="BG", alg="default")
    assert all([i == j for i, j in zip(np.shape(img), (335, 400, 3))])
def test_plantcv_readbayer_default_gb():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="GB", alg="default")
    assert all([i == j for i, j in zip(np.shape(img), (335, 400, 3))])
def test_plantcv_readbayer_default_rg():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="RG", alg="default")
    assert all([i == j for i, j in zip(np.shape(img), (335, 400, 3))])
def test_plantcv_readbayer_default_gr():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="GR", alg="default")
    assert all([i == j for i, j in zip(np.shape(img), (335, 400, 3))])
def test_plantcv_readbayer_edgeaware_bg():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="BG", alg="edgeaware")
    assert all([i == j for i, j in zip(np.shape(img), (335, 400, 3))])
def test_plantcv_readbayer_edgeaware_gb():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="GB", alg="edgeaware")
    assert all([i == j for i, j in zip(np.shape(img), (335, 400, 3))])
def test_plantcv_readbayer_edgeaware_rg():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="RG", alg="edgeaware")
    assert all([i == j for i, j in zip(np.shape(img), (335, 400, 3))])
def test_plantcv_readbayer_edgeaware_gr():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="GR", alg="edgeaware")
    assert all([i == j for i, j in zip(np.shape(img), (335, 400, 3))])
def test_plantcv_readbayer_variablenumbergradients_bg():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="BG", alg="variablenumbergradients")
    assert all([i == j for i, j in zip(np.shape(img), (335, 400, 3))])
def test_plantcv_readbayer_variablenumbergradients_gb():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="GB", alg="variablenumbergradients")
    assert all([i == j for i, j in zip(np.shape(img), (335, 400, 3))])
def test_plantcv_readbayer_variablenumbergradients_rg():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="RG", alg="variablenumbergradients")
    assert all([i == j for i, j in zip(np.shape(img), (335, 400, 3))])
def test_plantcv_readbayer_variablenumbergradients_gr():
# Test with debug = None
pcv.params.debug = None
img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
bayerpattern="GR", alg="variablenumbergradients")
    assert all([i == j for i, j in zip(np.shape(img), (335, 400, 3))])
def test_plantcv_readbayer_default_bad_input():
# Test with debug = None
pcv.params.debug = None
with pytest.raises(RuntimeError):
_, _, _ = pcv.readbayer(filename=os.path.join(TEST_DATA, "no-image.png"), bayerpattern="GR", alg="default")
def test_plantcv_rectangle_mask():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rectangle_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
img_color = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.rectangle_mask(img=img, p1=(0, 0), p2=(2454, 2056), color="white")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.rectangle_mask(img=img_color, p1=(0, 0), p2=(2454, 2056), color="gray")
# Test with debug = None
pcv.params.debug = None
masked, hist, contour, heir = pcv.rectangle_mask(img=img, p1=(0, 0), p2=(2454, 2056), color="black")
maskedsum = np.sum(masked)
imgsum = np.sum(img)
assert maskedsum < imgsum
def test_plantcv_rectangle_mask_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rectangle_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = None
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.rectangle_mask(img=img, p1=(0, 0), p2=(2454, 2056), color="whit")
def test_plantcv_report_size_marker_detect():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_report_size_marker_detect")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER), -1)
# ROI contour
roi_contour = [np.array([[[3550, 850]], [[3550, 1349]], [[4049, 1349]], [[4049, 850]]], dtype=np.int32)]
roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='detect',
objcolor='light', thresh_channel='s', thresh=120, label="prefix")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='detect',
objcolor='light', thresh_channel='s', thresh=120)
# Test with debug = None
pcv.params.debug = None
images = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='detect',
objcolor='light', thresh_channel='s', thresh=120)
pcv.outputs.clear()
assert len(images) != 0
def test_plantcv_report_size_marker_define():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER), -1)
# ROI contour
roi_contour = [np.array([[[3550, 850]], [[3550, 1349]], [[4049, 1349]], [[4049, 850]]], dtype=np.int32)]
roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
# Test with debug = None
pcv.params.debug = None
images = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='define',
objcolor='light', thresh_channel='s', thresh=120)
assert len(images) != 0
def test_plantcv_report_size_marker_grayscale_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# ROI contour
roi_contour = [np.array([[[0, 0]], [[0, 49]], [[49, 49]], [[49, 0]]], dtype=np.int32)]
roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
# Test with debug = None
pcv.params.debug = None
images = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='define',
objcolor='light', thresh_channel='s', thresh=120)
assert len(images) != 0
def test_plantcv_report_size_marker_bad_marker_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER), -1)
# ROI contour
roi_contour = [np.array([[[3550, 850]], [[3550, 1349]], [[4049, 1349]], [[4049, 850]]], dtype=np.int32)]
roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
with pytest.raises(RuntimeError):
_ = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='none',
objcolor='light', thresh_channel='s', thresh=120)
def test_plantcv_report_size_marker_bad_threshold_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER), -1)
# ROI contour
roi_contour = [np.array([[[3550, 850]], [[3550, 1349]], [[4049, 1349]], [[4049, 850]]], dtype=np.int32)]
roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
with pytest.raises(RuntimeError):
_ = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='detect',
objcolor='light', thresh_channel=None, thresh=120)
def test_plantcv_rgb2gray_cmyk():
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
c = pcv.rgb2gray_cmyk(rgb_img=img, channel="c")
# Assert that the output image has the dimensions of the input image but is only a single channel
    assert all([i == j for i, j in zip(np.shape(c), TEST_GRAY_DIM)])
def test_plantcv_rgb2gray_cmyk_bad_channel():
# Test with debug = None
pcv.params.debug = None
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
# Channel S is not in CMYK
_ = pcv.rgb2gray_cmyk(rgb_img=img, channel="s")
def test_plantcv_rgb2gray_hsv():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rgb2gray_hsv")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.rgb2gray_hsv(rgb_img=img, channel="s")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.rgb2gray_hsv(rgb_img=img, channel="s")
# Test with debug = None
pcv.params.debug = None
s = pcv.rgb2gray_hsv(rgb_img=img, channel="s")
# Assert that the output image has the dimensions of the input image but is only a single channel
    assert all([i == j for i, j in zip(np.shape(s), TEST_GRAY_DIM)])
def test_plantcv_rgb2gray_hsv_bad_input():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.rgb2gray_hsv(rgb_img=img, channel="l")
def test_plantcv_rgb2gray_lab():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rgb2gray_lab")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.rgb2gray_lab(rgb_img=img, channel='b')
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.rgb2gray_lab(rgb_img=img, channel='b')
# Test with debug = None
pcv.params.debug = None
b = pcv.rgb2gray_lab(rgb_img=img, channel='b')
# Assert that the output image has the dimensions of the input image but is only a single channel
    assert all([i == j for i, j in zip(np.shape(b), TEST_GRAY_DIM)])
def test_plantcv_rgb2gray_lab_bad_input():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.rgb2gray_lab(rgb_img=img, channel="v")
def test_plantcv_rgb2gray():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rgb2gray")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = None
pcv.params.debug = None
gray = pcv.rgb2gray(rgb_img=img)
# Assert that the output image has the dimensions of the input image but is only a single channel
    assert all(i == j for i, j in zip(np.shape(gray), TEST_GRAY_DIM))
def test_plantcv_roi2mask():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_acute_vertex")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
pcv.params.debug = "plot"
_ = pcv.roi.roi2mask(img=img, contour=obj_contour)
pcv.params.debug = "print"
mask = pcv.roi.roi2mask(img=img, contour=obj_contour)
assert np.shape(mask)[0:2] == np.shape(img)[0:2] and np.sum(mask) == 255
def test_plantcv_roi_objects():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi_objects")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
roi_contour_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_CONTOUR), encoding="latin1")
roi_contour = [roi_contour_npz[arr_n] for arr_n in roi_contour_npz]
roi_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_HIERARCHY), encoding="latin1")
roi_hierarchy = roi_hierarchy_npz['arr_0']
object_contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_CONTOURS), encoding="latin1")
object_contours = [object_contours_npz[arr_n] for arr_n in object_contours_npz]
object_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_HIERARCHY), encoding="latin1")
object_hierarchy = object_hierarchy_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.roi_objects(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy,
object_contour=object_contours, obj_hierarchy=object_hierarchy, roi_type="largest")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.roi_objects(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy,
object_contour=object_contours, obj_hierarchy=object_hierarchy, roi_type="partial")
# Test with debug = None and roi_type = cutto
pcv.params.debug = None
_ = pcv.roi_objects(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy,
object_contour=object_contours, obj_hierarchy=object_hierarchy, roi_type="cutto")
# Test with debug = None
kept_contours, kept_hierarchy, mask, area = pcv.roi_objects(img=img, roi_contour=roi_contour,
roi_hierarchy=roi_hierarchy,
object_contour=object_contours,
obj_hierarchy=object_hierarchy, roi_type="partial")
# Assert that the contours were filtered as expected
assert len(kept_contours) == 1891
def test_plantcv_roi_objects_bad_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
roi_contour_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_CONTOUR), encoding="latin1")
roi_contour = [roi_contour_npz[arr_n] for arr_n in roi_contour_npz]
roi_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_HIERARCHY), encoding="latin1")
roi_hierarchy = roi_hierarchy_npz['arr_0']
object_contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_CONTOURS), encoding="latin1")
object_contours = [object_contours_npz[arr_n] for arr_n in object_contours_npz]
object_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_HIERARCHY), encoding="latin1")
object_hierarchy = object_hierarchy_npz['arr_0']
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.roi_objects(img=img, roi_type="cut", roi_contour=roi_contour, roi_hierarchy=roi_hierarchy,
object_contour=object_contours, obj_hierarchy=object_hierarchy)
def test_plantcv_roi_objects_grayscale_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi_objects_grayscale_input")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
roi_contour_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_CONTOUR), encoding="latin1")
roi_contour = [roi_contour_npz[arr_n] for arr_n in roi_contour_npz]
roi_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_HIERARCHY), encoding="latin1")
roi_hierarchy = roi_hierarchy_npz['arr_0']
object_contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_CONTOURS), encoding="latin1")
object_contours = [object_contours_npz[arr_n] for arr_n in object_contours_npz]
object_hierarchy_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_HIERARCHY), encoding="latin1")
object_hierarchy = object_hierarchy_npz['arr_0']
# Test with debug = "plot"
pcv.params.debug = "plot"
kept_contours, kept_hierarchy, mask, area = pcv.roi_objects(img=img, roi_type="partial", roi_contour=roi_contour,
roi_hierarchy=roi_hierarchy,
object_contour=object_contours,
obj_hierarchy=object_hierarchy)
# Assert that the contours were filtered as expected
assert len(kept_contours) == 1891
def test_plantcv_rotate():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
rotated = pcv.rotate(img=img, rotation_deg=45, crop=True)
imgavg = np.average(img)
rotateavg = np.average(rotated)
assert rotateavg != imgavg
def test_plantcv_transform_rotate():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rotate_img")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.transform.rotate(img=img, rotation_deg=45, crop=True)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.transform.rotate(img=img, rotation_deg=45, crop=True)
# Test with debug = None
pcv.params.debug = None
rotated = pcv.transform.rotate(img=img, rotation_deg=45, crop=True)
imgavg = np.average(img)
rotateavg = np.average(rotated)
assert rotateavg != imgavg
def test_plantcv_transform_rotate_gray():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.transform.rotate(img=img, rotation_deg=45, crop=False)
# Test with debug = None
pcv.params.debug = None
rotated = pcv.transform.rotate(img=img, rotation_deg=45, crop=False)
imgavg = np.average(img)
rotateavg = np.average(rotated)
assert rotateavg != imgavg
def test_plantcv_scale_features():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_scale_features")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.scale_features(obj=obj_contour, mask=mask, points=TEST_ACUTE_RESULT, line_position=50)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.scale_features(obj=obj_contour, mask=mask, points=TEST_ACUTE_RESULT, line_position='NA')
# Test with debug = None
pcv.params.debug = None
points_rescaled, centroid_rescaled, bottomline_rescaled = pcv.scale_features(obj=obj_contour, mask=mask,
points=TEST_ACUTE_RESULT,
line_position=50)
assert len(points_rescaled) == 23
def test_plantcv_scale_features_bad_input():
mask = np.array([])
obj_contour = np.array([])
pcv.params.debug = None
result = pcv.scale_features(obj=obj_contour, mask=mask, points=TEST_ACUTE_RESULT, line_position=50)
    assert all(i == j for i, j in zip(result, [("NA", "NA"), ("NA", "NA"), ("NA", "NA")]))
def test_plantcv_scharr_filter():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_scharr_filter")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
pcv.params.debug = "print"
# Test with debug = "print"
_ = pcv.scharr_filter(img=img, dx=1, dy=0, scale=1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.scharr_filter(img=img, dx=1, dy=0, scale=1)
# Test with debug = None
pcv.params.debug = None
scharr_img = pcv.scharr_filter(img=img, dx=1, dy=0, scale=1)
# Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(scharr_img), TEST_GRAY_DIM))
def test_plantcv_shift_img():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_shift_img")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.shift_img(img=img, number=300, side="top")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.shift_img(img=img, number=300, side="top")
# Test with debug = "plot"
_ = pcv.shift_img(img=img, number=300, side="bottom")
# Test with debug = "plot"
_ = pcv.shift_img(img=img, number=300, side="right")
# Test with debug = "plot"
_ = pcv.shift_img(img=mask, number=300, side="left")
# Test with debug = None
pcv.params.debug = None
rotated = pcv.shift_img(img=img, number=300, side="top")
imgavg = np.average(img)
shiftavg = np.average(rotated)
assert shiftavg != imgavg
def test_plantcv_shift_img_bad_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
pcv.params.debug = None
_ = pcv.shift_img(img=img, number=-300, side="top")
def test_plantcv_shift_img_bad_side_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
pcv.params.debug = None
_ = pcv.shift_img(img=img, number=300, side="starboard")
def test_plantcv_sobel_filter():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_sobel_filter")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.sobel_filter(gray_img=img, dx=1, dy=0, ksize=1)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.sobel_filter(gray_img=img, dx=1, dy=0, ksize=1)
# Test with debug = None
pcv.params.debug = None
sobel_img = pcv.sobel_filter(gray_img=img, dx=1, dy=0, ksize=1)
# Assert that the output image has the dimensions of the input image
    assert all(i == j for i, j in zip(np.shape(sobel_img), TEST_GRAY_DIM))
def test_plantcv_stdev_filter():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_sobel_filter")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
pcv.params.debug = "plot"
_ = pcv.stdev_filter(img=img, ksize=11)
pcv.params.debug = "print"
filter_img = pcv.stdev_filter(img=img, ksize=11)
assert (np.shape(filter_img) == np.shape(img))
def test_plantcv_watershed_segmentation():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_watershed_segmentation")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_CROPPED))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_CROPPED_MASK), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.watershed_segmentation(rgb_img=img, mask=mask, distance=10, label="prefix")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.watershed_segmentation(rgb_img=img, mask=mask, distance=10)
# Test with debug = None
pcv.params.debug = None
_ = pcv.watershed_segmentation(rgb_img=img, mask=mask, distance=10)
assert pcv.outputs.observations['default']['estimated_object_count']['value'] > 9
def test_plantcv_white_balance_gray_16bit():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_white_balance_gray_16bit")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.white_balance(img=img, mode='hist', roi=(5, 5, 80, 80))
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.white_balance(img=img, mode='max', roi=(5, 5, 80, 80))
# Test without an ROI
pcv.params.debug = None
_ = pcv.white_balance(img=img, mode='hist', roi=None)
# Test with debug = None
white_balanced = pcv.white_balance(img=img, roi=(5, 5, 80, 80))
imgavg = np.average(img)
balancedavg = np.average(white_balanced)
assert balancedavg != imgavg
def test_plantcv_white_balance_gray_8bit():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_white_balance_gray_8bit")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK))
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.white_balance(img=img, mode='hist', roi=(5, 5, 80, 80))
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.white_balance(img=img, mode='max', roi=(5, 5, 80, 80))
# Test without an ROI
pcv.params.debug = None
_ = pcv.white_balance(img=img, mode='hist', roi=None)
# Test with debug = None
white_balanced = pcv.white_balance(img=img, roi=(5, 5, 80, 80))
imgavg = np.average(img)
balancedavg = np.average(white_balanced)
assert balancedavg != imgavg
def test_plantcv_white_balance_rgb():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_white_balance_rgb")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER))
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.white_balance(img=img, mode='hist', roi=(5, 5, 80, 80))
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.white_balance(img=img, mode='max', roi=(5, 5, 80, 80))
# Test without an ROI
pcv.params.debug = None
_ = pcv.white_balance(img=img, mode='hist', roi=None)
# Test with debug = None
white_balanced = pcv.white_balance(img=img, roi=(5, 5, 80, 80))
imgavg = np.average(img)
balancedavg = np.average(white_balanced)
assert balancedavg != imgavg
def test_plantcv_white_balance_bad_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), -1)
    # Test with debug = "plot"
with pytest.raises(RuntimeError):
pcv.params.debug = "plot"
_ = pcv.white_balance(img=img, mode='hist', roi=(5, 5, 5, 5, 5))
def test_plantcv_white_balance_bad_mode_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER))
    # Test with debug = "plot"
with pytest.raises(RuntimeError):
pcv.params.debug = "plot"
_ = pcv.white_balance(img=img, mode='histogram', roi=(5, 5, 80, 80))
def test_plantcv_white_balance_bad_input_int():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), -1)
    # Test with debug = "plot"
with pytest.raises(RuntimeError):
pcv.params.debug = "plot"
_ = pcv.white_balance(img=img, mode='hist', roi=(5., 5, 5, 5))
def test_plantcv_x_axis_pseudolandmarks():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_x_axis_pseudolandmarks_debug")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
pcv.params.debug = "print"
_ = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img, label="prefix")
_ = pcv.x_axis_pseudolandmarks(obj=np.array([[0, 0], [0, 0]]), mask=np.array([[0, 0], [0, 0]]), img=img)
_ = pcv.x_axis_pseudolandmarks(obj=np.array(([[89, 222]], [[252, 39]], [[89, 207]])),
mask=np.array(([[42, 161]], [[2, 47]], [[211, 222]])), img=img)
_ = pcv.x_axis_pseudolandmarks(obj=(), mask=mask, img=img)
# Test with debug = None
pcv.params.debug = None
top, bottom, center_v = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
pcv.outputs.clear()
    assert all([all(i == j for i, j in zip(np.shape(top), (20, 1, 2))),
                all(i == j for i, j in zip(np.shape(bottom), (20, 1, 2))),
                all(i == j for i, j in zip(np.shape(center_v), (20, 1, 2)))])
def test_plantcv_x_axis_pseudolandmarks_small_obj():
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL_PLANT))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL_PLANT), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR_SMALL_PLANT), encoding="latin1")
obj_contour = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_, _, _ = pcv.x_axis_pseudolandmarks(obj=[], mask=mask, img=img)
_, _, _ = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_, _, _ = pcv.x_axis_pseudolandmarks(obj=[], mask=mask, img=img)
top, bottom, center_v = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
    assert all([all(i == j for i, j in zip(np.shape(top), (20, 1, 2))),
                all(i == j for i, j in zip(np.shape(bottom), (20, 1, 2))),
                all(i == j for i, j in zip(np.shape(center_v), (20, 1, 2)))])
def test_plantcv_x_axis_pseudolandmarks_bad_input():
img = np.array([])
mask = np.array([])
obj_contour = np.array([])
pcv.params.debug = None
result = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
    assert all(i == j for i, j in zip(result, [("NA", "NA"), ("NA", "NA"), ("NA", "NA")]))
def test_plantcv_x_axis_pseudolandmarks_bad_obj_input():
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL_PLANT))
with pytest.raises(RuntimeError):
_ = pcv.x_axis_pseudolandmarks(obj=np.array([[-2, -2], [-2, -2]]), mask=np.array([[-2, -2], [-2, -2]]), img=img)
def test_plantcv_y_axis_pseudolandmarks():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_y_axis_pseudolandmarks_debug")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
obj_contour = contours_npz['arr_0']
pcv.params.debug = "print"
_ = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img, label="prefix")
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
pcv.outputs.clear()
_ = pcv.y_axis_pseudolandmarks(obj=[], mask=mask, img=img)
_ = pcv.y_axis_pseudolandmarks(obj=(), mask=mask, img=img)
_ = pcv.y_axis_pseudolandmarks(obj=np.array(([[89, 222]], [[252, 39]], [[89, 207]])),
mask=np.array(([[42, 161]], [[2, 47]], [[211, 222]])), img=img)
_ = pcv.y_axis_pseudolandmarks(obj=np.array(([[21, 11]], [[159, 155]], [[237, 11]])),
mask=np.array(([[38, 54]], [[144, 169]], [[81, 137]])), img=img)
# Test with debug = None
pcv.params.debug = None
left, right, center_h = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
pcv.outputs.clear()
    assert all([all(i == j for i, j in zip(np.shape(left), (20, 1, 2))),
                all(i == j for i, j in zip(np.shape(right), (20, 1, 2))),
                all(i == j for i, j in zip(np.shape(center_h), (20, 1, 2)))])
def test_plantcv_y_axis_pseudolandmarks_small_obj():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_y_axis_pseudolandmarks_debug")
os.mkdir(cache_dir)
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL_PLANT))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL_PLANT), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR_SMALL_PLANT), encoding="latin1")
obj_contour = contours_npz['arr_0']
# Test with debug = "print"
pcv.params.debug = "print"
_, _, _ = pcv.y_axis_pseudolandmarks(obj=[], mask=mask, img=img)
_, _, _ = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
# Test with debug = "plot"
pcv.params.debug = "plot"
pcv.outputs.clear()
left, right, center_h = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
pcv.outputs.clear()
    assert all([all(i == j for i, j in zip(np.shape(left), (20, 1, 2))),
                all(i == j for i, j in zip(np.shape(right), (20, 1, 2))),
                all(i == j for i, j in zip(np.shape(center_h), (20, 1, 2)))])
def test_plantcv_y_axis_pseudolandmarks_bad_input():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_y_axis_pseudolandmarks_debug")
os.mkdir(cache_dir)
img = np.array([])
mask = np.array([])
obj_contour = np.array([])
pcv.params.debug = None
result = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
pcv.outputs.clear()
    assert all(i == j for i, j in zip(result, [("NA", "NA"), ("NA", "NA"), ("NA", "NA")]))
def test_plantcv_y_axis_pseudolandmarks_bad_obj_input():
img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL_PLANT))
with pytest.raises(RuntimeError):
_ = pcv.y_axis_pseudolandmarks(obj=np.array([[-2, -2], [-2, -2]]), mask=np.array([[-2, -2], [-2, -2]]), img=img)
def test_plantcv_background_subtraction():
# List to hold result of all tests.
truths = []
fg_img = cv2.imread(os.path.join(TEST_DATA, TEST_FOREGROUND))
bg_img = cv2.imread(os.path.join(TEST_DATA, TEST_BACKGROUND))
big_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Testing if background subtraction is actually still working.
    # This should return an array whose sum is greater than zero
pcv.params.debug = None
fgmask = pcv.background_subtraction(background_image=bg_img, foreground_image=fg_img)
truths.append(np.sum(fgmask) > 0)
fgmask = pcv.background_subtraction(background_image=big_img, foreground_image=bg_img)
truths.append(np.sum(fgmask) > 0)
# The same foreground subtracted from itself should be 0
fgmask = pcv.background_subtraction(background_image=fg_img, foreground_image=fg_img)
truths.append(np.sum(fgmask) == 0)
# The same background subtracted from itself should be 0
fgmask = pcv.background_subtraction(background_image=bg_img, foreground_image=bg_img)
truths.append(np.sum(fgmask) == 0)
# All of these should be true for the function to pass testing.
assert (all(truths))
def test_plantcv_background_subtraction_debug():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_background_subtraction_debug")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# List to hold result of all tests.
truths = []
fg_img = cv2.imread(os.path.join(TEST_DATA, TEST_FOREGROUND))
bg_img = cv2.imread(os.path.join(TEST_DATA, TEST_BACKGROUND))
# Test with debug = "print"
pcv.params.debug = "print"
fgmask = pcv.background_subtraction(background_image=bg_img, foreground_image=fg_img)
truths.append(np.sum(fgmask) > 0)
# Test with debug = "plot"
pcv.params.debug = "plot"
fgmask = pcv.background_subtraction(background_image=bg_img, foreground_image=fg_img)
truths.append(np.sum(fgmask) > 0)
# All of these should be true for the function to pass testing.
assert (all(truths))
def test_plantcv_background_subtraction_bad_img_type():
fg_color = cv2.imread(os.path.join(TEST_DATA, TEST_FOREGROUND))
bg_gray = cv2.imread(os.path.join(TEST_DATA, TEST_BACKGROUND), 0)
pcv.params.debug = None
with pytest.raises(RuntimeError):
_ = pcv.background_subtraction(background_image=bg_gray, foreground_image=fg_color)
def test_plantcv_background_subtraction_different_sizes():
fg_img = cv2.imread(os.path.join(TEST_DATA, TEST_FOREGROUND))
bg_img = cv2.imread(os.path.join(TEST_DATA, TEST_BACKGROUND))
bg_shp = np.shape(bg_img) # type: tuple
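    # Note: np.shape returns (rows, cols, channels) while cv2.resize expects dsize as (width, height),
    # so the axes are effectively swapped below; for this test only a size mismatch with fg_img matters.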
bg_img_resized = cv2.resize(bg_img, (int(bg_shp[0] / 2), int(bg_shp[1] / 2)), interpolation=cv2.INTER_AREA)
pcv.params.debug = None
fgmask = pcv.background_subtraction(background_image=bg_img_resized, foreground_image=fg_img)
assert np.sum(fgmask) > 0
def test_plantcv_spatial_clustering_dbscan():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_spatial_clustering_dbscan")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI_MASK), -1)
pcv.params.debug = "print"
_ = pcv.spatial_clustering(img, algorithm="DBSCAN", min_cluster_size=10, max_distance=None)
pcv.params.debug = "plot"
spmask = pcv.spatial_clustering(img, algorithm="DBSCAN", min_cluster_size=10, max_distance=None)
assert len(spmask[1]) == 2
def test_plantcv_spatial_clustering_optics():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_spatial_clustering_optics")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI_MASK), -1)
pcv.params.debug = None
spmask = pcv.spatial_clustering(img, algorithm="OPTICS", min_cluster_size=100, max_distance=5000)
assert len(spmask[1]) == 2
def test_plantcv_spatial_clustering_badinput():
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI_MASK), -1)
pcv.params.debug = None
with pytest.raises(NameError):
_ = pcv.spatial_clustering(img, algorithm="Hydra", min_cluster_size=5, max_distance=100)
# ##############################
# Tests for the learn subpackage
# ##############################
def test_plantcv_learn_naive_bayes():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_learn_naive_bayes")
os.mkdir(cache_dir)
# Make image and mask directories in the cache directory
imgdir = os.path.join(cache_dir, "images")
maskdir = os.path.join(cache_dir, "masks")
if not os.path.exists(imgdir):
os.mkdir(imgdir)
if not os.path.exists(maskdir):
os.mkdir(maskdir)
    # Copy an image and a mask to the image/mask directories
shutil.copyfile(os.path.join(TEST_DATA, TEST_VIS_SMALL), os.path.join(imgdir, "image.png"))
shutil.copyfile(os.path.join(TEST_DATA, TEST_MASK_SMALL), os.path.join(maskdir, "image.png"))
# Run the naive Bayes training module
outfile = os.path.join(cache_dir, "naive_bayes_pdfs.txt")
plantcv.learn.naive_bayes(imgdir=imgdir, maskdir=maskdir, outfile=outfile, mkplots=True)
assert os.path.exists(outfile)
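# Illustrative sketch (not collected by pytest because of the leading underscore): one plausible way to
# consume the PDF file written by plantcv.learn.naive_bayes is pcv.naive_bayes_classifier, which, as far
# as I recall, takes an RGB image and the PDF file and returns one binary mask per trained class. Treat
# the exact call below as an assumption about typical downstream usage; it is not exercised in this file.
def _naive_bayes_classify_sketch(pdf_file):
    # Classify a test image with the trained probability density functions
    pcv.params.debug = None
    img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
    masks = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=pdf_file)
    return masks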
def test_plantcv_learn_naive_bayes_multiclass():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_learn_naive_bayes_multiclass")
os.mkdir(cache_dir)
# Run the naive Bayes multiclass training module
outfile = os.path.join(cache_dir, "naive_bayes_multiclass_pdfs.txt")
plantcv.learn.naive_bayes_multiclass(samples_file=os.path.join(TEST_DATA, TEST_SAMPLED_RGB_POINTS), outfile=outfile,
mkplots=True)
assert os.path.exists(outfile)
# ####################################
# Tests for the morphology subpackage
# ####################################
def test_plantcv_morphology_segment_curvature():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_curvature")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
pcv.params.debug = "print"
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
pcv.outputs.clear()
_ = pcv.morphology.segment_curvature(segmented_img, seg_objects, label="prefix")
pcv.params.debug = "plot"
pcv.outputs.clear()
_ = pcv.morphology.segment_curvature(segmented_img, seg_objects)
assert len(pcv.outputs.observations['default']['segment_curvature']['value']) == 22
def test_plantcv_morphology_check_cycles():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_branches")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
pcv.params.debug = "print"
_ = pcv.morphology.check_cycles(mask, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.check_cycles(mask)
pcv.params.debug = None
_ = pcv.morphology.check_cycles(mask)
assert pcv.outputs.observations['default']['num_cycles']['value'] == 1
def test_plantcv_morphology_find_branch_pts():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_branches")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pcv.params.debug = "print"
_ = pcv.morphology.find_branch_pts(skel_img=skeleton, mask=mask, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.find_branch_pts(skel_img=skeleton)
pcv.params.debug = None
branches = pcv.morphology.find_branch_pts(skel_img=skeleton)
assert np.sum(branches) == 9435
def test_plantcv_morphology_find_tips():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_tips")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pcv.params.debug = "print"
_ = pcv.morphology.find_tips(skel_img=skeleton, mask=mask, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.find_tips(skel_img=skeleton)
pcv.params.debug = None
tips = pcv.morphology.find_tips(skel_img=skeleton)
assert np.sum(tips) == 9435
def test_plantcv_morphology_prune():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_pruned")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pcv.params.debug = "print"
_ = pcv.morphology.prune(skel_img=skeleton, size=1)
pcv.params.debug = "plot"
_ = pcv.morphology.prune(skel_img=skeleton, size=1, mask=skeleton)
pcv.params.debug = None
pruned_img, _, _ = pcv.morphology.prune(skel_img=skeleton, size=3)
assert np.sum(pruned_img) < np.sum(skeleton)
def test_plantcv_morphology_prune_size0():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_pruned")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pruned_img, _, _ = pcv.morphology.prune(skel_img=skeleton, size=0)
assert np.sum(pruned_img) == np.sum(skeleton)
def test_plantcv_morphology_iterative_prune():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_pruned")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pruned_img = pcv.morphology._iterative_prune(skel_img=skeleton, size=3)
assert np.sum(pruned_img) < np.sum(skeleton)
def test_plantcv_morphology_segment_skeleton():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_skeleton")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pcv.params.debug = "print"
_ = pcv.morphology.segment_skeleton(skel_img=skeleton, mask=mask)
pcv.params.debug = "plot"
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
assert len(segment_objects) == 73
def test_plantcv_morphology_fill_segments():
# Clear previous outputs
pcv.outputs.clear()
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
obj_dic = np.load(os.path.join(TEST_DATA, TEST_SKELETON_OBJECTS))
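    # Collect every array stored in the npz archive into a list of contour objects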
obj = []
for key, val in obj_dic.items():
obj.append(val)
pcv.params.debug = None
_ = pcv.morphology.fill_segments(mask, obj)
tests = [pcv.outputs.observations['default']['segment_area']['value'][42] == 5529,
pcv.outputs.observations['default']['segment_area']['value'][20] == 5057,
pcv.outputs.observations['default']['segment_area']['value'][49] == 3323]
assert all(tests)
def test_plantcv_morphology_fill_segments_with_stem():
# Clear previous outputs
pcv.outputs.clear()
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
obj_dic = np.load(os.path.join(TEST_DATA, TEST_SKELETON_OBJECTS))
obj = []
for key, val in obj_dic.items():
obj.append(val)
stem_obj = obj[0:4]
pcv.params.debug = None
_ = pcv.morphology.fill_segments(mask, obj, stem_obj)
num_objects = len(pcv.outputs.observations['default']['leaf_area']['value'])
assert num_objects == 69
def test_plantcv_morphology_segment_angle():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_angles")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
pcv.params.debug = "print"
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
_ = pcv.morphology.segment_angle(segmented_img=segmented_img, objects=segment_objects, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.segment_angle(segmented_img, segment_objects)
assert len(pcv.outputs.observations['default']['segment_angle']['value']) == 22
def test_plantcv_morphology_segment_angle_overflow():
# Clear previous outputs
pcv.outputs.clear()
    # Skip pruning here; without the extra if statement in segment_angle this would normally cause an overflow error
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_angles")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
_ = pcv.morphology.segment_angle(segmented_img, segment_objects)
assert len(pcv.outputs.observations['default']['segment_angle']['value']) == 73
def test_plantcv_morphology_segment_euclidean_length():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_eu_length")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
pcv.params.debug = "print"
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
_ = pcv.morphology.segment_euclidean_length(segmented_img, segment_objects, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.segment_euclidean_length(segmented_img, segment_objects)
assert len(pcv.outputs.observations['default']['segment_eu_length']['value']) == 22
def test_plantcv_morphology_segment_euclidean_length_bad_input():
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
skel = pcv.morphology.skeletonize(mask=mask)
pcv.params.debug = None
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skel)
with pytest.raises(RuntimeError):
_ = pcv.morphology.segment_euclidean_length(segmented_img, segment_objects)
def test_plantcv_morphology_segment_path_length():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_path_length")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
pcv.params.debug = "print"
segmented_img, segment_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
_ = pcv.morphology.segment_path_length(segmented_img, segment_objects, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.segment_path_length(segmented_img, segment_objects)
assert len(pcv.outputs.observations['default']['segment_path_length']['value']) == 22
def test_plantcv_morphology_skeletonize():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_skeletonize")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
input_skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pcv.params.debug = "print"
_ = pcv.morphology.skeletonize(mask=mask)
pcv.params.debug = "plot"
_ = pcv.morphology.skeletonize(mask=mask)
pcv.params.debug = None
skeleton = pcv.morphology.skeletonize(mask=mask)
arr = np.array(skeleton == input_skeleton)
assert arr.all()
def test_plantcv_morphology_segment_sort():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_sort")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=skeleton)
pcv.params.debug = "print"
_ = pcv.morphology.segment_sort(skeleton, seg_objects, mask=skeleton)
pcv.params.debug = "plot"
leaf_obj, stem_obj = pcv.morphology.segment_sort(skeleton, seg_objects)
assert len(leaf_obj) == 36
def test_plantcv_morphology_segment_tangent_angle():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_tangent_angle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
objects = np.load(os.path.join(TEST_DATA, TEST_SKELETON_OBJECTS), encoding="latin1")
objs = [objects[arr_n] for arr_n in objects]
pcv.params.debug = "print"
_ = pcv.morphology.segment_tangent_angle(skel, objs, 2, label="prefix")
pcv.params.debug = "plot"
_ = pcv.morphology.segment_tangent_angle(skel, objs, 2)
assert len(pcv.outputs.observations['default']['segment_tangent_angle']['value']) == 73
def test_plantcv_morphology_segment_id():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_tangent_angle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
objects = np.load(os.path.join(TEST_DATA, TEST_SKELETON_OBJECTS), encoding="latin1")
objs = [objects[arr_n] for arr_n in objects]
pcv.params.debug = "print"
_ = pcv.morphology.segment_id(skel, objs)
pcv.params.debug = "plot"
_, labeled_img = pcv.morphology.segment_id(skel, objs, mask=skel)
assert np.sum(labeled_img) > np.sum(skel)
def test_plantcv_morphology_segment_insertion_angle():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_insertion_angle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pruned, _, _ = pcv.morphology.prune(skel_img=skeleton, size=6)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=pruned)
leaf_obj, stem_obj = pcv.morphology.segment_sort(pruned, seg_objects)
pcv.params.debug = "plot"
_ = pcv.morphology.segment_insertion_angle(pruned, segmented_img, leaf_obj, stem_obj, 3, label="prefix")
pcv.params.debug = "print"
_ = pcv.morphology.segment_insertion_angle(pruned, segmented_img, leaf_obj, stem_obj, 10)
assert pcv.outputs.observations['default']['segment_insertion_angle']['value'][:6] == ['NA', 'NA', 'NA',
24.956918822001636,
50.7313343343401,
56.427712102130734]
def test_plantcv_morphology_segment_insertion_angle_bad_stem():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_insertion_angle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pruned, _, _ = pcv.morphology.prune(skel_img=skeleton, size=5)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=pruned)
leaf_obj, stem_obj = pcv.morphology.segment_sort(pruned, seg_objects)
stem_obj = [leaf_obj[0], leaf_obj[10]]
with pytest.raises(RuntimeError):
_ = pcv.morphology.segment_insertion_angle(pruned, segmented_img, leaf_obj, stem_obj, 10)
def test_plantcv_morphology_segment_combine():
skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=skel)
pcv.params.debug = "plot"
# Test with list of IDs input
_, new_objects = pcv.morphology.segment_combine([0, 1], seg_objects, skel)
assert len(new_objects) + 1 == len(seg_objects)
def test_plantcv_morphology_segment_combine_lists():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_insertion_angle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=skel)
pcv.params.debug = "print"
# Test with list of lists input
_, new_objects = pcv.morphology.segment_combine([[0, 1, 2], [3, 4]], seg_objects, skel)
assert len(new_objects) + 3 == len(seg_objects)
def test_plantcv_morphology_segment_combine_bad_input():
skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=skel)
pcv.params.debug = "plot"
with pytest.raises(RuntimeError):
_, new_objects = pcv.morphology.segment_combine([0.5, 1.5], seg_objects, skel)
def test_plantcv_morphology_analyze_stem():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_analyze_stem")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pruned, segmented_img, _ = pcv.morphology.prune(skel_img=skeleton, size=6)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=pruned)
leaf_obj, stem_obj = pcv.morphology.segment_sort(pruned, seg_objects)
pcv.params.debug = "plot"
_ = pcv.morphology.analyze_stem(rgb_img=segmented_img, stem_objects=stem_obj, label="prefix")
pcv.params.debug = "print"
_ = pcv.morphology.analyze_stem(rgb_img=segmented_img, stem_objects=stem_obj)
assert pcv.outputs.observations['default']['stem_angle']['value'] == -12.531776428222656
def test_plantcv_morphology_analyze_stem_bad_angle():
# Clear previous outputs
pcv.outputs.clear()
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_insertion_angle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
pruned, _, _ = pcv.morphology.prune(skel_img=skeleton, size=5)
segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=pruned)
_, _ = pcv.morphology.segment_sort(pruned, seg_objects)
    # Use a hard-coded vertical segment as the "stem" (instead of a detected stem object)
    # so the fitted line produces an extreme angle value
    stem_obj = [[[[1116, 1728]], [[1116, 1]]]]
_ = pcv.morphology.analyze_stem(rgb_img=segmented_img, stem_objects=stem_obj)
assert pcv.outputs.observations['default']['stem_angle']['value'] == 22877334.0
# ########################################
# Tests for the hyperspectral subpackage
# ########################################
def test_plantcv_hyperspectral_read_data_default():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_read_data_default")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = "plot"
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
_ = pcv.hyperspectral.read_data(filename=spectral_filename)
pcv.params.debug = "print"
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
assert np.shape(array_data.array_data) == (1, 1600, 978)
def test_plantcv_hyperspectral_read_data_no_default_bands():
pcv.params.debug = "plot"
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA_NO_DEFAULT)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
assert np.shape(array_data.array_data) == (1, 1600, 978)
def test_plantcv_hyperspectral_read_data_approx_pseudorgb():
pcv.params.debug = "plot"
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA_APPROX_PSEUDO)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
assert np.shape(array_data.array_data) == (1, 1600, 978)
def test_plantcv_hyperspectral_read_data_bad_interleave():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA_BAD_INTERLEAVE)
with pytest.raises(RuntimeError):
_ = pcv.hyperspectral.read_data(filename=spectral_filename)
def test_plantcv_spectral_index_ndvi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_ndvi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ndvi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_ndvi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ndvi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.ndvi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_gdvi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_gdvi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.gdvi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_gdvi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.gdvi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.gdvi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_savi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_savi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_savi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.savi(hsi=index_array, distance=20)
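# Consolidation sketch (not collected by pytest because of the leading underscore): the spectral index
# tests above and below all follow the same read-compute-check pattern, so they could be expressed as a
# single parametrized test. The index names listed here are only ones already exercised in this file.
@pytest.mark.parametrize("index_name", ["ndvi", "gdvi", "savi"])
def _spectral_index_parametrized_sketch(index_name):
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    # Look up the index function by name and run the same shape/pseudo-RGB checks as the tests above
    index_fn = getattr(pcv.spectral_index, index_name)
    index_array = index_fn(hsi=array_data, distance=20)
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255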
def test_plantcv_spectral_index_pri():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_pri")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pri(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_pri_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pri(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.pri(hsi=index_array, distance=20)
def test_plantcv_spectral_index_ari():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_ari")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ari(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_ari_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ari(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.ari(hsi=index_array, distance=20)
def test_plantcv_spectral_index_ci_rededge():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_ci_rededge")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ci_rededge(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_ci_rededge_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ci_rededge(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.ci_rededge(hsi=index_array, distance=20)
def test_plantcv_spectral_index_cri550():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_cri550")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.cri550(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_cri550_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.cri550(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.cri550(hsi=index_array, distance=20)
def test_plantcv_spectral_index_cri700():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_cri700")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.cri700(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_cri700_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.cri700(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.cri700(hsi=index_array, distance=20)
def test_plantcv_spectral_index_egi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_egi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
index_array = pcv.spectral_index.egi(rgb_img=rgb_img)
assert np.shape(index_array.array_data) == (2056, 2454) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_evi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_evi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.evi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_evi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.evi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.evi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_mari():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_mari")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.mari(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_mari_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.mari(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.mari(hsi=index_array, distance=20)
def test_plantcv_spectral_index_mcari():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_mcari")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.mcari(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_mcari_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.mcari(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.mcari(hsi=index_array, distance=20)
def test_plantcv_spectral_index_mtci():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_mtci")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.mtci(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_mtci_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.mtci(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.mtci(hsi=index_array, distance=20)
def test_plantcv_spectral_index_ndre():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_ndre")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ndre(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_ndre_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.ndre(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.ndre(hsi=index_array, distance=20)
def test_plantcv_spectral_index_psnd_chla():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_psnd_chla")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.psnd_chla(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_psnd_chla_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.psnd_chla(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.psnd_chla(hsi=index_array, distance=20)
def test_plantcv_spectral_index_psnd_chlb():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_psnd_chlb")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.psnd_chlb(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_psnd_chlb_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.psnd_chlb(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.psnd_chlb(hsi=index_array, distance=20)
def test_plantcv_spectral_index_psnd_car():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_psnd_car")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.psnd_car(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_psnd_car_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.psnd_car(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.psnd_car(hsi=index_array, distance=20)
def test_plantcv_spectral_index_psri():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_psri")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.psri(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_psri_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.psri(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.psri(hsi=index_array, distance=20)
def test_plantcv_spectral_index_pssr_chla():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_pssr_chla")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pssr_chla(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_pssr_chla_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pssr_chla(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.pssr_chla(hsi=index_array, distance=20)
def test_plantcv_spectral_index_pssr_chlb():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_pssr_chlb")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pssr_chlb(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_pssr_chlb_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pssr_chlb(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.pssr_chlb(hsi=index_array, distance=20)
def test_plantcv_spectral_index_pssr_car():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_pssr_car")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pssr_car(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_pssr_car_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.pssr_car(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.pssr_car(hsi=index_array, distance=20)
def test_plantcv_spectral_index_rgri():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_rgri")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.rgri(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_rgri_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.rgri(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.rgri(hsi=index_array, distance=20)
def test_plantcv_spectral_index_rvsi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_rvsi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.rvsi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_rvsi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.rvsi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.rvsi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_sipi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_sipi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.sipi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_sipi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.sipi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.sipi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_sr():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_sr")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.sr(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_sr_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.sr(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.sr(hsi=index_array, distance=20)
def test_plantcv_spectral_index_vari():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_vari")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.vari(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_vari_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.vari(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.vari(hsi=index_array, distance=20)
def test_plantcv_spectral_index_vi_green():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_vi_green")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.vi_green(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_vi_green_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.vi_green(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.vi_green(hsi=index_array, distance=20)
def test_plantcv_spectral_index_wi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_wi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.wi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_wi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.wi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.wi(hsi=index_array, distance=20)
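# The per-index tests above all follow the same read -> compute -> check-shape pattern.
# The parametrized test below is an illustrative consolidation of that pattern (a sketch,
# not a replacement for the tests above); it reuses the module-level HYPERSPECTRAL_TEST_DATA
# and HYPERSPECTRAL_DATA constants defined earlier in this file.
@pytest.mark.parametrize("index_name", ["mcari", "mtci", "ndre", "sipi", "sr", "vari", "wi"])
def test_plantcv_spectral_index_parametrized_sketch(index_name):
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    # Look up the index function by name and run it with the same settings used above
    index_array = getattr(pcv.spectral_index, index_name)(hsi=array_data, distance=20)
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255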
def test_plantcv_hyperspectral_analyze_spectral():
# Clear previous outputs
pcv.outputs.clear()
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_analyze_spectral")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
mask = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK), -1)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
# pcv.params.debug = "plot"
# _ = pcv.hyperspectral.analyze_spectral(array=array_data, mask=mask, histplot=True)
# pcv.params.debug = "print"
# _ = pcv.hyperspectral.analyze_spectral(array=array_data, mask=mask, histplot=True, label="prefix")
pcv.params.debug = None
_ = pcv.hyperspectral.analyze_spectral(array=array_data, mask=mask, histplot=True, label="prefix")
assert len(pcv.outputs.observations['prefix']['spectral_frequencies']['value']) == 978
def test_plantcv_hyperspectral_analyze_index():
# Clear previous outputs
pcv.outputs.clear()
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_analyze_index")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=801)
mask_img = np.ones(np.shape(index_array.array_data), dtype=np.uint8) * 255
# pcv.params.debug = "print"
# pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img, histplot=True)
# pcv.params.debug = "plot"
# pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img, histplot=True)
pcv.params.debug = None
pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img, histplot=True)
assert pcv.outputs.observations['default']['mean_index_savi']['value'] > 0
def test_plantcv_hyperspectral_analyze_index_set_range():
# Clear previous outputs
pcv.outputs.clear()
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_analyze_index_set_range")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=801)
mask_img = np.ones(np.shape(index_array.array_data), dtype=np.uint8) * 255
pcv.params.debug = None
pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img, histplot=True, min_bin=0, max_bin=1)
assert pcv.outputs.observations['default']['mean_index_savi']['value'] > 0
def test_plantcv_hyperspectral_analyze_index_auto_range():
# Clear previous outputs
pcv.outputs.clear()
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_analyze_index_auto_range")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=801)
mask_img = np.ones(np.shape(index_array.array_data), dtype=np.uint8) * 255
pcv.params.debug = None
pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img, min_bin="auto", max_bin="auto")
assert pcv.outputs.observations['default']['mean_index_savi']['value'] > 0
def test_plantcv_hyperspectral_analyze_index_outside_range_warning():
import io
from contextlib import redirect_stdout
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_analyze_index_auto_range")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=801)
mask_img = np.ones(np.shape(index_array.array_data), dtype=np.uint8) * 255
f = io.StringIO()
with redirect_stdout(f):
pcv.params.debug = None
pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img, min_bin=.5, max_bin=.55, label="i")
out = f.getvalue()
# assert os.listdir(cache_dir) is 0
assert out[0:10] == 'WARNING!!!'
def test_plantcv_hyperspectral_analyze_index_bad_input_mask():
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=801)
mask_img = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK))
with pytest.raises(RuntimeError):
pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img)
def test_plantcv_hyperspectral_analyze_index_bad_input_index():
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.savi(hsi=array_data, distance=801)
mask_img = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK), -1)
index_array.array_data = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK))
with pytest.raises(RuntimeError):
pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img)
def test_plantcv_hyperspectral_analyze_index_bad_input_datatype():
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
mask_img = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK), -1)
with pytest.raises(RuntimeError):
pcv.hyperspectral.analyze_index(index_array=array_data, mask=mask_img)
def test_plantcv_hyperspectral_calibrate():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_calibrate")
os.mkdir(cache_dir)
raw = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
white = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_WHITE)
dark = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DARK)
raw = pcv.hyperspectral.read_data(filename=raw)
white = pcv.hyperspectral.read_data(filename=white)
dark = pcv.hyperspectral.read_data(filename=dark)
pcv.params.debug = "plot"
_ = pcv.hyperspectral.calibrate(raw_data=raw, white_reference=white, dark_reference=dark)
pcv.params.debug = "print"
calibrated = pcv.hyperspectral.calibrate(raw_data=raw, white_reference=white, dark_reference=dark)
assert np.shape(calibrated.array_data) == (1, 1600, 978)
def test_plantcv_hyperspectral_extract_wavelength():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_extract_wavelength")
os.mkdir(cache_dir)
spectral = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
spectral = pcv.hyperspectral.read_data(filename=spectral)
pcv.params.debug = "plot"
_ = pcv.hyperspectral.extract_wavelength(spectral_data=spectral, wavelength=500)
pcv.params.debug = "print"
new = pcv.hyperspectral.extract_wavelength(spectral_data=spectral, wavelength=500)
assert np.shape(new.array_data) == (1, 1600)
def test_plantcv_hyperspectral_avg_reflectance():
spectral = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
mask_img = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK), -1)
spectral = pcv.hyperspectral.read_data(filename=spectral)
avg_reflect = pcv.hyperspectral._avg_reflectance(spectral, mask=mask_img)
assert len(avg_reflect) == 978
def test_plantcv_hyperspectral_inverse_covariance():
spectral = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
spectral = pcv.hyperspectral.read_data(filename=spectral)
inv_cov = pcv.hyperspectral._inverse_covariance(spectral)
assert np.shape(inv_cov) == (978, 978)
# ########################################
# Tests for the photosynthesis subpackage
# ########################################
def test_plantcv_photosynthesis_read_dat():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_photosynthesis_read_dat")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = "plot"
fluor_filename = os.path.join(FLUOR_TEST_DATA, FLUOR_IMG)
_, _, _ = pcv.photosynthesis.read_cropreporter(filename=fluor_filename)
pcv.params.debug = "print"
fdark, fmin, fmax = pcv.photosynthesis.read_cropreporter(filename=fluor_filename)
assert np.sum(fmin) < np.sum(fmax)
def test_plantcv_photosynthesis_analyze_fvfm():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_fvfm")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# filename = os.path.join(cache_dir, 'plantcv_fvfm_hist.png')
# Read in test data
fdark = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FDARK), -1)
fmin = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMIN), -1)
fmax = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMAX), -1)
fmask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMASK), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.photosynthesis.analyze_fvfm(fdark=fdark, fmin=fmin, fmax=fmax, mask=fmask, bins=1000, label="prefix")
# Test with debug = "plot"
pcv.params.debug = "plot"
fvfm_images = pcv.photosynthesis.analyze_fvfm(fdark=fdark, fmin=fmin, fmax=fmax, mask=fmask, bins=1000)
assert len(fvfm_images) != 0
def test_plantcv_photosynthesis_analyze_fvfm_print_analysis_results():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_fvfm")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
fdark = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FDARK), -1)
fmin = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMIN), -1)
fmax = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMAX), -1)
fmask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMASK), -1)
_ = pcv.photosynthesis.analyze_fvfm(fdark=fdark, fmin=fmin, fmax=fmax, mask=fmask, bins=1000)
result_file = os.path.join(cache_dir, "results.txt")
pcv.print_results(result_file)
pcv.outputs.clear()
assert os.path.exists(result_file)
def test_plantcv_photosynthesis_analyze_fvfm_bad_fdark():
# Clear previous outputs
pcv.outputs.clear()
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_fvfm")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
fdark = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FDARK), -1)
fmin = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMIN), -1)
fmax = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMAX), -1)
fmask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMASK), -1)
_ = pcv.photosynthesis.analyze_fvfm(fdark=fdark + 3000, fmin=fmin, fmax=fmax, mask=fmask, bins=1000)
check = pcv.outputs.observations['default']['fdark_passed_qc']['value'] is False
assert check
def test_plantcv_photosynthesis_analyze_fvfm_bad_input():
fdark = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
fmin = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMIN), -1)
fmax = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMAX), -1)
fmask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMASK), -1)
with pytest.raises(RuntimeError):
_ = pcv.photosynthesis.analyze_fvfm(fdark=fdark, fmin=fmin, fmax=fmax, mask=fmask, bins=1000)
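# Illustrative sketch (not the PlantCV implementation): the quantity summarized by
# analyze_fvfm is conventionally Fv/Fm = (Fmax - Fmin) / Fmax, evaluated per pixel
# inside the mask. The hypothetical helper below shows that arithmetic with plain NumPy,
# assuming fmin, fmax, and mask are same-shaped grayscale frames.
def _fvfm_sketch(fmin, fmax, mask):
    fmin = fmin.astype(np.float64)
    fmax = fmax.astype(np.float64)
    fv = np.clip(fmax - fmin, 0, None)
    with np.errstate(divide="ignore", invalid="ignore"):
        fvfm = np.where(fmax > 0, fv / fmax, 0)
    # Return only the masked (plant) pixels
    return fvfm[mask > 0]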
# ##############################
# Tests for the roi subpackage
# ##############################
def test_plantcv_roi_from_binary_image():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi_from_binary_image")
os.mkdir(cache_dir)
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Create a binary image
bin_img = np.zeros(np.shape(rgb_img)[0:2], dtype=np.uint8)
cv2.rectangle(bin_img, (100, 100), (1000, 1000), 255, -1)
# Test with debug = "print"
pcv.params.debug = "print"
pcv.params.debug_outdir = cache_dir
_, _ = pcv.roi.from_binary_image(bin_img=bin_img, img=rgb_img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_, _ = pcv.roi.from_binary_image(bin_img=bin_img, img=rgb_img)
# Test with debug = None
pcv.params.debug = None
roi_contour, roi_hierarchy = pcv.roi.from_binary_image(bin_img=bin_img, img=rgb_img)
# Assert the contours and hierarchy lists contain only the ROI
assert np.shape(roi_contour) == (1, 3600, 1, 2)
def test_plantcv_roi_from_binary_image_grayscale_input():
# Read in a test grayscale image
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Create a binary image
bin_img = np.zeros(np.shape(gray_img)[0:2], dtype=np.uint8)
cv2.rectangle(bin_img, (100, 100), (1000, 1000), 255, -1)
# Test with debug = "plot"
pcv.params.debug = "plot"
roi_contour, roi_hierarchy = pcv.roi.from_binary_image(bin_img=bin_img, img=gray_img)
# Assert the contours and hierarchy lists contain only the ROI
assert np.shape(roi_contour) == (1, 3600, 1, 2)
def test_plantcv_roi_from_binary_image_bad_binary_input():
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Binary input is required but an RGB input is provided
with pytest.raises(RuntimeError):
_, _ = pcv.roi.from_binary_image(bin_img=rgb_img, img=rgb_img)
def test_plantcv_roi_rectangle():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi_rectangle")
os.mkdir(cache_dir)
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
pcv.params.debug_outdir = cache_dir
_, _ = pcv.roi.rectangle(x=100, y=100, h=500, w=500, img=rgb_img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_, _ = pcv.roi.rectangle(x=100, y=100, h=500, w=500, img=rgb_img)
# Test with debug = None
pcv.params.debug = None
roi_contour, roi_hierarchy = pcv.roi.rectangle(x=100, y=100, h=500, w=500, img=rgb_img)
# Assert the contours and hierarchy lists contain only the ROI
assert np.shape(roi_contour) == (1, 4, 1, 2)
def test_plantcv_roi_rectangle_grayscale_input():
# Read in a test grayscale image
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "plot"
pcv.params.debug = "plot"
roi_contour, roi_hierarchy = pcv.roi.rectangle(x=100, y=100, h=500, w=500, img=gray_img)
# Assert the contours and hierarchy lists contain only the ROI
assert np.shape(roi_contour) == (1, 4, 1, 2)
def test_plantcv_roi_rectangle_out_of_frame():
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# The resulting rectangle needs to be within the dimensions of the image
with pytest.raises(RuntimeError):
_, _ = pcv.roi.rectangle(x=100, y=100, h=500, w=3000, img=rgb_img)
def test_plantcv_roi_circle():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi_circle")
os.mkdir(cache_dir)
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
pcv.params.debug_outdir = cache_dir
_, _ = pcv.roi.circle(x=100, y=100, r=50, img=rgb_img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_, _ = pcv.roi.circle(x=100, y=100, r=50, img=rgb_img)
# Test with debug = None
pcv.params.debug = None
roi_contour, roi_hierarchy = pcv.roi.circle(x=200, y=225, r=75, img=rgb_img)
# Assert the contours and hierarchy lists contain only the ROI
assert np.shape(roi_contour) == (1, 424, 1, 2)
def test_plantcv_roi_circle_grayscale_input():
# Read in a test grayscale image
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "plot"
pcv.params.debug = "plot"
roi_contour, roi_hierarchy = pcv.roi.circle(x=200, y=225, r=75, img=gray_img)
# Assert the contours and hierarchy lists contain only the ROI
assert np.shape(roi_contour) == (1, 424, 1, 2)
def test_plantcv_roi_circle_out_of_frame():
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    # The resulting circle needs to be within the dimensions of the image
with pytest.raises(RuntimeError):
_, _ = pcv.roi.circle(x=50, y=225, r=75, img=rgb_img)
def test_plantcv_roi_ellipse():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi_ellipse")
os.mkdir(cache_dir)
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
pcv.params.debug_outdir = cache_dir
_, _ = pcv.roi.ellipse(x=200, y=200, r1=75, r2=50, angle=0, img=rgb_img)
# Test with debug = "plot"
pcv.params.debug = "plot"
_, _ = pcv.roi.ellipse(x=200, y=200, r1=75, r2=50, angle=0, img=rgb_img)
# Test with debug = None
pcv.params.debug = None
roi_contour, roi_hierarchy = pcv.roi.ellipse(x=200, y=200, r1=75, r2=50, angle=0, img=rgb_img)
# Assert the contours and hierarchy lists contain only the ROI
assert np.shape(roi_contour) == (1, 360, 1, 2)
def test_plantcv_roi_ellipse_grayscale_input():
# Read in a test grayscale image
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "plot"
pcv.params.debug = "plot"
roi_contour, roi_hierarchy = pcv.roi.ellipse(x=200, y=200, r1=75, r2=50, angle=0, img=gray_img)
# Assert the contours and hierarchy lists contain only the ROI
assert np.shape(roi_contour) == (1, 360, 1, 2)
def test_plantcv_roi_ellipse_out_of_frame():
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    # The resulting ellipse needs to be within the dimensions of the image
with pytest.raises(RuntimeError):
_, _ = pcv.roi.ellipse(x=50, y=225, r1=75, r2=50, angle=0, img=rgb_img)
def test_plantcv_roi_multi():
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.roi.multi(rgb_img, coord=[(25, 120), (100, 100)], radius=20)
# Test with debug = None
pcv.params.debug = None
rois1, roi_hierarchy1 = pcv.roi.multi(rgb_img, coord=(25, 120), radius=20, spacing=(10, 10), nrows=3, ncols=6)
    # Assert that the 3 x 6 grid of coordinates produced 18 ROIs
assert len(rois1) == 18
def test_plantcv_roi_multi_bad_input():
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# The user must input a list of custom coordinates OR inputs to make a grid. Not both
with pytest.raises(RuntimeError):
_, _ = pcv.roi.multi(rgb_img, coord=[(25, 120), (100, 100)], radius=20, spacing=(10, 10), nrows=3, ncols=6)
def test_plantcv_roi_multi_bad_input_oob():
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    # Inputs to make a grid create ROIs that go off the screen
with pytest.raises(RuntimeError):
_, _ = pcv.roi.multi(rgb_img, coord=(25000, 12000), radius=2, spacing=(1, 1), nrows=3, ncols=6)
def test_plantcv_roi_multi_bad_input_oob_list():
# Read in test RGB image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    # All vertices in the list of centers must draw ROIs that are inside the image
with pytest.raises(RuntimeError):
_, _ = pcv.roi.multi(rgb_img, coord=[(25000, 25000), (25000, 12000), (12000, 12000)], radius=20)
def test_plantcv_roi_custom():
# Read in test RGB image
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
pcv.params.debug = "plot"
cnt, hier = pcv.roi.custom(img=img, vertices=[[226, 1], [313, 184], [240, 202], [220, 229], [161, 171]])
assert np.shape(cnt) == (1, 5, 2)
def test_plantcv_roi_custom_bad_input():
# Read in test RGB image
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# ROI goes out of bounds
with pytest.raises(RuntimeError):
_ = pcv.roi.custom(img=img, vertices=[[226, -1], [3130, 1848], [2404, 2029], [2205, 2298], [1617, 1761]])
# ##############################
# Tests for the transform subpackage
# ##############################
def test_plantcv_transform_get_color_matrix():
# load in target_matrix
matrix_file = np.load(os.path.join(TEST_DATA, TEST_TARGET_MATRIX), encoding="latin1")
matrix_compare = matrix_file['arr_0']
# Read in rgb_img and gray-scale mask
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_MASK), -1)
# The result should be a len(np.unique(mask))-1 x 4 matrix
headers, matrix = pcv.transform.get_color_matrix(rgb_img, mask)
assert np.array_equal(matrix, matrix_compare)
def test_plantcv_transform_get_color_matrix_img():
# Read in two gray-scale images
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_MASK), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_MASK), -1)
# The input for rgb_img needs to be an RGB image
with pytest.raises(RuntimeError):
_, _ = pcv.transform.get_color_matrix(rgb_img, mask)
def test_plantcv_transform_get_color_matrix_mask():
    # Read in an RGB image and a mask loaded as a 3-channel image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_MASK))
    # The input for mask needs to be a gray-scale image
with pytest.raises(RuntimeError):
_, _ = pcv.transform.get_color_matrix(rgb_img, mask)
def test_plantcv_transform_get_matrix_m():
# load in comparison matrices
matrix_m_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_M1), encoding="latin1")
matrix_compare_m = matrix_m_file['arr_0']
matrix_b_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_B1), encoding="latin1")
matrix_compare_b = matrix_b_file['arr_0']
# read in matrices
t_matrix_file = np.load(os.path.join(TEST_DATA, TEST_TARGET_MATRIX), encoding="latin1")
t_matrix = t_matrix_file['arr_0']
s_matrix_file = np.load(os.path.join(TEST_DATA, TEST_SOURCE1_MATRIX), encoding="latin1")
s_matrix = s_matrix_file['arr_0']
# apply matrices to function
matrix_a, matrix_m, matrix_b = pcv.transform.get_matrix_m(t_matrix, s_matrix)
matrix_compare_m = np.rint(matrix_compare_m)
matrix_compare_b = np.rint(matrix_compare_b)
matrix_m = np.rint(matrix_m)
matrix_b = np.rint(matrix_b)
assert np.array_equal(matrix_m, matrix_compare_m) and np.array_equal(matrix_b, matrix_compare_b)
def test_plantcv_transform_get_matrix_m_unequal_data():
# load in comparison matrices
matrix_m_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_M2), encoding="latin1")
matrix_compare_m = matrix_m_file['arr_0']
matrix_b_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_B2), encoding="latin1")
matrix_compare_b = matrix_b_file['arr_0']
# read in matrices
t_matrix_file = np.load(os.path.join(TEST_DATA, TEST_TARGET_MATRIX), encoding="latin1")
t_matrix = t_matrix_file['arr_0']
s_matrix_file = np.load(os.path.join(TEST_DATA, TEST_SOURCE2_MATRIX), encoding="latin1")
s_matrix = s_matrix_file['arr_0']
# apply matrices to function
matrix_a, matrix_m, matrix_b = pcv.transform.get_matrix_m(t_matrix, s_matrix)
matrix_compare_m = np.rint(matrix_compare_m)
matrix_compare_b = np.rint(matrix_compare_b)
matrix_m = np.rint(matrix_m)
matrix_b = np.rint(matrix_b)
assert np.array_equal(matrix_m, matrix_compare_m) and np.array_equal(matrix_b, matrix_compare_b)
def test_plantcv_transform_calc_transformation_matrix():
# load in comparison matrices
matrix_file = np.load(os.path.join(TEST_DATA, TEST_TRANSFORM1), encoding="latin1")
matrix_compare = matrix_file['arr_0']
# read in matrices
matrix_m_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_M1), encoding="latin1")
matrix_m = matrix_m_file['arr_0']
matrix_b_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_B1), encoding="latin1")
matrix_b = matrix_b_file['arr_0']
# apply to function
_, matrix_t = pcv.transform.calc_transformation_matrix(matrix_m, matrix_b)
matrix_t = np.rint(matrix_t)
matrix_compare = np.rint(matrix_compare)
assert np.array_equal(matrix_t, matrix_compare)
def test_plantcv_transform_calc_transformation_matrix_b_incorrect():
# read in matrices
matrix_m_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_M1), encoding="latin1")
matrix_m = matrix_m_file['arr_0']
matrix_b_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_B1), encoding="latin1")
matrix_b = matrix_b_file['arr_0']
matrix_b = np.asmatrix(matrix_b, float)
with pytest.raises(RuntimeError):
_, _ = pcv.transform.calc_transformation_matrix(matrix_m, matrix_b.T)
def test_plantcv_transform_calc_transformation_matrix_not_mult():
# read in matrices
matrix_m_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_M1), encoding="latin1")
matrix_m = matrix_m_file['arr_0']
matrix_b_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_B1), encoding="latin1")
matrix_b = matrix_b_file['arr_0']
with pytest.raises(RuntimeError):
_, _ = pcv.transform.calc_transformation_matrix(matrix_m, matrix_b[:3])
def test_plantcv_transform_calc_transformation_matrix_not_mat():
# read in matrices
matrix_m_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_M1), encoding="latin1")
matrix_m = matrix_m_file['arr_0']
matrix_b_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_B1), encoding="latin1")
matrix_b = matrix_b_file['arr_0']
with pytest.raises(RuntimeError):
_, _ = pcv.transform.calc_transformation_matrix(matrix_m[:, 1], matrix_b[:, 1])
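# Conceptually, the matrices exercised above map averaged source color-chip values onto
# target values. The hypothetical helper below sketches a comparable mapping with an
# ordinary least-squares fit (this is NOT PlantCV's calc_transformation_matrix); it
# assumes n_chips x 4 color matrices whose last three columns are averaged R, G, B
# values, as produced by get_color_matrix.
def _lstsq_color_transform_sketch(t_matrix, s_matrix):
    source_rgb = s_matrix[:, 1:4]
    target_rgb = t_matrix[:, 1:4]
    # Solve source_rgb @ m ~= target_rgb in the least-squares sense; m is 3 x 3
    m, _, _, _ = np.linalg.lstsq(source_rgb, target_rgb, rcond=None)
    return m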
def test_plantcv_transform_apply_transformation():
# load corrected image to compare
corrected_compare = cv2.imread(os.path.join(TEST_DATA, TEST_S1_CORRECTED))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform")
os.mkdir(cache_dir)
# Make image and mask directories in the cache directory
imgdir = os.path.join(cache_dir, "images")
# read in matrices
matrix_t_file = np.load(os.path.join(TEST_DATA, TEST_TRANSFORM1), encoding="latin1")
matrix_t = matrix_t_file['arr_0']
# read in images
target_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
source_img = cv2.imread(os.path.join(TEST_DATA, TEST_SOURCE1_IMG))
# Test with debug = "print"
pcv.params.debug = "print"
pcv.params.debug_outdir = imgdir
_ = pcv.transform.apply_transformation_matrix(source_img, target_img, matrix_t)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.transform.apply_transformation_matrix(source_img, target_img, matrix_t)
# Test with debug = None
pcv.params.debug = None
corrected_img = pcv.transform.apply_transformation_matrix(source_img, target_img, matrix_t)
    # Assert the corrected image matches the expected corrected image
assert np.array_equal(corrected_img, corrected_compare)
def test_plantcv_transform_apply_transformation_incorrect_t():
# read in matrices
matrix_t_file = np.load(os.path.join(TEST_DATA, TEST_MATRIX_B1), encoding="latin1")
matrix_t = matrix_t_file['arr_0']
# read in images
target_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
source_img = cv2.imread(os.path.join(TEST_DATA, TEST_SOURCE1_IMG))
with pytest.raises(RuntimeError):
_ = pcv.transform.apply_transformation_matrix(source_img, target_img, matrix_t)
def test_plantcv_transform_apply_transformation_incorrect_img():
# read in matrices
matrix_t_file = np.load(os.path.join(TEST_DATA, TEST_TRANSFORM1), encoding="latin1")
matrix_t = matrix_t_file['arr_0']
# read in images
target_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
source_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_MASK), -1)
with pytest.raises(RuntimeError):
_ = pcv.transform.apply_transformation_matrix(source_img, target_img, matrix_t)
def test_plantcv_transform_save_matrix():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform")
os.mkdir(cache_dir)
# read in matrix
matrix_t_file = np.load(os.path.join(TEST_DATA, TEST_TRANSFORM1), encoding="latin1")
matrix_t = matrix_t_file['arr_0']
# .npz filename
filename = os.path.join(cache_dir, 'test.npz')
pcv.transform.save_matrix(matrix_t, filename)
assert os.path.exists(filename) is True
def test_plantcv_transform_save_matrix_incorrect_filename():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform")
os.mkdir(cache_dir)
# read in matrix
matrix_t_file = np.load(os.path.join(TEST_DATA, TEST_TRANSFORM1), encoding="latin1")
matrix_t = matrix_t_file['arr_0']
# .npz filename
filename = "test"
with pytest.raises(RuntimeError):
pcv.transform.save_matrix(matrix_t, filename)
def test_plantcv_transform_load_matrix():
# read in matrix_t
matrix_t_file = np.load(os.path.join(TEST_DATA, TEST_TRANSFORM1), encoding="latin1")
matrix_t = matrix_t_file['arr_0']
# test load function with matrix_t
matrix_t_loaded = pcv.transform.load_matrix(os.path.join(TEST_DATA, TEST_TRANSFORM1))
assert np.array_equal(matrix_t, matrix_t_loaded)
def test_plantcv_transform_correct_color():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform")
os.mkdir(cache_dir)
# load corrected image to compare
corrected_compare = cv2.imread(os.path.join(TEST_DATA, TEST_S1_CORRECTED))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_correct_color")
os.mkdir(cache_dir)
# Make image and mask directories in the cache directory
imgdir = os.path.join(cache_dir, "images")
matdir = os.path.join(cache_dir, "saved_matrices")
# Read in target, source, and gray-scale mask
target_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
source_img = cv2.imread(os.path.join(TEST_DATA, TEST_SOURCE1_IMG))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_MASK), -1)
output_path = os.path.join(matdir)
# Test with debug = "print"
pcv.params.debug = "print"
pcv.params.debug_outdir = imgdir
_, _, _, _ = pcv.transform.correct_color(target_img, mask, source_img, mask, cache_dir)
# Test with debug = "plot"
pcv.params.debug = "plot"
_, _, _, _ = pcv.transform.correct_color(target_img, mask, source_img, mask, output_path)
# Test with debug = None
pcv.params.debug = None
_, _, matrix_t, corrected_img = pcv.transform.correct_color(target_img, mask, source_img, mask, output_path)
    # Assert the corrected image matches the expected image and the matrix files were saved
assert all([np.array_equal(corrected_img, corrected_compare),
os.path.exists(os.path.join(output_path, "target_matrix.npz")) is True,
os.path.exists(os.path.join(output_path, "source_matrix.npz")) is True,
os.path.exists(os.path.join(output_path, "transformation_matrix.npz")) is True])
def test_plantcv_transform_correct_color_output_dne():
# load corrected image to compare
corrected_compare = cv2.imread(os.path.join(TEST_DATA, TEST_S1_CORRECTED))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_correct_color_output_dne")
os.mkdir(cache_dir)
# Make image and mask directories in the cache directory
imgdir = os.path.join(cache_dir, "images")
# Read in target, source, and gray-scale mask
target_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
source_img = cv2.imread(os.path.join(TEST_DATA, TEST_SOURCE1_IMG))
mask = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_MASK), -1)
output_path = os.path.join(cache_dir, "saved_matrices_1") # output_directory that does not currently exist
# Test with debug = "print"
pcv.params.debug = "print"
pcv.params.debug_outdir = imgdir
_, _, _, _ = pcv.transform.correct_color(target_img, mask, source_img, mask, output_path)
# Test with debug = "plot"
pcv.params.debug = "plot"
_, _, _, _ = pcv.transform.correct_color(target_img, mask, source_img, mask, output_path)
# Test with debug = None
pcv.params.debug = None
_, _, matrix_t, corrected_img = pcv.transform.correct_color(target_img, mask, source_img, mask, output_path)
    # Assert the corrected image matches the expected image and the matrix files were saved
assert all([np.array_equal(corrected_img, corrected_compare),
os.path.exists(os.path.join(output_path, "target_matrix.npz")) is True,
os.path.exists(os.path.join(output_path, "source_matrix.npz")) is True,
os.path.exists(os.path.join(output_path, "transformation_matrix.npz")) is True])
def test_plantcv_transform_create_color_card_mask():
# Load target image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_create_color_card_mask")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=(166, 166),
spacing=(21, 21), nrows=6, ncols=4, exclude=[20, 0])
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=(166, 166),
spacing=(21, 21), nrows=6, ncols=4, exclude=[20, 0])
# Test with debug = None
pcv.params.debug = None
mask = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=(166, 166),
spacing=(21, 21), nrows=6, ncols=4, exclude=[20, 0])
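    # With 2 of the 24 chips excluded, the expected mask holds 22 chip labels (10, 20, ..., 220) plus background 0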
    assert all(i == j for i, j in zip(np.unique(mask), np.array([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110,
                                                                 120, 130, 140, 150, 160, 170, 180, 190, 200, 210,
                                                                 220], dtype=np.uint8)))
def test_plantcv_transform_quick_color_check():
    # Load target and source color matrices
t_matrix = np.load(os.path.join(TEST_DATA, TEST_TARGET_MATRIX), encoding="latin1")
target_matrix = t_matrix['arr_0']
s_matrix = np.load(os.path.join(TEST_DATA, TEST_SOURCE1_MATRIX), encoding="latin1")
source_matrix = s_matrix['arr_0']
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_quick_color_check")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Test with debug = "print"
pcv.params.debug = "print"
pcv.transform.quick_color_check(target_matrix, source_matrix, num_chips=22)
# Test with debug = "plot"
pcv.params.debug = "plot"
pcv.transform.quick_color_check(target_matrix, source_matrix, num_chips=22)
# Test with debug = None
pcv.params.debug = None
pcv.transform.quick_color_check(target_matrix, source_matrix, num_chips=22)
assert os.path.exists(os.path.join(cache_dir, "color_quick_check.png"))
def test_plantcv_transform_find_color_card():
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_find_color_card")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
df, start, space = pcv.transform.find_color_card(rgb_img=rgb_img, threshold_type='adaptgauss', blurry=False,
threshvalue=90)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start,
spacing=space, nrows=6, ncols=4, exclude=[20, 0])
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start,
spacing=space, nrows=6, ncols=4, exclude=[20, 0])
# Test with debug = None
pcv.params.debug = None
mask = pcv.transform.create_color_card_mask(rgb_img=rgb_img, radius=6, start_coord=start,
spacing=space, nrows=6, ncols=4, exclude=[20, 0])
    assert all(i == j for i, j in zip(np.unique(mask), np.array([0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110,
                                                                 120, 130, 140, 150, 160, 170, 180, 190, 200, 210,
                                                                 220], dtype=np.uint8)))
def test_plantcv_transform_find_color_card_optional_parameters():
# Clear previous outputs
pcv.outputs.clear()
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG_COLOR_CARD))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_find_color_card")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
    # Test with threshold_type='normal'
df1, start1, space1 = pcv.transform.find_color_card(rgb_img=rgb_img, threshold_type='normal', blurry=True,
background='light', threshvalue=90, label="prefix")
assert pcv.outputs.observations["prefix"]["color_chip_size"]["value"] > 15000
def test_plantcv_transform_find_color_card_otsu():
# Clear previous outputs
pcv.outputs.clear()
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG_COLOR_CARD))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_find_color_card_otsu")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
    # Test with threshold_type='otsu'
df1, start1, space1 = pcv.transform.find_color_card(rgb_img=rgb_img, threshold_type='otsu', blurry=True,
background='light', threshvalue=90, label="prefix")
assert pcv.outputs.observations["prefix"]["color_chip_size"]["value"] > 15000
def test_plantcv_transform_find_color_card_optional_size_parameters():
# Clear previous outputs
pcv.outputs.clear()
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG_COLOR_CARD))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_find_color_card")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
_, _, _ = pcv.transform.find_color_card(rgb_img=rgb_img, record_chip_size="mean")
assert pcv.outputs.observations["default"]["color_chip_size"]["value"] > 15000
def test_plantcv_transform_find_color_card_optional_size_parameters_none():
# Clear previous outputs
pcv.outputs.clear()
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG_COLOR_CARD))
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_find_color_card")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
_, _, _ = pcv.transform.find_color_card(rgb_img=rgb_img, record_chip_size=None)
assert pcv.outputs.observations.get("default") is None
def test_plantcv_transform_find_color_card_bad_record_chip_size():
# Clear previous outputs
pcv.outputs.clear()
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
pcv.params.debug = None
_, _, _ = pcv.transform.find_color_card(rgb_img=rgb_img, record_chip_size='averageeeed')
assert pcv.outputs.observations["default"]["color_chip_size"]["value"] is None
def test_plantcv_transform_find_color_card_bad_thresh_input():
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
with pytest.raises(RuntimeError):
pcv.params.debug = None
_, _, _ = pcv.transform.find_color_card(rgb_img=rgb_img, threshold_type='gaussian')
def test_plantcv_transform_find_color_card_bad_background_input():
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
with pytest.raises(RuntimeError):
pcv.params.debug = None
_, _, _ = pcv.transform.find_color_card(rgb_img=rgb_img, background='lite')
def test_plantcv_transform_find_color_card_bad_colorcard():
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG_WITH_HEXAGON))
with pytest.raises(RuntimeError):
pcv.params.debug = None
_, _, _ = pcv.transform.find_color_card(rgb_img=rgb_img)
def test_plantcv_transform_rescale():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_rescale")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = "print"
pcv.params.debug = "print"
_ = pcv.transform.rescale(gray_img=gray_img, min_value=0, max_value=100)
pcv.params.debug = "plot"
rescaled_img = pcv.transform.rescale(gray_img=gray_img, min_value=0, max_value=100)
assert max(np.unique(rescaled_img)) == 100
def test_plantcv_transform_rescale_bad_input():
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
with pytest.raises(RuntimeError):
_ = pcv.transform.rescale(gray_img=rgb_img)
def test_plantcv_transform_resize():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_trancform_resize")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
size = (100, 100)
# Test with debug "print"
pcv.params.debug = "print"
_ = pcv.transform.resize(img=gray_img, size=size, interpolation="auto")
# Test with debug "plot"
pcv.params.debug = "plot"
resized_img = pcv.transform.resize(img=gray_img, size=size, interpolation="auto")
assert resized_img.shape == size
def test_plantcv_transform_resize_unsupported_method():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
with pytest.raises(RuntimeError):
_ = pcv.transform.resize(img=gray_img, size=(100, 100), interpolation="mymethod")
def test_plantcv_transform_resize_crop():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
size = (20, 20)
resized_im = pcv.transform.resize(img=gray_img, size=size, interpolation=None)
assert resized_im.shape == size
def test_plantcv_transform_resize_pad():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
size = (100, 100)
resized_im = pcv.transform.resize(img=gray_img, size=size, interpolation=None)
assert resized_im.shape == size
def test_plantcv_transform_resize_pad_crop_color():
color_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL))
size = (100, 100)
resized_im = pcv.transform.resize(img=color_img, size=size, interpolation=None)
assert resized_im.shape == (size[1], size[0], 3)
def test_plantcv_transform_resize_factor():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_trancform_resize_factor")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
# Resizing factors
factor_x = 0.5
factor_y = 0.2
# Test with debug "print"
pcv.params.debug = "print"
_ = pcv.transform.resize_factor(img=gray_img, factors=(factor_x, factor_y), interpolation="auto")
# Test with debug "plot"
pcv.params.debug = "plot"
resized_img = pcv.transform.resize_factor(img=gray_img, factors=(factor_x, factor_y), interpolation="auto")
output_size = resized_img.shape
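    # Note: resized_img.shape is (rows, cols) = (height, width), while the factors are
    # (factor_x, factor_y) = (width scale, height scale), hence the swapped order below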
expected_size = (int(gray_img.shape[0] * factor_y), int(gray_img.shape[1] * factor_x))
assert output_size == expected_size
def test_plantcv_transform_resize_factor_bad_input():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
with pytest.raises(RuntimeError):
_ = pcv.transform.resize_factor(img=gray_img, factors=(0, 2), interpolation="auto")
def test_plantcv_transform_nonuniform_illumination_rgb():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_nonuniform_illumination")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Load rgb image
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_TARGET_IMG))
pcv.params.debug = "plot"
_ = pcv.transform.nonuniform_illumination(img=rgb_img, ksize=11)
pcv.params.debug = "print"
corrected = pcv.transform.nonuniform_illumination(img=rgb_img, ksize=11)
assert np.mean(corrected) < np.mean(rgb_img)
def test_plantcv_transform_nonuniform_illumination_gray():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_transform_nonuniform_illumination")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Load rgb image
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
pcv.params.debug = "plot"
_ = pcv.transform.nonuniform_illumination(img=gray_img, ksize=11)
pcv.params.debug = "print"
corrected = pcv.transform.nonuniform_illumination(img=gray_img, ksize=11)
assert np.shape(corrected) == np.shape(gray_img)
def test_plantcv_transform_warp_default():
pcv.params.debug = "plot"
img = create_test_img((12, 10, 3))
refimg = create_test_img((12, 10, 3))
    pts = [(0, 0), (1, 0), (0, 3), (4, 4)]
    refpts = [(0, 0), (1, 0), (0, 3), (4, 4)]
warped_img, mat = pcv.transform.warp(img, refimg, pts, refpts, method="default")
assert mat.shape == (3, 3)
def test_plantcv_transform_warp_lmeds():
pcv.params.debug = "plot"
img = create_test_img((10, 10, 3))
refimg = create_test_img((11, 11))
pts = [(0, 0), (1, 0), (0, 3), (4, 4)]
refpts = [(0, 0), (1, 0), (0, 3), (4, 4)]
warped_img, mat = pcv.transform.warp(img, refimg, pts, refpts, method="lmeds")
assert mat.shape == (3, 3)
def test_plantcv_transform_warp_rho():
pcv.params.debug = "plot"
img = create_test_img_bin((10, 10))
refimg = create_test_img((11, 11))
pts = [(0, 0), (1, 0), (0, 3), (4, 4)]
refpts = [(0, 0), (1, 0), (0, 3), (4, 4)]
warped_img, mat = pcv.transform.warp(img, refimg, pts, refpts, method="rho")
assert mat.shape == (3, 3)
def test_plantcv_transform_warp_ransac():
pcv.params.debug = "plot"
img = create_test_img((100, 150))
refimg = create_test_img((10, 15))
pts = [(0, 0), (149, 0), (99, 149), (0, 99), (3, 3)]
refpts = [(0, 0), (0, 14), (9, 14), (0, 9), (3, 3)]
warped_img, mat = pcv.transform.warp(img, refimg, pts, refpts, method="ransac")
assert mat.shape == (3, 3)
@pytest.mark.parametrize("pts, refpts", [
[[(0,0)],[(0,0),(0,1)]], # different # of points provided for img and refimg
[[(0,0)],[(0,0)]], # not enough pairs of points provided
[[(0, 0), (0, 14), (9, 14), (0, 9), (3, 3)],
[(0, 0), (149, 0), (99, 149), (0, 99), (3, 3)]] # homography not able to be calculated (cannot converge)
])
def test_plantcv_transform_warp_err(pts, refpts):
img = create_test_img((10, 15))
refimg = create_test_img((100, 150))
method = "rho"
with pytest.raises(RuntimeError):
pcv.transform.warp(img, refimg, pts, refpts, method=method)
def test_plantcv_transform_warp_align():
img = create_test_img((10, 10, 3))
refimg = create_test_img((11, 11))
mat = np.array([[ 1.00000000e+00, 1.04238500e-15, -7.69185075e-16],
[ 1.44375646e-16, 1.00000000e+00, 0.00000000e+00],
[-5.41315251e-16, 1.78930521e-15, 1.00000000e+00]])
warp_img = pcv.transform.warp_align(img=img, mat=mat, refimg=refimg)
assert warp_img.shape == (11, 11, 3)
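# Illustrative sketch (hypothetical helper, not part of PlantCV): the 3 x 3 matrix returned
# by warp/warp_align is a homography acting on homogeneous pixel coordinates; individual
# points can be pushed through it with OpenCV to see where they land in the reference frame.
def _warp_points_sketch(points, mat):
    pts = np.asarray(points, dtype=np.float32).reshape(-1, 1, 2)
    warped = cv2.perspectiveTransform(pts, mat.astype(np.float32))
    return warped.reshape(-1, 2)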
# ##############################
# Tests for the threshold subpackage
# ##############################
@pytest.mark.parametrize("objtype", ["dark", "light"])
def test_plantcv_threshold_binary(objtype):
# Read in test data
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    # Test with the parametrized object type
pcv.params.debug = None
binary_img = pcv.threshold.binary(gray_img=gray_img, threshold=25, max_value=255, object_type=objtype)
# Assert that the output image has the dimensions of the input image
    if all([i == j for i, j in zip(np.shape(binary_img), TEST_GRAY_DIM)]):
        # Assert that the image is binary
        if all([i == j for i, j in zip(np.unique(binary_img), [0, 255])]):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_threshold_binary_incorrect_object_type():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = None
_ = pcv.threshold.binary(gray_img=gray_img, threshold=25, max_value=255, object_type="lite")
@pytest.mark.parametrize("objtype", ["dark", "light"])
def test_plantcv_threshold_gaussian(objtype):
# Read in test data
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    # Test with the parametrized object type
pcv.params.debug = None
binary_img = pcv.threshold.gaussian(gray_img=gray_img, max_value=255, object_type=objtype)
# Assert that the output image has the dimensions of the input image
    if all([i == j for i, j in zip(np.shape(binary_img), TEST_GRAY_DIM)]):
        # Assert that the image is binary
        if all([i == j for i, j in zip(np.unique(binary_img), [0, 255])]):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_threshold_gaussian_incorrect_object_type():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = None
_ = pcv.threshold.gaussian(gray_img=gray_img, max_value=255, object_type="lite")
@pytest.mark.parametrize("objtype", ["dark", "light"])
def test_plantcv_threshold_mean(objtype):
# Read in test data
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    # Test with the parametrized object type
pcv.params.debug = None
binary_img = pcv.threshold.mean(gray_img=gray_img, max_value=255, object_type=objtype)
# Assert that the output image has the dimensions of the input image
    if all([i == j for i, j in zip(np.shape(binary_img), TEST_GRAY_DIM)]):
        # Assert that the image is binary
        if all([i == j for i, j in zip(np.unique(binary_img), [0, 255])]):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_threshold_mean_incorrect_object_type():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = None
_ = pcv.threshold.mean(gray_img=gray_img, max_value=255, object_type="lite")
@pytest.mark.parametrize("objtype", ["dark", "light"])
def test_plantcv_threshold_otsu(objtype):
# Read in test data
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GREENMAG), -1)
    # Test with the parametrized object type
pcv.params.debug = None
binary_img = pcv.threshold.otsu(gray_img=gray_img, max_value=255, object_type=objtype)
# Assert that the output image has the dimensions of the input image
    if all([i == j for i, j in zip(np.shape(binary_img), TEST_GRAY_DIM)]):
        # Assert that the image is binary
        if all([i == j for i, j in zip(np.unique(binary_img), [0, 255])]):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_threshold_otsu_incorrect_object_type():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = None
_ = pcv.threshold.otsu(gray_img=gray_img, max_value=255, object_type="lite")
@pytest.mark.parametrize("channel,lower_thresh,upper_thresh", [["HSV", [0, 0, 0], [255, 255, 255]],
["LAB", [0, 0, 0], [255, 255, 255]],
["RGB", [0, 0, 0], [255, 255, 255]],
["GRAY", [0], [255]]])
def test_plantcv_threshold_custom_range_rgb(channel, lower_thresh, upper_thresh):
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = None
pcv.params.debug = None
mask, binary_img = pcv.threshold.custom_range(img, lower_thresh=lower_thresh, upper_thresh=upper_thresh,
channel=channel)
# Assert that the output image has the dimensions of the input image
    if all([i == j for i, j in zip(np.shape(binary_img), TEST_GRAY_DIM)]):
        # Assert that the image is binary
        if all([i == j for i, j in zip(np.unique(binary_img), [0, 255])]):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_threshold_custom_range_grayscale():
# Read in test data
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
# Test with debug = None
pcv.params.debug = None
# # Test channel='gray'
mask, binary_img = pcv.threshold.custom_range(gray_img, lower_thresh=[0], upper_thresh=[255], channel='gray')
# Assert that the output image has the dimensions of the input image
    if all([i == j for i, j in zip(np.shape(binary_img), TEST_GRAY_DIM)]):
        # Assert that the image is binary
        if all([i == j for i, j in zip(np.unique(binary_img), [0, 255])]):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_threshold_custom_range_bad_input_hsv():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
_, _ = pcv.threshold.custom_range(img, lower_thresh=[0, 0], upper_thresh=[2, 2, 2, 2], channel='HSV')
def test_plantcv_threshold_custom_range_bad_input_rgb():
# Read in test data
pcv.params.debug = None
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
_, _ = pcv.threshold.custom_range(img, lower_thresh=[0, 0], upper_thresh=[2, 2, 2, 2], channel='RGB')
def test_plantcv_threshold_custom_range_bad_input_lab():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
_, _ = pcv.threshold.custom_range(img, lower_thresh=[0, 0], upper_thresh=[2, 2, 2], channel='LAB')
def test_plantcv_threshold_custom_range_bad_input_gray():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
_, _ = pcv.threshold.custom_range(img, lower_thresh=[0, 0], upper_thresh=[2], channel='gray')
def test_plantcv_threshold_custom_range_bad_input_channel():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
_, _ = pcv.threshold.custom_range(img, lower_thresh=[0], upper_thresh=[2], channel='CMYK')
@pytest.mark.parametrize("channel", ["all", "any"])
def test_plantcv_threshold_saturation(channel):
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = None
pcv.params.debug = None
thresh = pcv.threshold.saturation(rgb_img=rgb_img, threshold=254, channel=channel)
assert len(np.unique(thresh)) == 2
def test_plantcv_threshold_saturation_bad_input():
# Read in test data
rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
_ = pcv.threshold.saturation(rgb_img=rgb_img, threshold=254, channel="red")
def test_plantcv_threshold_triangle():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_threshold_triangle")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
pcv.params.debug = None
_ = pcv.threshold.triangle(gray_img=gray_img, max_value=255, object_type="dark", xstep=10)
pcv.params.debug = "plot"
_ = pcv.threshold.triangle(gray_img=gray_img, max_value=255, object_type="light", xstep=10)
pcv.params.debug = "print"
binary_img = pcv.threshold.triangle(gray_img=gray_img, max_value=255, object_type="light", xstep=10)
# Assert that the output image has the dimensions of the input image
    if all([i == j for i, j in zip(np.shape(binary_img), TEST_GRAY_DIM)]):
        # Assert that the image is binary
        if all([i == j for i, j in zip(np.unique(binary_img), [0, 255])]):
assert 1
else:
assert 0
else:
assert 0
def test_plantcv_threshold_triangle_incorrect_object_type():
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
pcv.params.debug = None
_ = pcv.threshold.triangle(gray_img=gray_img, max_value=255, object_type="lite", xstep=10)
def test_plantcv_threshold_texture():
# Test with debug = None
pcv.params.debug = None
gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
binary_img = pcv.threshold.texture(gray_img, ksize=6, threshold=7, offset=3, texture_method='dissimilarity',
borders='nearest', max_value=255)
# Assert that the output image has the dimensions of the input image
    if all([i == j for i, j in zip(np.shape(binary_img), TEST_GRAY_DIM)]):
        # Assert that the image is binary
        if all([i == j for i, j in zip(np.unique(binary_img), [0, 255])]):
assert 1
else:
assert 0
else:
assert 0
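# Helper image generators used by the transform.warp tests above: create_test_img
# returns a pseudo-random uint8 image of the requested shape (the randint * 255
# product simply wraps around when cast to uint8), and create_test_img_bin returns
# a float mask with a single rectangular foreground block.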
def create_test_img(sz_img):
img = np.random.randint(np.prod(sz_img), size=sz_img) * 255
img = img.astype(np.uint8)
return img
def create_test_img_bin(sz_img):
img = np.zeros(sz_img)
img[3:7, 2:8] = 1
return img
@pytest.mark.parametrize("bad_type", ["native", "nan", "inf"])
def test_plantcv_threshold_mask_bad(bad_type):
# Create a synthetic bad image
bad_img = np.reshape(np.random.rand(25), (5, 5))
bad_img[2, 2] = np.inf
bad_img[2, 3] = np.nan
sz = np.shape(bad_img)
pcv.params.debug = None
mask = pcv.threshold.mask_bad(bad_img, bad_type=bad_type)
assert((np.shape(mask) == sz) and (len(np.unique(mask)) == 2))
def test_plantcv_threshold_mask_bad_native_bad_input():
# Create a synthetic bad image
bad_img = np.reshape(np.random.rand(25), (5, 5))
sz = np.shape(bad_img)
mask10 = pcv.threshold.mask_bad(bad_img, bad_type='native')
assert mask10.all() == np.zeros(sz, dtype='uint8').all()
def test_plantcv_threshold_mask_bad_nan_bad_input():
# Create a synthetic bad image
bad_img = np.reshape(np.random.rand(25), (5, 5))
bad_img[2, 2] = np.inf
sz = np.shape(bad_img)
mask11 = pcv.threshold.mask_bad(bad_img, bad_type='nan')
assert mask11.all() == np.zeros(sz, dtype='uint8').all()
def test_plantcv_threshold_mask_bad_input_color_img():
# Read in test data
bad_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
pcv.threshold.mask_bad(bad_img, bad_type='nan')
# ###################################
# Tests for the visualize subpackage
# ###################################
def test_plantcv_visualize_auto_threshold_methods_bad_input():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_auto_threshold_methods")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
_ = pcv.visualize.auto_threshold_methods(gray_img=img)
def test_plantcv_visualize_auto_threshold_methods():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_auto_threshold_methods")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
pcv.params.debug = "print"
_ = pcv.visualize.auto_threshold_methods(gray_img=img)
pcv.params.debug = "plot"
labeled_imgs = pcv.visualize.auto_threshold_methods(gray_img=img)
assert len(labeled_imgs) == 5 and np.shape(labeled_imgs[0])[0] == np.shape(img)[0]
@pytest.mark.parametrize("debug,axes", [["print", True], ["plot", False]])
def test_plantcv_visualize_pseudocolor(debug, axes, tmpdir):
# Create a tmp directory
cache_dir = tmpdir.mkdir("sub")
pcv.params.debug_outdir = cache_dir
# Input image
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
r, c = img.shape
# generate 200 "bad" pixels
mask_bad = np.zeros((r, c), dtype=np.uint8)
mask_bad = np.reshape(mask_bad, (-1, 1))
mask_bad[0:100] = 255
mask_bad = np.reshape(mask_bad, (r, c))
# Debug mode
pcv.params.debug = debug
pseudo_img = pcv.visualize.pseudocolor(gray_img=img, mask=None, title="Pseudocolored image", axes=axes,
bad_mask=mask_bad)
# Assert that the output image has the dimensions of the input image
    assert all([i == j for i, j in zip(np.shape(pseudo_img), TEST_BINARY_DIM)])
@pytest.mark.parametrize("bkgrd,axes,pad", [["image", True, "auto"], ["white", False, 1], ["black", True, "auto"]])
def test_plantcv_visualize_pseudocolor_mask(bkgrd, axes, pad):
# Test with debug = None
pcv.params.debug = None
# Input image
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Input mask
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
# Input contours
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
obj_contour = contours_npz['arr_0']
r, c = img.shape
# generate 200 "bad" pixels
mask_bad = np.zeros((r, c), dtype=np.uint8)
mask_bad = np.reshape(mask_bad, (-1, 1))
mask_bad[0:100] = 255
mask_bad = np.reshape(mask_bad, (r, c))
pseudo_img = pcv.visualize.pseudocolor(gray_img=img, obj=obj_contour, mask=mask, background=bkgrd,
bad_mask=mask_bad, title="Pseudocolored image", axes=axes, obj_padding=pad)
# Assert that the output image has the dimensions of the input image
    if all([i == j for i, j in zip(np.shape(pseudo_img), TEST_BINARY_DIM)]):
assert 1
else:
assert 0
def test_plantcv_visualize_pseudocolor_bad_input():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_pseudocolor")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
with pytest.raises(RuntimeError):
_ = pcv.visualize.pseudocolor(gray_img=img)
def test_plantcv_visualize_pseudocolor_bad_background():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_pseudocolor_bad_background")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
with pytest.raises(RuntimeError):
_ = pcv.visualize.pseudocolor(gray_img=img, mask=mask, background="pink")
def test_plantcv_visualize_pseudocolor_bad_padding():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_pseudocolor_bad_background")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
obj_contour = contours_npz['arr_0']
with pytest.raises(RuntimeError):
_ = pcv.visualize.pseudocolor(gray_img=img, mask=mask, obj=obj_contour, obj_padding="pink")
def test_plantcv_visualize_pseudocolor_bad_mask():
# Test with debug = None
pcv.params.debug = None
def test_plantcv_visualize_colorize_masks():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_naive_bayes_classifier")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
mask = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS))
_ = pcv.visualize.colorize_masks(masks=[mask['plant'], mask['background']],
colors=[(0, 0, 0), (1, 1, 1)])
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.visualize.colorize_masks(masks=[mask['plant'], mask['background']],
colors=[(0, 0, 0), (1, 1, 1)])
# Test with debug = None
pcv.params.debug = None
colored_img = pcv.visualize.colorize_masks(masks=[mask['plant'], mask['background']],
colors=['red', 'blue'])
# Assert that the output image has the dimensions of the input image
assert not np.average(colored_img) == 0
def test_plantcv_visualize_colorize_masks_bad_input_empty():
with pytest.raises(RuntimeError):
_ = pcv.visualize.colorize_masks(masks=[], colors=[])
def test_plantcv_visualize_colorize_masks_bad_input_mismatch_number():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
mask = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS))
with pytest.raises(RuntimeError):
_ = pcv.visualize.colorize_masks(masks=[mask['plant'], mask['background']], colors=['red', 'green', 'blue'])
def test_plantcv_visualize_colorize_masks_bad_color_input():
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
# Test with debug = "print"
pcv.params.debug = "print"
mask = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS))
with pytest.raises(RuntimeError):
_ = pcv.visualize.colorize_masks(masks=[mask['plant'], mask['background']], colors=['red', 1.123])
def test_plantcv_visualize_colorize_label_img():
label_img = np.array([[1,2,3],[4,5,6],[7,8,9]])
pcv.params.debug = None
colored_img = pcv.visualize.colorize_label_img(label_img)
assert (colored_img.shape[0:-1] == label_img.shape) and colored_img.shape[-1] == 3
@pytest.mark.parametrize("bins,lb,ub,title", [[200, 0, 255, "Include Title"], [100, None, None, None]])
def test_plantcv_visualize_histogram(bins, lb, ub, title):
# Test with debug = None
pcv.params.debug = None
# Read test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
fig_hist, hist_df = pcv.visualize.histogram(img=img, mask=mask, bins=bins, lower_bound=lb, upper_bound=ub,
title=title, hist_data=True)
assert all([isinstance(fig_hist, ggplot), isinstance(hist_df, pd.core.frame.DataFrame)])
def test_plantcv_visualize_histogram_no_mask():
# Test with debug = None
pcv.params.debug = None
# Read test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
fig_hist = pcv.visualize.histogram(img=img, mask=None)
assert isinstance(fig_hist, ggplot)
def test_plantcv_visualize_histogram_rgb_img():
# Test with debug = None
pcv.params.debug = None
# Test RGB input image
img_rgb = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
fig_hist = pcv.visualize.histogram(img=img_rgb)
assert isinstance(fig_hist, ggplot)
def test_plantcv_visualize_histogram_multispectral_img():
# Test with debug = None
pcv.params.debug = None
# Test multi-spectral image
img_rgb = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img_multi = np.concatenate((img_rgb, img_rgb), axis=2)
fig_hist = pcv.visualize.histogram(img=img_multi)
assert isinstance(fig_hist, ggplot)
def test_plantcv_visualize_histogram_no_img():
with pytest.raises(RuntimeError):
_ = pcv.visualize.histogram(img=None)
def test_plantcv_visualize_histogram_array():
# Read test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
_ = pcv.visualize.histogram(img=img[0, :])
def test_plantcv_visualize_clustered_contours():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_plot_hist")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_VISUALIZE_BACKGROUND), -1)
roi_objects = np.load(os.path.join(TEST_DATA, TEST_INPUT_VISUALIZE_CONTOUR), encoding="latin1")
hierarchy = np.load(os.path.join(TEST_DATA, TEST_INPUT_VISUALIZE_HIERARCHY), encoding="latin1")
cluster_i = np.load(os.path.join(TEST_DATA, TEST_INPUT_VISUALIZE_CLUSTERS), encoding="latin1")
objs = [roi_objects[arr_n] for arr_n in roi_objects]
obj_hierarchy = hierarchy['arr_0']
cluster = [cluster_i[arr_n] for arr_n in cluster_i]
# Test in plot mode
pcv.params.debug = "plot"
# Reset the saved color scale (can be saved between tests)
pcv.params.saved_color_scale = None
_ = pcv.visualize.clustered_contours(img=img1, grouped_contour_indices=cluster, roi_objects=objs,
roi_obj_hierarchy=obj_hierarchy, bounding=False)
# Test in print mode
pcv.params.debug = "print"
# Reset the saved color scale (can be saved between tests)
pcv.params.saved_color_scale = None
cluster_img = pcv.visualize.clustered_contours(img=img, grouped_contour_indices=cluster, roi_objects=objs,
roi_obj_hierarchy=obj_hierarchy, nrow=2, ncol=2, bounding=True)
assert np.sum(cluster_img) > np.sum(img)
def test_plantcv_visualize_colorspaces():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_plot_hist")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
pcv.params.debug = "plot"
vis_img_small = pcv.visualize.colorspaces(rgb_img=img, original_img=False)
pcv.params.debug = "print"
vis_img = pcv.visualize.colorspaces(rgb_img=img)
assert np.shape(vis_img)[1] > (np.shape(img)[1]) and np.shape(vis_img_small)[1] > (np.shape(img)[1])
def test_plantcv_visualize_colorspaces_bad_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_plot_hist")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
with pytest.raises(RuntimeError):
_ = pcv.visualize.colorspaces(rgb_img=img)
def test_plantcv_visualize_overlay_two_imgs():
pcv.params.debug = None
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_visualize_overlay_two_imgs")
os.mkdir(cache_dir)
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img2 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY))
pcv.params.debug = None
out_img = pcv.visualize.overlay_two_imgs(img1=img1, img2=img2)
sample_pt1 = img1[1445, 1154]
sample_pt2 = img2[1445, 1154]
sample_pt3 = out_img[1445, 1154]
pred_rgb = (sample_pt1 * 0.5) + (sample_pt2 * 0.5)
pred_rgb = pred_rgb.astype(np.uint8)
assert np.array_equal(sample_pt3, pred_rgb)
def test_plantcv_visualize_overlay_two_imgs_grayscale():
pcv.params.debug = None
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_visualize_overlay_two_imgs_grayscale")
os.mkdir(cache_dir)
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
img2 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
out_img = pcv.visualize.overlay_two_imgs(img1=img1, img2=img2)
sample_pt1 = np.array([255, 255, 255], dtype=np.uint8)
sample_pt2 = np.array([255, 255, 255], dtype=np.uint8)
sample_pt3 = out_img[1445, 1154]
pred_rgb = (sample_pt1 * 0.5) + (sample_pt2 * 0.5)
pred_rgb = pred_rgb.astype(np.uint8)
assert np.array_equal(sample_pt3, pred_rgb)
def test_plantcv_visualize_overlay_two_imgs_bad_alpha():
pcv.params.debug = None
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_visualize_overlay_two_imgs_bad_alpha")
os.mkdir(cache_dir)
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img2 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY))
alpha = -1
with pytest.raises(RuntimeError):
_ = pcv.visualize.overlay_two_imgs(img1=img1, img2=img2, alpha=alpha)
def test_plantcv_visualize_overlay_two_imgs_size_mismatch():
pcv.params.debug = None
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_visualize_overlay_two_imgs_size_mismatch")
os.mkdir(cache_dir)
img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
img2 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_CROPPED))
with pytest.raises(RuntimeError):
_ = pcv.visualize.overlay_two_imgs(img1=img1, img2=img2)
@pytest.mark.parametrize("title", ["Include Title", None])
def test_plantcv_visualize_obj_size_ecdf(title):
pcv.params.debug = None
mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
fig_ecdf = plantcv.plantcv.visualize.obj_size_ecdf(mask=mask, title=title)
assert isinstance(fig_ecdf, ggplot)
# ##############################
# Tests for the utils subpackage
# ##############################
def test_plantcv_utils_json2csv():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_json2csv")
os.mkdir(cache_dir)
plantcv.utils.json2csv(json_file=os.path.join(TEST_DATA, "merged_output.json"),
csv_file=os.path.join(cache_dir, "exports"))
assert all([os.path.exists(os.path.join(cache_dir, "exports-single-value-traits.csv")),
os.path.exists(os.path.join(cache_dir, "exports-multi-value-traits.csv"))])
def test_plantcv_utils_json2csv_no_json():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_json2csv_no_json")
os.mkdir(cache_dir)
with pytest.raises(IOError):
plantcv.utils.json2csv(json_file=os.path.join(TEST_DATA, "not_a_file.json"),
csv_file=os.path.join(cache_dir, "exports"))
def test_plantcv_utils_json2csv_bad_json():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_json2csv_bad_json")
os.mkdir(cache_dir)
with pytest.raises(ValueError):
plantcv.utils.json2csv(json_file=os.path.join(TEST_DATA, "incorrect_json_data.txt"),
csv_file=os.path.join(cache_dir, "exports"))
def test_plantcv_utils_sample_images_snapshot():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_sample_images")
os.mkdir(cache_dir)
snapshot_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
img_outdir = os.path.join(cache_dir, "snapshot")
plantcv.utils.sample_images(source_path=snapshot_dir, dest_path=img_outdir, num=3)
assert os.path.exists(os.path.join(cache_dir, "snapshot"))
def test_plantcv_utils_sample_images_flatdir():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_sample_images")
os.mkdir(cache_dir)
flat_dir = os.path.join(TEST_DATA)
img_outdir = os.path.join(cache_dir, "images")
plantcv.utils.sample_images(source_path=flat_dir, dest_path=img_outdir, num=30)
random_images = os.listdir(img_outdir)
assert all([len(random_images) == 30, len(np.unique(random_images)) == 30])
def test_plantcv_utils_sample_images_bad_source():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_sample_images")
os.mkdir(cache_dir)
fake_dir = os.path.join(TEST_DATA, "snapshot")
img_outdir = os.path.join(cache_dir, "images")
with pytest.raises(IOError):
plantcv.utils.sample_images(source_path=fake_dir, dest_path=img_outdir, num=3)
def test_plantcv_utils_sample_images_bad_flat_num():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_sample_images")
os.mkdir(cache_dir)
flat_dir = os.path.join(TEST_DATA)
img_outdir = os.path.join(cache_dir, "images")
with pytest.raises(RuntimeError):
plantcv.utils.sample_images(source_path=flat_dir, dest_path=img_outdir, num=300)
def test_plantcv_utils_sample_images_bad_phenofront_num():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_sample_images")
os.mkdir(cache_dir)
snapshot_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
img_outdir = os.path.join(cache_dir, "images")
with pytest.raises(RuntimeError):
plantcv.utils.sample_images(source_path=snapshot_dir, dest_path=img_outdir, num=300)
def test_plantcv_utils_tabulate_bayes_classes():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_tabulate_bayes_classes")
os.mkdir(cache_dir)
outfile = os.path.join(cache_dir, "rgb_table.txt")
plantcv.utils.tabulate_bayes_classes(input_file=os.path.join(TEST_DATA, PIXEL_VALUES), output_file=outfile)
table = pd.read_csv(outfile, sep="\t")
assert table.shape == (228, 2)
def test_plantcv_utils_tabulate_bayes_classes_missing_input():
# Test cache directory
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_utils_tabulate_bayes_classes_missing_input")
os.mkdir(cache_dir)
outfile = os.path.join(cache_dir, "rgb_table.txt")
with pytest.raises(IOError):
plantcv.utils.tabulate_bayes_classes(input_file=os.path.join(PIXEL_VALUES), output_file=outfile)
# ##############################
# Clean up test files
# ##############################
def teardown_function():
shutil.rmtree(TEST_TMPDIR)
| mit |
RPGOne/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 70 | 4523 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However, PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
print(__doc__)
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA(svd_solver='full')
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
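# Note: for PCA and FactorAnalysis, cross_val_score relies on each estimator's
# score() method, which is the average per-sample log-likelihood of the held-out
# data, so higher curves below mean a better probabilistic fit.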
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
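# The two covariance estimators above are baselines: their score() is also a
# Gaussian log-likelihood, which makes the shrunk-covariance and Ledoit-Wolf
# horizontal lines in the plot directly comparable to the PCA/FA curves.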
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(svd_solver='full', n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa,
linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
    plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
assad2012/ggplot | ggplot/geoms/geom_linerange.py | 12 | 2881 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
from .geom import geom
from ggplot.utils import is_categorical
import numpy as np
class geom_linerange(geom):
"""Plot intervals represented by vertical lines
Parameters
---------
x
x values of data
ymin
lower end of the interval for each x
ymax
upper end of the interval for each x
alpha : float
alpha value, defaults to 1
color : string
line color, defaults to 'black'
linetype : string
line type, defaults to 'solid'
size : string
width of the line, defaults to 2
Examples
--------
.. plot::
:include-source:
import numpy as np
import pandas as pd
from ggplot import *
np.random.seed(42)
x = np.linspace(0.5, 9.5, num=10)
y = np.random.randn(10)
ymin = y - np.random.uniform(0,1, size=10)
ymax = y + np.random.uniform(0,1, size=10)
data = pd.DataFrame({'x': x, 'ymin': ymin, 'ymax': ymax})
ggplot(aes(x='x', ymin='ymin', ymax='ymax'), data) \
+ geom_linerange()
"""
DEFAULT_AES = {'alpha': 1, 'color': 'black',
'linetype': 'solid',
'size': 2}
REQUIRED_AES = {'x', 'ymin', 'ymax'}
DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity', 'cmap': None}
_aes_renames = {'size': 'linewidth', 'linetype': 'linestyle'}
_units = {'alpha', 'color', 'linestyle'}
def __init__(self, *args, **kwargs):
super(geom_linerange, self).__init__(*args, **kwargs)
self._warning_printed = False
def _plot_unit(self, pinfo, ax):
# If x is categorical, calculate positions to plot
categorical = is_categorical(pinfo['x'])
if categorical:
x = pinfo.pop('x')
new_x = np.arange(len(x))
ax.set_xticks(new_x)
ax.set_xticklabels(x)
pinfo['x'] = new_x
if 'linewidth' in pinfo and isinstance(pinfo['linewidth'], list):
# ggplot also supports aes(size=...) but the current mathplotlib
# is not. See https://github.com/matplotlib/matplotlib/issues/2658
pinfo['linewidth'] = 4
if not self._warning_printed:
                msg = "'geom_linerange()' currently does not support the mapping of " +\
                      "size ('aes(size=<var>'), using size=4 as a replacement.\n" +\
                      "Use 'geom_linerange(size=x)' to set the size for the whole line.\n"
sys.stderr.write(msg)
self._warning_printed = True
x = pinfo.pop('x')
x = np.vstack([x, x])
ymin = pinfo.pop('ymin')
ymax = pinfo.pop('ymax')
y = np.vstack([ymin, ymax])
ax.plot(x, y, **pinfo)
| bsd-2-clause |
glemaitre/UnbalancedDataset | examples/over-sampling/plot_smote.py | 2 | 2231 | """
=====
SMOTE
=====
An illustration of the SMOTE method and its variant.
"""
# Authors: Fernando Nogueira
# Christos Aridas
# Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from imblearn.over_sampling import SMOTE
print(__doc__)
def plot_resampling(ax, X, y, title):
c0 = ax.scatter(X[y == 0, 0], X[y == 0, 1], label="Class #0", alpha=0.5)
c1 = ax.scatter(X[y == 1, 0], X[y == 1, 1], label="Class #1", alpha=0.5)
ax.set_title(title)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.spines['left'].set_position(('outward', 10))
ax.spines['bottom'].set_position(('outward', 10))
ax.set_xlim([-6, 8])
ax.set_ylim([-6, 6])
return c0, c1
# Generate the dataset
X, y = make_classification(n_classes=2, class_sep=2, weights=[0.3, 0.7],
n_informative=3, n_redundant=1, flip_y=0,
n_features=20, n_clusters_per_class=1,
n_samples=80, random_state=10)
# Instantiate a PCA object for the sake of easy visualisation
pca = PCA(n_components=2)
# Fit and transform x to visualise inside a 2D feature space
X_vis = pca.fit_transform(X)
# Apply SMOTE with its different variants
kind = ['regular', 'borderline1', 'borderline2', 'svm']
sm = [SMOTE(kind=k) for k in kind]
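# Variants: 'regular' interpolates new minority samples between each minority
# point and its nearest minority neighbours; 'borderline1' and 'borderline2'
# restrict synthesis to minority samples lying close to the class boundary; and
# 'svm' uses the support vectors of a fitted SVM to steer where new samples are
# generated.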
X_resampled = []
y_resampled = []
X_res_vis = []
for method in sm:
X_res, y_res = method.fit_sample(X, y)
X_resampled.append(X_res)
y_resampled.append(y_res)
X_res_vis.append(pca.transform(X_res))
# Two subplots, unpack the axes array immediately
f, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3, 2)
# Remove axis for second plot
ax2.axis('off')
ax_res = [ax3, ax4, ax5, ax6]
c0, c1 = plot_resampling(ax1, X_vis, y, 'Original set')
for i in range(len(kind)):
plot_resampling(ax_res[i], X_res_vis[i], y_resampled[i],
'SMOTE {}'.format(kind[i]))
ax2.legend((c0, c1), ('Class #0', 'Class #1'), loc='center',
ncol=1, labelspacing=0.)
plt.tight_layout()
plt.show()
| mit |
rabrahm/ceres | utils/FastRotators/spfr.py | 1 | 18831 | from pylab import *
import pyfits
from PyAstronomy import pyasl
import scipy
from scipy import interpolate
from scipy import ndimage
from scipy import signal
import pickle
from matplotlib.backends.backend_pdf import PdfPages
import os
#from pyevolve import G1DList
#from pyevolve import GSimpleGA
from multiprocessing import Pool
import time
def download_models(webpage='http://svo2.cab.inta-csic.es/theory/models/coelho/high/data/',dest='../../data/'):
os.system('mkdir '+dest+'/COELHO2014')
cwd = os.getcwd()
os.chdir(dest+'/COELHO2014')
tf = np.arange(6000,10001,250)
gf = np.arange(2.5,4.6,0.5)
#gf = np.array([2.5])
zf = np.array([-1.,-0.5,0.0,0.2])
for t in tf:
for g in gf:
for z in zf:
modname = get_modname(t,g,z)
if z<0:
sz = 'm'
else:
sz = 'p'
sz = sz+str(float(np.absolute(z))).replace('.','')+'p00/'
os.system('wget ' + webpage+sz+modname+'.fits')
os.system('wget ' + webpage+sz+modname+'plc.fits')
os.chdir(cwd)
return True
def n_Edlen(l):
sigma = 1e4 / l
sigma2 = sigma*sigma
n = 1 + 1e-8 * (8342.13 + 2406030 / (130-sigma2) + 15997/(38.9-sigma2))
return n
def n_Morton(l):
sigma = 1e4 / l
sigma2 = sigma*sigma
n = 1 + 6.4328e-5 + 2.94981e-2 / (146.-sigma2) + 2.5540e-4/(41.-sigma2)
return n
def ToAir(l):
return (l / n_Edlen(l))
def ToVacuum(l):
cond = 1
l_prev = l.copy()
while(cond):
l_new = n_Edlen(l_prev) * l
if (max(np.absolute(l_new - l_prev)) < 1e-10): cond = 0
l_prev = l_new
return l_prev
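# Illustrative self-check of the air/vacuum conversion above (a sketch, not part
# of the pipeline): converting air wavelengths (in Angstroms) to vacuum and back
# should reproduce the input to well within the 1e-10 convergence tolerance of
# the fixed-point iteration.
if __name__ == '__main__':
    _w_air = np.array([4000.0, 5500.0, 6562.8])
    _w_roundtrip = ToAir(ToVacuum(_w_air))
    assert np.max(np.absolute(_w_roundtrip - _w_air)) < 1e-6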
def get_modname(t,g,z):
st = str(int(t))
if t<10000:
st = '0'+st
sg = '+'+str(np.around(g,1))
if z < 0:
sz = 'm'
else:
sz = 'p'
z=float(z)
sz = sz + str(np.around(np.absolute(z),1))
sz = sz.replace('.','')
return 't'+st+'_g'+sg+'_'+sz+'p00_hr'
def get_model(t,g,z,model_path='../../data/COELHO2014/'):
modname = model_path + get_modname(t,g,z)
try:
out = pyfits.getdata(modname+'.fits')
except:
out = pyfits.getdata(modname+'plc.fits')
return out
def get_near(x,vec):
if x == vec[0]:
mmin = vec[0]
mmax = vec[1]
elif x == vec[-1]:
mmin = vec[-2]
mmax = vec[-1]
else:
tvec = vec - x
In = np.where(tvec < 0)[0]
mmin = tvec[In].max() + x
Ix = np.where(tvec >= 0)[0]
mmax = tvec[Ix].min() + x
return mmin,mmax
def trilinear_interpolation(t,g,z,model_path='../../data/COELHO2014/'):
teffs = np.arange(6000,10001,250)
loggs = np.arange(2.5,4.6,0.5)
fehs = np.array([-1.,-0.5,0.0,0.2])
x0,x1 = get_near(t,teffs)
y0,y1 = get_near(g,loggs)
z0,z1 = get_near(z,fehs)
xd = (t-x0)/(x1-x0)
yd = (g-y0)/(y1-y0)
zd = (z-z0)/(z1-z0)
try:
hd = pyfits.getheader(model_path+get_modname(x0,y0,z0)+'.fits')
except:
hd = pyfits.getheader(model_path+get_modname(x0,y0,z0)+'plc.fits')
c000 = get_model(x0,y0,z0,model_path)
c001 = get_model(x0,y0,z1,model_path)
c010 = get_model(x0,y1,z0,model_path)
c100 = get_model(x1,y0,z0,model_path)
c110 = get_model(x1,y1,z0,model_path)
c101 = get_model(x1,y0,z1,model_path)
c011 = get_model(x0,y1,z1,model_path)
c111 = get_model(x1,y1,z1,model_path)
wav = np.arange(len(c111))*hd['CDELT1'] + hd['CRVAL1']
c00 = c000*(1-xd) + c100*xd
c01 = c001*(1-xd) + c101*xd
c10 = c010*(1-xd) + c110*xd
c11 = c011*(1-xd) + c111*xd
c0 = c00*(1-yd) + c10*yd
c1 = c01*(1-yd) + c11*yd
c = c0*(1-zd) + c1*zd
return wav,c
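# The weights above implement standard trilinear interpolation: xd, yd and zd are
# the fractional positions of (t, g, z) inside the bracketing grid cell, and the
# eight corner models are collapsed one axis at a time (temperature, then gravity,
# then metallicity). For example, t = 6100 falls between the 6000 K and 6250 K
# grid points, so xd = (6100 - 6000) / 250 = 0.4.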
def normalize_model(w,f):
ow = w.copy()
of = f.copy()
#plot(w,f)
while True:
#medflts = scipy.signal.medfilt(f,1001)
coef = np.polyfit(w,f,6)
fited = np.polyval(coef,w)
res = f - fited
I = np.where(res > -np.sqrt(np.var(res)))[0]
w,f = w[I],f[I]
if len(w) < 0.3* len(ow):
break
#plot(ow,np.polyval(coef,ow))
#show()
return coef
def spec_ccf(sw,sf,mw,mf,vi,vf,dv):
mf = mf -1
mf = -mf
#plot(mw,mf)
tck = interpolate.splrep(mw,mf,k=1)
v = vi
retccf = []
vels = []
while v<=vf:
swt = sw * (1 + v/299792.458)
mft = interpolate.splev(swt,tck)
#if v == 0:
# plot(swt,mft)
# plot(swt,sft)
# show()
mft -= np.mean(mft)
sft = sf - np.mean(sf)
#sft = sf.copy()
#print np.sum(mft**2),np.sum(sft**2)
retccf.append(np.sum(mft*sft)/np.sqrt(np.sum(mft**2)*np.sum(sft**2)))
vels.append(v)
v+=dv
return np.array(vels),np.array(retccf)
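# Minimal illustrative check of spec_ccf (a sketch that only needs numpy and
# scipy.interpolate, both already imported): correlate a synthetic absorption
# line against a copy of itself redshifted by +50 km/s. Because the routine
# shifts the observed wavelengths onto the model grid, the deepest point of the
# CCF lands near v = -50 km/s under this sign convention.
if __name__ == '__main__':
    _c_kms = 299792.458
    _mw_demo = np.linspace(5000., 5020., 4000)
    _mf_demo = 1. - 0.5 * np.exp(-0.5 * ((_mw_demo - 5010.) / 0.3) ** 2)
    _line_obs = 5010. * (1. + 50. / _c_kms)
    _sf_demo = 1. - 0.5 * np.exp(-0.5 * ((_mw_demo - _line_obs) / 0.3) ** 2)
    _vels_demo, _ccf_demo = spec_ccf(_mw_demo, _sf_demo, _mw_demo, _mf_demo, -200., 200., 5.)
    assert abs(_vels_demo[np.argmin(_ccf_demo)] + 50.) <= 5.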
def ccf_fft(swt,sft,mwt,mft):
mf = mft -1
mf = -mf
#plot(mw,mf)
tck = interpolate.splrep(np.log(mwt),mf,k=1)
sw = np.log(swt)
tck2 = interpolate.splrep(sw,sft,k=1)
nsw = np.linspace(sw[0], sw[-1], 5000)
sf = interpolate.splev(nsw,tck2)
mf = interpolate.splev(nsw,tck)
sf -= np.mean(sf)
mf -= np.mean(mf)
plot(nsw,sf)
plot(nsw,mf)
show()
retccf = np.fft.ifft(np.conj(np.fft.fft(sf))*np.fft.fft(mf))
retccf = np.hstack((retccf[2500:],retccf[:2500]))
retvels = np.arange(len(retccf)) - 0.5*len(retccf)
retvels *= (nsw[1]-nsw[0])
retvels = 299792.458*(np.exp(retvels)-1.)
return retvels, retccf
def ccf_simple(sw,sf,mw,mf,rv):
mf = mf -1
mf = -mf
#plot(mw,mf)
tck = interpolate.splrep(mw,mf,k=1)
swt = sw * (1 + rv/299792.458)
mft = interpolate.splev(swt,tck)
mft -= np.mean(mft)
sft = sf - np.mean(sf)
return np.sum(mft*sft)/np.sqrt(np.sum(mft**2)*np.sum(sft**2))
def clean_strong_lines(mw,sc,mode=1):
if mode==1:
#""""
I = np.where((mw>6520)&(mw<6600))[0]
sc[I] = 1.
I = np.where((mw>5888)&(mw<5897))[0]
sc[I] = 1.
I = np.where((mw>4310)&(mw<4360))[0]
sc[I] = 1.
I = np.where((mw>4840)&(mw<4880))[0]
sc[I] = 1.
I = np.where((mw>4070)&(mw<4130))[0]
sc[I] = 1.
I = np.where((mw>3875)&(mw<3900))[0]
sc[I] = 1.
I = np.where((mw>3920)&(mw<3945))[0]
sc[I] = 1.
I = np.where((mw>3955)&(mw<3980))[0]
sc[I] = 1.
I = np.where(mw<3850)[0]
sc[I] = 1.
#"""
if mode==2:
#""""
I = np.where((mw>6550)&(mw<6570))[0]
sc[I] = 1.
I = np.where((mw>5888)&(mw<5897))[0]
sc[I] = 1.
I = np.where((mw>4320)&(mw<4350))[0]
sc[I] = 1.
I = np.where((mw>4850)&(mw<4870))[0]
sc[I] = 1.
I = np.where((mw>4090)&(mw<4110))[0]
sc[I] = 1.
I = np.where((mw>3875)&(mw<3900))[0]
sc[I] = 1.
I = np.where((mw>3920)&(mw<3945))[0]
sc[I] = 1.
I = np.where((mw>3955)&(mw<3980))[0]
sc[I] = 1.
I = np.where(mw<3850)[0]
sc[I] = 1.
#"""
return sc
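# The windows masked above blank out the strongest stellar features -- the Balmer
# lines (H-alpha 6563, H-beta 4861, H-gamma 4340, H-delta 4102), the Na D doublet
# near 5890 and the Ca II H & K lines near 3968/3934 -- by resetting the flux to
# the continuum value of 1 so they do not dominate the cross-correlation; mode=2
# keeps narrower windows around the same features.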
def RVforFR(wavs,flxs,teff=6700,logg=4.0,feh=-1.0,vsini=100.,model_path='../../data/COELHO2014/',vmin=-1000.,vmax=1000.,vstep=10.):
def fitfunc(p,x):
ret = p[3] + p[0] * np.exp(-.5*((x-p[1])/p[2])**2)
return ret
errfunc = lambda p,x,y: np.ravel( (fitfunc(p,x)-y) )
#sc = get_model(teff,logg,feh)
#hd = pyfits.getheader(model_path+get_modname(7000,4.5,0.0)+'.fits')
#wav = np.arange(len(sc))*hd['CDELT1'] + hd['CRVAL1']
teff = float(teff)
try:
sc = get_model(teff,logg,feh)
hd = pyfits.getheader(model_path+get_modname(7000,4.5,0.0)+'.fits')
mw = np.arange(len(sc))*hd['CDELT1'] + hd['CRVAL1']
except:
mw,sc = trilinear_interpolation(teff,logg,feh,model_path)
for order in range(len(flxs)):
flxs[order] = clean_strong_lines(wavs[order],flxs[order])
sc = clean_strong_lines(mw,sc)
II = np.where(sc != 1)[0]
JJ = np.where(sc == 1)[0]
coef = normalize_model(mw[II],sc[II])
sc /= np.polyval(coef,mw)
sc[JJ] = 1.
mw = ToVacuum(mw)
weis1 = []
ccftot = []
for i in range(wavs.shape[0]):
#plot(wavs[i],flxs[i])
scf = flxs[i]
scw = wavs[i]
J = np.where(scf!=0)[0]
scw,scf = scw[J],scf[J]
I = np.where((mw>scw[0]-100) & (mw<scw[-1]+100))
tmf = pyasl.fastRotBroad(mw[I], sc[I], 0.5, vsini)
#plot(mw[I],tmf)
J = np.where(scf!=1)[0]
if len(J)>100:
ccv,ccf = spec_ccf(scw,scf,mw[I],tmf,vmin,vmax,vstep)
#plot(ccv,ccf)
#show()
#ccf = np.array(ccf)
wei1 = len(np.where(scf!=1)[0])**2
weis1.append(wei1)
if len(ccftot)==0:
ccftot = ccf.copy()*wei1
else:
ccftot = np.vstack((ccftot,ccf.copy()*wei1))
#show()
weis1 = np.array(weis1)
ccftot = np.sum(ccftot,axis=0)/ np.sum(weis1)
p0 = [ccftot.min(),ccv[np.argmin(ccftot)],vsini,ccftot[0]]
p1, success = scipy.optimize.leastsq(errfunc,p0, args=(ccv,ccftot))
return p1,ccv,ccftot,fitfunc(p1,ccv)
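# RVforFR returns the Gaussian parameters fitted to the weighted, order-combined
# CCF, p1 = [amplitude, centre (km/s), sigma (km/s), offset], together with the
# velocity grid, the combined CCF and the fitted profile; p1[1] is the velocity
# estimate and p1[2] traces the (rotationally broadened) CCF width.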
def calc_bss2(vels,xc,coef, bot_i=0.15, bot_f=0.4, top_i=0.6, top_f=0.9, dt=0.01):
try:
I1 = np.where((vels>coef[1]-3*coef[2]) & (vels<coef[1]) )[0]
I2 = np.where((vels<coef[1]+3*coef[2]) & (vels>coef[1]) )[0]
I3 = np.where(vels<coef[1]-4*coef[2])[0]
I4 = np.where(vels>coef[1]+4*coef[2])[0]
I = np.hstack((I3,I4))
base = np.median(xc[I])
xc = base - xc
xc /= xc.max()
v1,x1 = vels[I1],xc[I1]
v2,x2 = vels[I2],xc[I2]
#plot(v1,x1)
#plot(v2,x2)
#show()
dp = top_f
vect = []
while dp >= top_i:
lb = np.where(x1>dp)[0][0]
m = (v1[lb] - v1[lb-1])/(x1[lb]-x1[lb-1])
n = v1[lb] - m*x1[lb]
bs1 = m*dp+n
lb = np.where(x2>dp)[0][-1]
m = (v2[lb] - v2[lb+1])/(x2[lb]-x2[lb+1])
n = v2[lb] - m*x2[lb]
bs2 = m*dp+n
vect.append(0.5*(bs2+bs1))
dp-=dt
vect = np.array(vect)
dp = bot_f
vecb = []
while dp >= bot_i:
lb = np.where(x1>dp)[0][0]
m = (v1[lb] - v1[lb-1])/(x1[lb]-x1[lb-1])
n = v1[lb] - m*x1[lb]
bs1 = m*dp+n
lb = np.where(x2>dp)[0][-1]
m = (v2[lb] - v2[lb+1])/(x2[lb]-x2[lb+1])
n = v2[lb] - m*x2[lb]
bs2 = m*dp+n
vecb.append(0.5*(bs2+bs1))
dp-=dt
vecb = np.array(vecb)
return np.median(vecb) - np.median(vect)
except:
return -999.0
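# calc_bss2 computes a bisector span for the CCF: after inverting and normalising
# the profile it takes the median bisector velocity over a band near the bottom
# of the line (bot_i..bot_f of the depth) and near the top (top_i..top_f), and
# returns bottom minus top; -999.0 signals that the bisector could not be
# measured. Values far from zero flag an asymmetric CCF.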
"""
def lnlike(theta, W, F, Ferr):
mw,sc = trilinear_interpolation(int(theta[0]),theta[1],theta[2])
sct = clean_strong_lines(mw,sc.copy())
#plot(mw,sc)
#show()
coef = normalize_model(mw,sct)
sc /= np.polyval(coef,mw)
#print gfd
mw = ToVacuum(mw)
mw *= 1 + theta[3]/299792.458
totD,totM,totE = np.array([]),np.array([]),np.array([])
for i in range(W.shape[0]):
scf = F[i]
scw = W[i]
scfe = Ferr[i]
J = np.where(scf!=0)[0]
scw,scf,scfe = scw[J],scf[J],scfe[J]
I = np.where((mw>scw[0]-10) & (mw<scw[-1]+10))
tmf = pyasl.fastRotBroad(mw[I], sc[I], 0.5, theta[4])
tck = interpolate.splrep(mw[I],tmf,k=1)
tmf = interpolate.splev(scw,tck)
tmf = clean_strong_lines(scw,tmf.copy())
I = np.where(tmf!=1)[0]
#plot(scw,tmf)
#plot(scw[I],tmf[I])
#plot(scw[I],scf[I])
#show()
#print gfd
tmf = tmf[I]
scf = scf[I]
scfe = scfe[I]
tmf /= np.sum(tmf)
tsf = scf/np.sum(scf)
tse = scfe*(np.sum(scf)**2)
totD = np.hstack((totD,tsf))
totM = np.hstack((totM,tmf))
totE = np.hstack((totE,tse))
#plot(scw[I],tsf)
#plot(scw[I],tmf)
#plot(scw[I],tsf + 1./np.sqrt(tse))
#show()
#print fds
#print theta
#show()
#print gvfd
#ret = -np.log(2*np.pi) + np.log(np.sum(np.exp(-0.5*((y-model)/yerr)**2)/yerr))
#ret = -0.5*(np.sum(inv_sigma2*(F-model)**2 - np.log(inv_sigma2)))
ret = -0.5*(np.sum(totE*(totD-totM)**2 - np.log(totE)))
#for i in range(len(F)):
# errorbar(Y,F[i],yerr=Ferr[i],fmt='b')
#for j in model:
# plot(Y,j,'r')
#show()
#print theta, ret
if np.isnan(ret):
return -np.inf
else:
return ret
def lnprior(theta):
if 6000 < theta[0] < 9000 and 3.0 < theta[1] < 4.5 and -1 < theta[2] < 0.2 and -500 < theta[3] < 500 and 1. < theta[4] < 500.:
return 0.0
return -np.inf
def lnprob(theta, W,F,Ferr):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta,W,F,Ferr)
"""
def multiccf(pars):
teff,logg,feh,vsini=pars[0],pars[1],pars[2],pars[3]
vmin=-500
vmax=500.
vstep=20.
sc = get_model(teff,logg,feh)
hd = pyfits.getheader(model_path+get_modname(7000,4.5,0.0)+'.fits')
wav = np.arange(len(sc))*hd['CDELT1'] + hd['CRVAL1']
try:
sc = get_model(teff,logg,feh)
hd = pyfits.getheader(model_path+get_modname(7000,4.5,0.0)+'.fits')
mw = np.arange(len(sc))*hd['CDELT1'] + hd['CRVAL1']
except:
mw,sc = trilinear_interpolation(teff,logg,feh,model_path)
sc = clean_strong_lines(mw,sc)
II = np.where(sc != 1)[0]
JJ = np.where(sc == 1)[0]
coef = normalize_model(mw[II],sc[II])
sc /= np.polyval(coef,mw)
sc[JJ] = 1.
mw = ToVacuum(mw)
weis1 = []
ccftot = []
for i in range(wavs.shape[0]):
scf = flxs[i].copy()
scw = wavs[i].copy()
J = np.where(scf!=0)[0]
scw,scf = scw[J],scf[J]
I = np.where((mw>scw[0]-100) & (mw<scw[-1]+100))
tmf = pyasl.fastRotBroad(mw[I], sc[I], 0.5, vsini)
#plot(mw[I],tmf)
J = np.where(scf!=1)[0]
if len(J)>100:
ccv,ccf = spec_ccf(scw,scf,mw[I],tmf,vmin,vmax,vstep)
#ccv,ccf = ccf_fft(scw,scf,mw[I],tmf)
#plot(ccv,ccf)
#show()
wei1 = len(np.where(scf!=1)[0])**2
weis1.append(wei1)
if len(ccftot)==0:
ccftot = ccf.copy()*wei1
else:
ccftot = np.vstack((ccftot,ccf.copy()*wei1))
weis1 = np.array(weis1)
ccftot = np.sum(ccftot,axis=0)/ np.sum(weis1)
#print gfds
#ccftot = np.mean(ccftot,axis=0)
#print pars, ccftot.min()
return ccftot.min()
def get_pars_fr(wavst,flxst,model_patht='../../data/COELHO2014/',npools=4,fixG=1.0):
for order in range(len(flxst)):
flxst[order] = clean_strong_lines(wavst[order],flxst[order],mode=1)
t0 = time.time()
global wavs,flxs
global model_path
wavs,flxs=wavst.copy(),flxst.copy()
model_path=model_patht
gt = np.array([6000,7000,8000,9000,10000])
gg = np.array([2.5,3.0,3.5,4.0,4.5])
if fixG != -1:
gg = np.array([fixG])
gz = np.array([-1,-0.5,0.0,0.2])
gr = np.array([10.,50.,100.,150.,200.,250.,300.])
#"""
tr = np.tile(gr,len(gt)*len(gg)*len(gz))
tg = np.repeat(np.tile(gg,len(gt)),len(gr)*len(gz))
tz = np.repeat(np.tile(gz,len(gt)*len(gg)),len(gr))
tt = np.repeat(gt,len(gg)*len(gr)*len(gz))
tot = np.vstack((tt,tg,tz,tr)).T
#for pars in tot:
# pars = [8000,4.0,-0.5,40.0]
# print pars, multiccf(pars)
p = Pool(npools)
vals = np.array((p.map(multiccf, list(tot))))
p.terminate()
I = np.argmin(vals)
best_vals = tot[I]
bt,bg,bz,br = best_vals[0],best_vals[1],best_vals[2],best_vals[3]
#"""
t1 = time.time()
print bt,bg,bz,br, (t1-t0)/60.,'mins'
#bt,bg,bz,br = 7000.,4.5, 0.2, 100.0
gt = np.arange(bt-1000,bt+1001,250)
I = np.where((gt>=6000) & (gt<=10000))[0]
gt = gt[I]
gr = np.arange(br-60.,br+61.,20.)
I = np.where(gr>=10)[0]
gr = gr[I]
tr = np.tile(gr,len(gt)*len(gg)*len(gz))
tg = np.repeat(np.tile(gg,len(gt)),len(gr)*len(gz))
tz = np.repeat(np.tile(gz,len(gt)*len(gg)),len(gr))
tt = np.repeat(gt,len(gg)*len(gr)*len(gz))
tot = np.vstack((tt,tg,tz,tr)).T
p = Pool(npools)
vals = np.array((p.map(multiccf, list(tot))))
p.terminate()
I = np.argmin(vals)
best_vals = tot[I]
bt,bg,bz,br = best_vals[0],best_vals[1],best_vals[2],best_vals[3]
t2 = time.time()
print bt,bg,bz,br, (t2-t1)/60.,'mins'
#np.savetxt('temp_grid.txt',vals)
if fixG==-1:
grid = np.reshape(vals,(len(gt),len(gg),len(gz),len(gr)))
tckt = interpolate.splrep(gt,np.arange(len(gt)),k=1)
tckg = interpolate.splrep(gg,np.arange(len(gg)),k=1)
tckz = interpolate.splrep(gz,np.arange(len(gz)),k=1)
tckr = interpolate.splrep(gr,np.arange(len(gr)),k=1)
itckt = interpolate.splrep(np.arange(len(gt)),gt,k=1)
itckg = interpolate.splrep(np.arange(len(gg)),gg,k=1)
itckz = interpolate.splrep(np.arange(len(gz)),gz,k=1)
itckr = interpolate.splrep(np.arange(len(gr)),gr,k=1)
st = np.arange(gt[0],gt[-1]+1,10.)
sg = np.arange(gg[0],gg[-1]+0.01,0.1)
sz = np.arange(gz[0],gz[-1]+0.01,0.1)
sr = np.arange(gr[0],gr[-1]+1.,5.)
st = interpolate.splev(st,tckt)
sg = interpolate.splev(sg,tckg)
sz = interpolate.splev(sz,tckz)
sr = interpolate.splev(sr,tckr)
tr2 = np.tile(sr,len(st)*len(sg)*len(sz))
tg2 = np.repeat(np.tile(sg,len(st)),len(sr)*len(sz))
tz2 = np.repeat(np.tile(sz,len(st)*len(sg)),len(sr))
tt2 = np.repeat(st,len(sg)*len(sr)*len(sz))
tot2 = np.vstack((tt2,tg2,tz2,tr2))
zi = ndimage.map_coordinates(grid, tot2, order=3, mode='nearest')
I = np.argmin(zi)
minval = tot2[:,I]
mint = interpolate.splev(minval[0],itckt)
ming = interpolate.splev(minval[1],itckg)
minz = interpolate.splev(minval[2],itckz)
minr = interpolate.splev(minval[3],itckr)
else:
grid = np.reshape(vals,(len(gt),len(gz),len(gr)))
tckt = interpolate.splrep(gt,np.arange(len(gt)),k=1)
tckz = interpolate.splrep(gz,np.arange(len(gz)),k=1)
tckr = interpolate.splrep(gr,np.arange(len(gr)),k=1)
itckt = interpolate.splrep(np.arange(len(gt)),gt,k=1)
itckz = interpolate.splrep(np.arange(len(gz)),gz,k=1)
itckr = interpolate.splrep(np.arange(len(gr)),gr,k=1)
st = np.arange(gt[0],gt[-1]+1,10.)
sz = np.arange(gz[0],gz[-1]+0.01,0.1)
sr = np.arange(gr[0],gr[-1]+1.,5.)
st = interpolate.splev(st,tckt)
sz = interpolate.splev(sz,tckz)
sr = interpolate.splev(sr,tckr)
tr2 = np.tile(sr,len(st)*len(sz))
tz2 = np.repeat(np.tile(sz,len(st)),len(sr))
tt2 = np.repeat(st,len(sr)*len(sz))
tot2 = np.vstack((tt2,tz2,tr2))
zi = ndimage.map_coordinates(grid, tot2, order=3, mode='nearest')
I = np.argmin(zi)
minval = tot2[:,I]
mint = interpolate.splev(minval[0],itckt)
ming = fixG
minz = interpolate.splev(minval[1],itckz)
minr = interpolate.splev(minval[2],itckr)
#d = {'grid':grid, 'zi':zi, 'tot2':tot2, 'gt':gt, 'gg':gg, 'gz':gz, 'gr':gr}
#pickle.dump(d,open('temp_dict.pkl'))
return float(mint),float(ming),float(minz),float(minr)
def plot_CCF_FR(xc_dict,path='XC.pdf'):
vels = xc_dict['vels']
xc_av = xc_dict['xc_av']
XCmodelgau = xc_dict['XCmodelgau']
#refvel = xc_dict['refvel']
p1gau = xc_dict['p1gau']
f1 = figure()
pp = PdfPages(path)
ax1 = f1.add_subplot(111)
ax1.plot(vels, xc_av,'b.', label='CCF')
ax1.plot(vels, XCmodelgau,'r-',label='Gaussian fit')
xlabel('Velocity (km/s)')
ylabel('XC')
ax1.axvline(p1gau[1],linestyle=':',color='r')
ax1.axhline(0.0,linestyle='-')
title('Average Cross-Correlation Function + Fit')
handles, labels = ax1.get_legend_handles_labels()
ax1.legend(handles[::-1], labels[::-1],prop={'size':6})
pp.savefig()
pp.close()
clf()
pass
"""
def trans_chromosome(chromosome):
teff = chromosome[0]*100.+chromosome[1]*10.+chromosome[2]
m = (10000.- 6000.)/999.
n = 6000.
teff = teff*m + n
logg = chromosome[3] + chromosome[4]*0.1
m = (4.5 - 3.0)/9.9
n = 3.
logg = logg*m + n
feh = chromosome[5] + chromosome[6]*0.1
m = (0.2 - -1.)/9.9
n = -1.
feh = feh*m + n
vsini = chromosome[7]*10. + chromosome[8]
m = (300. - 10.)/99.
n = 10.
vsini = vsini*m + n
return teff, logg, feh, vsini
global wavs, flxs
def find_pars_GA(wavs,flxs,model_path='../../data/COELHO2014/'):
def eval_func(chromosome):
print list(chromosome)
teff, logg, feh, vsini = trans_chromosome(chromosome)
print teff, logg, feh, vsini
pt,vels,ccf,mod = RVforFR(wavs,flxs,teff=teff,logg=logg,feh=feh,vsini=vsini,model_path=model_path)
score = -ccf.min()
return score
genome = G1DList.G1DList(9)
genome.evaluator.set(eval_func)
ga = GSimpleGA.GSimpleGA(genome, interactiveMode=True)
ga.setGenerations(40)
ga.setMutationRate(0.2)
ga.setPopulationSize(20)
#ga.setCrossoverRate(1.0)
genome.setParams(rangemin=0, rangemax=9)
#ga.setMultiProcessing(True)
ga.evolve(freq_stats=10)
print ga.bestIndividual()
print trans_chromosome(ga.bestIndividual())
"""
| mit |
xiaoweih/DLV | networks/imageNet.py | 1 | 1666 | import os, struct
from array import array as pyarray
from cvxopt.base import matrix
import numpy as np
import PIL.Image
# FIXME: need actual class names
def LABELS(index):
ls = labels()
if len(ls) > 0:
return ls[index]
else: return range(1000)[index]
def labels():
file = open('networks/imageNet/caffe_ilsvrc12/synset_words.txt', 'r')
data = file.readlines()
ls = []
for line in data:
words = line.split()
ls.append(' '.join(words[1:]))
return ls
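# labels() assumes the stock caffe_ilsvrc12 synset_words.txt layout, where each
# line begins with a WordNet synset id followed by the human-readable class
# names; the id (first token) is dropped and the remaining words are joined into
# a single label string.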
def save(layer,image,filename):
"""
"""
import cv2
import copy
image_cv = copy.deepcopy(image)
image_cv = image_cv.transpose(1, 2, 0)
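    # Undo the mean subtraction applied during preprocessing: 103.939, 116.779 and
    # 123.68 are the standard ImageNet mean pixel values in BGR order (the
    # Caffe/VGG convention), so adding them back gives a displayable image.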
image_cv[:,:,0] += 103.939
image_cv[:,:,1] += 116.779
image_cv[:,:,2] += 123.68
#print(np.amax(image_cv),np.amin(image_cv))
cv2.imwrite(filename, image_cv)
# from matplotlib import pyplot
# import matplotlib as mpl
# fig = pyplot.figure()
# ax = fig.add_subplot(1,1,1)
# # image = image.reshape(3,32,32).transpose(1,2,0)
# imgplot = ax.imshow(image.T, cmap=mpl.cm.Greys)
# imgplot.set_interpolation('nearest')
# ax.xaxis.set_ticks_position('top')
# ax.yaxis.set_ticks_position('left')
# pyplot.savefig(filename)
def show(image):
"""
"""
from matplotlib import pyplot
import matplotlib as mpl
fig = pyplot.figure()
ax = fig.add_subplot(1,1,1)
#image = image.reshape(3,32,32).transpose(1,2,0)
imgplot = ax.imshow(image.T, cmap=mpl.cm.Greys)
imgplot.set_interpolation('nearest')
ax.xaxis.set_ticks_position('top')
ax.yaxis.set_ticks_position('left')
pyplot.show()
| gpl-3.0 |
aabadie/scikit-learn | examples/manifold/plot_lle_digits.py | 138 | 8594 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learn a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble,
discriminant_analysis, random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing Linear Discriminant Analysis projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = discriminant_analysis.LinearDiscriminantAnalysis(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
| bsd-3-clause |
annashcherbina/FASTQSim | src/msa_from_sam.py | 2 | 15121 | #This file does three things:
#1. calculate the frequency of mutations, insertions, and deletions at each position in an
#NGS read.
#2. find the degree of coverage of the reference genome
#3. find the fraction of each subject read that was aligned to a corresponding sequence
#in the reference genome.
# @author Anna Shcherbina (mailto: anna.shcherbina@ll.mit.edu)
#License: GNU GPL license (http://www.gnu.org/licenses/gpl.html)
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#takes a sam file as input and produces characterization csv files as an output
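#Example invocation (file names below are illustrative):
# python msa_from_sam.py aligned_reads.sam characterization/run1_ -plothistogram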
from helpers import *;
from pandas import DataFrame;
import sys;
inputf=open(sys.argv[1],'r')
outputprefix=sys.argv[2]
plothistogram=False
if len(sys.argv) > 3:
if sys.argv[3]=="-plothistogram":
plothistogram=True
import matplotlib.pyplot as plt
posCount=dict()
delCount=dict()
insertCount=dict()
mutCount=dict()
mutType=dict()
insertSize=dict()
delSize=dict()
Usage=dict()
insertsByRead=[]
delsByRead=[]
for line in inputf:
if line.startswith('@'):
continue
line=line.strip();
line=line.split('\t')
#print str(line)+'\n'
seqname=line[0]
aligned=int(line[1])
# print str(aligned)+'\n'
if aligned !=0:
continue
refname=line[2]
refstartpos=int(line[3])
cigar=line[5]
strand=line[9]
strandlength=len(strand)
md=""
#account for single base pair mutations
for i in range(11,len(line)):
if 'MD' in line[i]:
md=line[i].replace('MD','')
md=md.replace('Z','')
md=md.replace(':','')
break
ref_snp_bases=parseMD(md)
cigarlist=parseCIGAR(cigar)
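    # parseCIGAR/parseMD come from helpers (not shown here). Judging from the op
    # checks below, a CIGAR like "3M1I4M" is assumed to yield blocks such as
    # ['3m', '1i', '4m'], and an MD string like "10A5" names the reference base
    # at each mismatch position.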
#posCount
startpos,endpos=getPosCount(cigarlist,strandlength)
Usage[seqname]=[strandlength,endpos-startpos+1,startpos,endpos]
strandofinterest=strand[startpos:endpos+1]
for p in range(startpos,endpos+1):
if p not in posCount:
posCount[p]=1
else:
posCount[p]+=1
alignmentpos=0;
curpos=startpos;
insertcountforread=0;
delcountforread=0;
#get Insertions/Deletions
for block in cigarlist:
if 'h' in block:
continue
elif 'p' in block:
continue
elif 's' in block:
continue
elif ('m' in block) or ('=' in block) or ('x' in block):
#match or single-base mutation
numbases=int(block.replace('m',''))
alignmentpos+=numbases
curpos+=numbases
elif 'i' in block:
insertcountforread+=1
insertionsize=int(block.replace('i',''))
insertionbases=strand[curpos+1:curpos+2+insertionsize]
for p in range(curpos+1,curpos+2+insertionsize):
if p not in insertCount:
insertCount[p]=1
else:
insertCount[p]+=1
longestRepeat=getLongestRepeat(insertionbases)
if insertionsize not in insertSize:
insertSize[insertionsize]=[1,0]
else:
insertSize[insertionsize][0]+=1
if (longestRepeat > 0) and (longestRepeat not in insertSize):
insertSize[longestRepeat]=[0,1]
elif longestRepeat > 0:
insertSize[longestRepeat][1]+=1
curpos+=insertionsize
alignmentpos+=insertionsize
elif 'd' in block:
delcountforread+=1
deletionsize=int(block.replace('d',''))
deletionbases=strand[curpos+1:curpos+2+deletionsize]
for p in range(curpos+1,curpos+2+deletionsize):
if p not in delCount:
delCount[p]=1
else:
delCount[p]+=1
longestRepeat=getLongestRepeat(deletionbases)
if deletionsize not in delSize:
delSize[deletionsize]=[1,0]
else:
delSize[deletionsize][0]+=1
if (longestRepeat > 0) and (longestRepeat not in delSize):
delSize[longestRepeat]=[0,1]
elif longestRepeat > 0:
delSize[longestRepeat][1]+=1
curpos+=deletionsize
alignmentpos+=deletionsize
else:
print "unknown Op in CIGAR:"+str(block)
#print "strandofinterest:"+str(strandofinterest)+'\n'
#Handle single point mutations from MD tag
insertsByRead.append(insertcountforread)
delsByRead.append(delcountforread)
mutation_pos=identifySNPs(cigarlist,strandofinterest);
#offset each mutation position by the start of the alignment
if len(ref_snp_bases)!=len(mutation_pos):
print "MD does not agree with MN for strand:\n"
print str(strand)+'\n'
print "using conservative estimate for SNPs\n"
for i in range(min(len(mutation_pos),len(ref_snp_bases))):
ref_base=(ref_snp_bases[i]).lower()
snp_pos=int(mutation_pos[i][0])+startpos
seq_base=(mutation_pos[i][1]).lower()
if snp_pos not in mutCount:
mutCount[snp_pos]=1
else:
mutCount[snp_pos]+=1
if ref_base not in mutType:
mutType[ref_base]=dict()
if seq_base not in mutType[ref_base]:
mutType[ref_base][seq_base]=1
else:
mutType[ref_base][seq_base]+=1
#generate the output files
fout=open(outputprefix+"posCount.csv",'w')
for entry in posCount:
fout.write(str(entry)+','+str(posCount[entry])+'\n')
fout=open(outputprefix+"delCount.csv",'w')
for entry in delCount:
fout.write(str(entry)+','+str(delCount[entry])+'\n')
fout=open(outputprefix+"insertCount.csv",'w')
for entry in insertCount:
fout.write(str(entry)+','+str(insertCount[entry])+'\n')
fout=open(outputprefix+"mutationCount.csv","w")
for entry in mutCount:
fout.write(str(entry)+','+str(mutCount[entry])+'\n')
fout=open(outputprefix+"mutationType.csv","w")
for entry in mutType:
fout.write(entry)
for subentry in mutType[entry]:
fout.write(','+subentry+','+str(mutType[entry][subentry]))
fout.write('\n')
fout=open(outputprefix+"insertsByRead.csv","w")
if len(insertsByRead)>0:
fout.write(str(insertsByRead[0]))
for i in range(1,len(insertsByRead)):
fout.write(','+str(insertsByRead[i]))
fout=open(outputprefix+"delsByRead.csv","w")
if len(delsByRead) > 0:
fout.write(str(delsByRead[0]))
for i in range(1,len(delsByRead)):
fout.write(','+str(delsByRead[i]))
fout=open(outputprefix+"insertSize.csv","w")
for entry in insertSize:
fout.write(str(entry)+","+str(insertSize[entry][0])+','+str(insertSize[entry][1])+'\n')
fout=open(outputprefix+"delSize.csv","w")
for entry in delSize:
fout.write(str(entry)+','+str(delSize[entry][0])+","+str(delSize[entry][1])+'\n')
fout=open(outputprefix+"Usage.csv","w")
for entry in Usage:
fout.write(str(entry)+","+str(Usage[entry][0])+','+str(Usage[entry][1])+','+str(Usage[entry][2])+','+str(Usage[entry][3])+'\n')
#write a summary file
fout=open(outputprefix+"summary.csv",'w')
fout.write(outputprefix+"posCount.csv\n")
fout.write(outputprefix+"delCount.csv\n")
fout.write(outputprefix+"insertCount.csv\n")
fout.write(outputprefix+"mutationCount.csv\n")
fout.write(outputprefix+"mutationType.csv\n")
fout.write(outputprefix+"insertSize.csv\n")
fout.write(outputprefix+"delSize.csv\n")
fout.write(outputprefix+"Usage.csv\n")
fout.write(outputprefix+"readHist.csv\n")
fout.write(outputprefix+"qualHist.csv\n")
fout.write(outputprefix+"insertsByRead.csv\n")
fout.write(outputprefix+"delsByRead.csv\n")
#if plotting is enabled, generate plots of the characterization statistics
if plothistogram:
#insertion probability from insertion count.
insertprob=dict()
for entry in posCount:
if entry in insertCount:
insertprob[entry]=float(insertCount[entry])/float(posCount[entry])
plotname=outputprefix+"CharacterizationInsertCount.png"
fig=plt.figure();
ax=fig.add_subplot(111)
ax.bar(insertprob.keys(), insertprob.values(),width=1,color='r',log=True)
ax.set_ylim([10e-5,1])
plt.setp(ax.get_xticklabels(),fontsize=18)
plt.setp(ax.get_yticklabels(),fontsize=18)
ax.set_xlabel('Base Position Along a Read',fontsize=20)
ax.set_ylabel('Probability of Insertion',fontsize=20)
ax.set_title('Probability of Insertion as a Function of Base Position\n Dataset '+ str(sys.argv[1].split('/')[-1]),fontsize=20)
plt.grid(True)
plt.savefig(plotname,bbox_inches=0)
#plot the insertion size distribution
plotname=outputprefix+"CharacterizationInsertSize.png"
fig=plt.figure()
ax=fig.add_subplot(111)
barwidth=0.2
overallSize=[i - barwidth for i in insertSize.keys()]
overallCount=[i[0] for i in insertSize.values()]
repeatCount=[i[1] for i in insertSize.values()]
bar1=ax.bar(overallSize,overallCount,width=barwidth,color='b',align='center',log=True,label='Total Insertions')
bar2=ax.bar(insertSize.keys(),repeatCount,width=barwidth,color='r',align='center',log=True,label='Repeat Insertions')
ax.legend(loc=1,borderaxespad=0)
plt.setp(ax.get_xticklabels(),fontsize=18)
plt.setp(ax.get_yticklabels(),fontsize=18)
ax.set_xlabel('Insert Size',fontsize=20)
ax.set_ylabel('Insert Count',fontsize=20)
ax.set_title('Insertion Size \n Dataset '+sys.argv[1].split('/')[-1],fontsize=20)
plt.grid(True)
plt.savefig(plotname,bbox_inches=0)
#deletion probability from deletion count.
delprob=dict()
for entry in posCount:
if entry in delCount:
delprob[entry]=float(delCount[entry])/float(posCount[entry])
plotname=outputprefix+"CharacterizationDelCount.png"
fig = plt.figure()
ax=fig.add_subplot(111)
ax.bar(delprob.keys(), delprob.values(),color='r',log=True)
ax.set_ylim([10e-5,1])
plt.setp(ax.get_xticklabels(),fontsize=18)
plt.setp(ax.get_yticklabels(),fontsize=18)
ax.set_xlabel('Base Position Along a Read',fontsize=20)
ax.set_ylabel('Probability of Deletion',fontsize=20)
ax.set_title('Probability of Deletion as a Function of Base Position\n Dataset '+ str(sys.argv[1].split('/')[-1]),fontsize=20)
plt.grid(True)
plt.savefig(plotname,bbox_inches=0)
#plot the deletion size distribution
plotname=outputprefix+"CharacterizationDelSize.png"
fig=plt.figure()
ax=fig.add_subplot(111)
ax.set_yscale('log')
overallSize=[i - 0.1 for i in delSize.keys()]
overallCount=[i[0] for i in delSize.values()]
repeatCount=[i[1] for i in delSize.values()]
bar1=ax.bar(overallSize,overallCount,width=0.2,color='b',align='center',label='Total Deletions',log=True)
bar2=ax.bar(delSize.keys(),repeatCount,width=0.2,color='r',align='center',label='Repeat Deletions',log=True)
ax.legend(loc=1,borderaxespad=0)
plt.setp(ax.get_xticklabels(),fontsize=18)
plt.setp(ax.get_yticklabels(),fontsize=18)
ax.set_xlabel('Deletion Size',fontsize=20)
ax.set_ylabel('Deletion Count',fontsize=20)
ax.set_title('Deletion Size \n Dataset'+str(sys.argv[1].split('/')[-1]),fontsize=20)
plt.grid(True)
plt.savefig(plotname,bbox_inches=0)
#plot the probability from mutation count
mutprob=dict()
for entry in posCount:
if entry in mutCount:
mutprob[entry]=float(mutCount[entry])/posCount[entry]
plotname=outputprefix+"CharacterizationMutationCount.png"
fig = plt.figure()
ax=fig.add_subplot(111)
ax.bar(mutprob.keys(), mutprob.values(),color='r',log=True)
ax.set_ylim([10e-5,1])
plt.setp(ax.get_xticklabels(),fontsize=18)
plt.setp(ax.get_yticklabels(),fontsize=18)
ax.set_xlabel('Base Position Along a Read',fontsize=20)
ax.set_ylabel('Probability of Mutation',fontsize=20)
ax.set_title('Probability of Mutation as a Function of Base Position\n Dataset '+ str(sys.argv[1].split('/')[-1]),fontsize=20)
plt.grid(True)
plt.savefig(plotname,bbox_inches=0)
#plot mutation type histogram
mtp=dict()
bases=['a','t','c','g','n']
for b1 in bases:
mtp[b1]=dict()
for b2 in bases:
if b1==b2:
continue
mtp[b1][b2]=0
for key in mutType:
totalmuts=float(sum(mutType[key].values()))
for subkey in mutType[key]:
mtp[key][subkey]=mutType[key][subkey]/max(0.01,totalmuts)
    mutationDF = DataFrame(
        [[0, mtp['a']['t'], mtp['a']['c'], mtp['a']['g'], mtp['a']['n']],
         [mtp['t']['a'], 0, mtp['t']['c'], mtp['t']['g'], mtp['t']['n']],
         [mtp['c']['a'], mtp['c']['t'], 0, mtp['c']['g'], mtp['c']['n']],
         [mtp['g']['a'], mtp['g']['t'], mtp['g']['c'], 0, mtp['g']['n']],
         [mtp['n']['a'], mtp['n']['t'], mtp['n']['c'], mtp['n']['g'], 0]],
        columns=['A', 'T', 'C', 'G', 'N'])
plotname=outputprefix+'CharacterizationMutType.png'
mutplot=mutationDF.plot(kind='bar',stacked=True)
group_labels=['A','T','C','G','N']
mutplot.set_xticklabels(group_labels)
mutplot.set_ylim([0,1])
plt.setp(mutplot.get_xticklabels(),fontsize=18)
plt.setp(mutplot.get_yticklabels(),fontsize=18)
mutplot.set_xlabel('Original Base',fontsize=20)
mutplot.set_ylabel('Mutated Base',fontsize=20)
mutplot.set_title('Probability of Mutation by Base\n Dataset '+str(sys.argv[1].split('/')[-1]),fontsize=20)
plt.grid(True)
plt.savefig(plotname,bbox_inches=0)
#plot read usage
readUsageDist=[]
for entry in Usage:
totalReadLength=Usage[entry][0]
usedReadLength=Usage[entry][1]
fractionUsed=float(usedReadLength)/float(totalReadLength)
readUsageDist.append(fractionUsed)
plotname=outputprefix+'CharacterizationUsage.png'
fig=plt.figure()
ax = fig.add_subplot(111)
ax.set_yscale('log')
ax.set_xlim([0,1])
ax.hist(readUsageDist,100,histtype='bar',color='r')
plt.setp(ax.get_xticklabels(),fontsize=18)
plt.setp(ax.get_yticklabels(),fontsize=18)
ax.set_xlabel('Fraction of Aligned Bases in Read',fontsize=20)
ax.set_ylabel('Number of Reads',fontsize=20)
ax.set_title('Read Alignment Quality \n Dataset '+str(sys.argv[1].split('/')[-1]),fontsize=20)
plt.grid(True)
plt.savefig(plotname,bbox_inches=0)
| gpl-3.0 |
daodaoliang/neural-network-animation | matplotlib/tests/test_table.py | 10 | 2083 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.testing.decorators import image_comparison
@image_comparison(baseline_images=['table_zorder'],
extensions=['png'],
remove_text=True)
def test_zorder():
data = [[66386, 174296],
[58230, 381139]]
colLabels = ('Freeze', 'Wind')
rowLabels = ['%d year' % x for x in (100, 50)]
cellText = []
yoff = np.array([0.0] * len(colLabels))
for row in reversed(data):
yoff += row
cellText.append(['%1.1f' % (x/1000.0) for x in yoff])
t = np.linspace(0, 2*np.pi, 100)
plt.plot(t, np.cos(t), lw=4, zorder=2)
plt.table(cellText=cellText,
rowLabels=rowLabels,
colLabels=colLabels,
loc='center',
zorder=-2,
)
plt.table(cellText=cellText,
rowLabels=rowLabels,
colLabels=colLabels,
loc='upper center',
zorder=4,
)
plt.yticks([])
@image_comparison(baseline_images=['table_labels'],
extensions=['png'])
def test_label_colours():
dim = 3
c = np.linspace(0, 1, dim)
colours = plt.cm.RdYlGn(c)
cellText = [['1'] * dim] * dim
fig = plt.figure()
ax1 = fig.add_subplot(4, 1, 1)
ax1.axis('off')
ax1.table(cellText=cellText,
rowColours=colours,
loc='best')
ax2 = fig.add_subplot(4, 1, 2)
ax2.axis('off')
ax2.table(cellText=cellText,
rowColours=colours,
rowLabels=['Header'] * dim,
loc='best')
ax3 = fig.add_subplot(4, 1, 3)
ax3.axis('off')
ax3.table(cellText=cellText,
colColours=colours,
loc='best')
ax4 = fig.add_subplot(4, 1, 4)
ax4.axis('off')
ax4.table(cellText=cellText,
colColours=colours,
colLabels=['Header'] * dim,
loc='best')
| mit |
rbiswas4/SNsims | snsims_previous/snsims/tmp/models.py | 1 | 2804 | #!/usr/bin/env python
import sncosmo.models
import numpy
class SEDFileSource(sncosmo.models.TimeSeriesSource):
"""A TimeSeriesSource stored in a 3-column ASCII file format, for PHASE,
LAMBDA, and F_LAMBDA. The hash symbol # is a comment line.
The spectral flux density of this model is given by
.. math::
F(t, \lambda) = A \\times M(t, \lambda)
where _M_ is the flux defined on a grid in phase and wavelength and _A_
(amplitude) is the single free parameter of the model. It should be noted
that while t and \lambda are in the rest frame of the object, the flux
density is defined at redshift zero. This means that for objects with the
same intrinsic luminosity, the amplitude will be smaller for objects at
larger luminosity distances.
Parameters
----------
filename : str
Name of the filename that contains the Time Series
zero_before : bool, optional
If True, flux at phases before minimum phase will be zeroed. The
default is False, in which case the flux at such phases will be equal
to the flux at the minimum phase (``flux[0, :]`` in the input array).
version : str, optional
Version of the model. Default is `None`.
Returns
-------
`~sncosmo.TimeSeriesSource` instance representing the TimeSeriesSource
in file
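
    Examples
    --------
    A minimal sketch, assuming ``mysed.dat`` is a whitespace-delimited
    three-column (phase, wavelength, flux) file; the file name and values
    below are illustrative::

        source = SEDFileSource('mysed.dat', zero_before=True)
        source.set(amplitude=2.0)
        flux = source.flux(0., [4000., 5000.])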
"""
_param_names = ['amplitude']
param_names_latex = ['A']
def __init__(self, filename, zero_before=False, version=None):
phase, wave, flux = numpy.loadtxt(filename, unpack=True)
# Convert 3 column format to that expected by TimeSeriesSource
phase_u = numpy.unique(phase)
wave_u = numpy.unique(wave)
lenp = len(phase_u)
lenw = len(wave_u)
if lenp * lenw != len(flux):
raise TypeError('File is not a TimeSeriesSource')
i = numpy.zeros(len(flux), dtype='int')
j = numpy.zeros(len(flux), dtype='int')
for index, p in enumerate(phase_u):
i[phase == p] = index
for index, w in enumerate(wave_u):
j[wave == w] = index
flux = flux[i * lenw + j]
flux = numpy.reshape(flux, (lenp, lenw))
        super(SEDFileSource, self).__init__(phase_u, wave_u, flux,
                                            zero_before=zero_before,
                                            name=filename, version=version)
if __name__ == '__main__':
    # example SED file; the path below is machine-specific, adjust as needed
    filename = '/Users/akim/project/SNDATA_ROOT/snsed/NON1A/SDSS-019323.SED'
    data = SEDFileSource(filename)
    sn = sncosmo.Model(source='snana-2007nc')
    print sn.param_names
import matplotlib.pyplot as plt
plt.plot(data._wave, data.flux(0, data._wave))
plt.plot(sn.source._wave, sn.flux(0, sn.source._wave) * 0.95)
plt.show()
| mit |
mattilyra/scikit-learn | benchmarks/bench_isotonic.py | 38 | 3047 | """
Benchmarks of isotonic regression performance.
We generate a synthetic dataset of size 10^n, for n in [min, max], and
examine the time taken to run isotonic regression over the dataset.
The timings are then output to stdout, or visualized on a log-log scale
with matplotlib.
This allows the scaling of the algorithm with the problem size to be
visualized and understood.
"""
from __future__ import print_function
import numpy as np
import gc
from datetime import datetime
from sklearn.isotonic import isotonic_regression
from sklearn.utils.bench import total_seconds
import matplotlib.pyplot as plt
import argparse
def generate_perturbed_logarithm_dataset(size):
    return np.random.randint(-50, 50, size=size) \
        + 50. * np.log(1 + np.arange(size))
def generate_logistic_dataset(size):
X = np.sort(np.random.normal(size=size))
return np.random.random(size=size) < 1.0 / (1.0 + np.exp(-X))
DATASET_GENERATORS = {
'perturbed_logarithm': generate_perturbed_logarithm_dataset,
'logistic': generate_logistic_dataset
}
def bench_isotonic_regression(Y):
"""
Runs a single iteration of isotonic regression on the input data,
and reports the total time taken (in seconds).
"""
gc.collect()
tstart = datetime.now()
isotonic_regression(Y)
delta = datetime.now() - tstart
return total_seconds(delta)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Isotonic Regression benchmark tool")
parser.add_argument('--iterations', type=int, required=True,
help="Number of iterations to average timings over "
"for each problem size")
parser.add_argument('--log_min_problem_size', type=int, required=True,
help="Base 10 logarithm of the minimum problem size")
parser.add_argument('--log_max_problem_size', type=int, required=True,
help="Base 10 logarithm of the maximum problem size")
parser.add_argument('--show_plot', action='store_true',
help="Plot timing output with matplotlib")
parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(),
required=True)
args = parser.parse_args()
timings = []
for exponent in range(args.log_min_problem_size,
args.log_max_problem_size):
n = 10 ** exponent
Y = DATASET_GENERATORS[args.dataset](n)
time_per_iteration = \
[bench_isotonic_regression(Y) for i in range(args.iterations)]
timing = (n, np.mean(time_per_iteration))
timings.append(timing)
# If we're not plotting, dump the timing to stdout
if not args.show_plot:
print(n, np.mean(time_per_iteration))
if args.show_plot:
plt.plot(*zip(*timings))
plt.title("Average time taken running isotonic regression")
plt.xlabel('Number of observations')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.loglog()
plt.show()
| bsd-3-clause |
laserson/ibis | docs/sphinxext/ipython_sphinxext/ipython_directive.py | 9 | 37645 | # -*- coding: utf-8 -*-
"""
Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives). For example, to enable syntax highlighting
and the IPython directive::
extensions = ['IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive']
The IPython directive outputs code-blocks with the language 'ipython'. So
if you do not have the syntax highlighting extension enabled as well, then
all rendered code-blocks will be uncolored. By default this directive assumes
that your prompts are unchanged IPython ones, but this can be customized.
The configurable options that can be placed in conf.py are:
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
The string to represent the IPython prompt in the generated ReST. The
default is 'Out [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_mplbackend:
The string which specifies if the embedded Sphinx shell should import
Matplotlib and set the backend. The value specifies a backend that is
passed to `matplotlib.use()` before any lines in `ipython_execlines` are
executed. If not specified in conf.py, then the default value of 'agg' is
used. To use the IPython directive without matplotlib as a dependency, set
the value to `None`. It may end up that matplotlib is still imported
if the user specifies so in `ipython_execlines` or makes use of the
@savefig pseudo decorator.
ipython_execlines:
A list of strings to be exec'd in the embedded Sphinx shell. Typical
usage is to make certain packages always available. Set this to an empty
list if you wish to have no imports always available. If specified in
conf.py as `None`, then it has the effect of making no imports available.
If omitted from conf.py altogether, then the default value of
['import numpy as np', 'import matplotlib.pyplot as plt'] is used.
ipython_holdcount
When the @suppress pseudo-decorator is used, the execution count can be
incremented or not. The default behavior is to hold the execution count,
corresponding to a value of `True`. Set this to `False` to increment
the execution count after each suppressed command.
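A project's ``conf.py`` might, for instance, override a few of these values
(the settings shown here are purely illustrative, not defaults)::

    ipython_savefig_dir = 'savefig'
    ipython_execlines = ['import numpy as np', 'import pandas as pd']
    ipython_holdcount = False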
As an example, to use the IPython directive when `matplotlib` is not available,
one sets the backend to `None`::
ipython_mplbackend = None
An example usage of the directive is:
.. code-block:: rst
.. ipython::
In [1]: x = 1
In [2]: y = x**2
In [3]: print(y)
See http://matplotlib.org/sampledoc/ipython_directive.html for additional
documentation.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
from __future__ import print_function
from __future__ import unicode_literals
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import os
import re
import sys
import tempfile
import ast
from pandas.compat import zip, range, map, lmap, u, cStringIO as StringIO
import warnings
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
# Our own
try:
from traitlets.config import Config
except ImportError:
from IPython import Config
from IPython import InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
from IPython.utils.py3compat import PY3
if PY3:
from io import StringIO
text_type = str
else:
from StringIO import StringIO
text_type = unicode
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
input, one ouput, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
nextline = nextline[Nc:]
if nextline and nextline[0] == ' ':
nextline = nextline[1:]
inputline += '\n' + nextline
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
class DecodingStringIO(StringIO, object):
def __init__(self,buf='',encodings=('utf8',), *args, **kwds):
super(DecodingStringIO, self).__init__(buf, *args, **kwds)
self.set_encodings(encodings)
def set_encodings(self, encodings):
self.encodings = encodings
def write(self,data):
if isinstance(data, text_type):
return super(DecodingStringIO, self).write(data)
else:
for enc in self.encodings:
try:
data = data.decode(enc)
return super(DecodingStringIO, self).write(data)
except :
pass
            # default to brute utf8 if no encoding succeeded
return super(DecodingStringIO, self).write(data.decode('utf8', 'replace'))
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self, exec_lines=None,state=None):
self.cout = DecodingStringIO(u'')
if exec_lines is None:
exec_lines = []
self.state = state
# Create config object for IPython
config = Config()
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize global ipython, but don't start its mainloop.
# This will persist across different EmbededSphinxShell instances.
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done after instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# Optionally, provide more detailed information to shell.
self.directive = None
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
# Prepopulate the namespace.
for line in exec_lines:
self.process_input_line(line, store_history=False)
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
try:
source_raw = splitter.source_raw_reset()[1]
except:
# recent ipython #4504
source_raw = splitter.raw_reset()
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""
Process data block for INPUT token.
"""
decorator, input, rest = data
image_file = None
image_directive = None
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = (decorator is not None and \
decorator.startswith('@doctest')) or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_okexcept = decorator=='@okexcept' or self.is_okexcept
is_okwarning = decorator=='@okwarning' or self.is_okwarning
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
# set the encodings to be used by DecodingStringIO
# to convert the execution output into unicode if
# needed. this attrib is set by IpythonDirective.run()
        # based on the specified block options, defaulting to ['utf8']
self.cout.set_encodings(self.output_encoding)
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
# Hold the execution count, if requested to do so.
if is_suppress and self.hold_count:
store_history = False
else:
store_history = True
# Note: catch_warnings is not thread safe
with warnings.catch_warnings(record=True) as ws:
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i == 0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
# context information
filename = self.state.document.current_source
lineno = self.state.document.current_line
# output any exceptions raised during execution to stdout
# unless :okexcept: has been specified.
if not is_okexcept and "Traceback" in output:
s = "\nException in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write(output)
sys.stdout.write('<<<' + ('-' * 73) + '\n\n')
# output any warning raised during execution to stdout
# unless :okwarning: has been specified.
if not is_okwarning:
for w in ws:
s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write('-' * 76 + '\n')
s=warnings.formatwarning(w.message, w.category,
w.filename, w.lineno, w.line)
sys.stdout.write(s)
sys.stdout.write('<<<' + ('-' * 73) + '\n')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, decorator, image_file,
image_directive)
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, decorator, image_file):
"""
Process data block for OUTPUT token.
"""
TAB = ' ' * 4
if is_doctest and output is not None:
found = output
found = found.strip()
submitted = data.strip()
if self.directive is None:
source = 'Unavailable'
content = 'Unavailable'
else:
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
# Make sure the output contains the output prompt.
ind = found.find(output_prompt)
if ind < 0:
e = ('output does not contain output prompt\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'Input line(s):\n{TAB}{2}\n\n'
'Output line(s):\n{TAB}{3}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), TAB=TAB)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
# Handle the actual doctest comparison.
if decorator.strip() == '@doctest':
# Standard doctest
if found != submitted:
e = ('doctest failure\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'On input line(s):\n{TAB}{2}\n\n'
'we found output:\n{TAB}{3}\n\n'
'instead of the expected:\n{TAB}{4}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), repr(submitted), TAB=TAB)
raise RuntimeError(e)
else:
self.custom_doctest(decorator, input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = ('plt.gcf().savefig("%s", bbox_inches="tight", '
'dpi=100)' % image_file)
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin % lineno
output_prompt = self.promptout % lineno
image_file = None
image_directive = None
for token, data in block:
if token == COMMENT:
out_data = self.process_comment(data)
elif token == INPUT:
(out_data, input_lines, output, is_doctest, decorator,
image_file, image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token == OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
decorator, image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
"""
Ensures that pyplot has been imported into the embedded IPython shell.
Also, makes sure to set the backend appropriately if not set already.
"""
# We are here if the @figure pseudo decorator was used. Thus, it's
# possible that we could be here even if python_mplbackend were set to
# `None`. That's also strange and perhaps worthy of raising an
# exception, but for now, we just set the backend to 'agg'.
if not self._pyplot_imported:
if 'matplotlib.backends' not in sys.modules:
# Then ipython_matplotlib was set to None but there was a
# call to the @figure decorator (and ipython_execlines did
# not set a backend).
#raise Exception("No backend was set, but @figure was used!")
import matplotlib
matplotlib.use('agg')
# Always import pyplot into embedded shell.
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
self._pyplot_imported = True
def process_pure_python(self, content):
"""
content is a list of strings. it is unedited directive content
This runs it line by line in the InteractiveShell, prepends
prompts as needed capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
# if the next line is indented, it should be part of multiline
if len(content) > lineno + 1:
nextline = content[lineno + 1]
if len(nextline) - len(nextline.lstrip()) > 3:
continue
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
def custom_doctest(self, decorator, input_lines, found, submitted):
"""
Perform a specialized doctest.
"""
from .custom_doctests import doctests
args = decorator.split()
doctest_type = args[1]
if doctest_type in doctests:
doctests[doctest_type](self, args, input_lines, found, submitted)
else:
e = "Invalid option to @doctest: {0}".format(doctest_type)
raise Exception(e)
class IPythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
    final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
'okexcept': directives.flag,
'okwarning': directives.flag,
'output_encoding': directives.unchanged_required
}
shell = None
seen_docs = set()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
mplbackend = config.ipython_mplbackend
exec_lines = config.ipython_execlines
hold_count = config.ipython_holdcount
return (savefig_dir, source_dir, rgxin, rgxout,
promptin, promptout, mplbackend, exec_lines, hold_count)
def setup(self):
# Get configuration values.
(savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,
mplbackend, exec_lines, hold_count) = self.get_config_options()
if self.shell is None:
# We will be here many times. However, when the
# EmbeddedSphinxShell is created, its interactive shell member
# is the same for each instance.
if mplbackend:
import matplotlib
# Repeated calls to use() will not hurt us since `mplbackend`
# is the same each time.
matplotlib.use(mplbackend)
# Must be called after (potentially) importing matplotlib and
# setting its backend since exec_lines might import pylab.
self.shell = EmbeddedSphinxShell(exec_lines, self.state)
# Store IPython directive to enable better error messages
self.shell.directive = self
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
if not self.state.document.current_source in self.seen_docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
self.shell.IP.prompt_manager.width = 0
self.seen_docs.add(self.state.document.current_source)
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
self.shell.hold_count = hold_count
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
self.shell.is_okexcept = 'okexcept' in options
self.shell.is_okwarning = 'okwarning' in options
self.shell.output_encoding = [options.get('output_encoding', 'utf8')]
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython', '']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
if len(lines)>2:
if debug:
print('\n'.join(lines))
else:
# This has to do with input, not output. But if we comment
# these lines out, then no IPython code will appear in the
# final output.
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
# cleanup
self.teardown()
return []
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IPythonDirective)
app.add_config_value('ipython_savefig_dir', None, 'env')
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_promptin', 'In [%d]:', 'env')
app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')
# We could just let matplotlib pick whatever is specified as the default
# backend in the matplotlibrc file, but this would cause issues if the
# backend didn't work in headless environments. For this reason, 'agg'
# is a good default backend choice.
app.add_config_value('ipython_mplbackend', 'agg', 'env')
# If the user sets this config value to `None`, then EmbeddedSphinxShell's
# __init__ method will treat it as [].
execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
app.add_config_value('ipython_execlines', execlines, 'env')
app.add_config_value('ipython_holdcount', True, 'env')
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
   .....: &d=9&e=22&f=2009&g=d&a=1&b=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
@doctest float
In [154]: 0.1 + 0.2
Out[154]: 0.3
@doctest float
In [155]: np.arange(16).reshape(4,4)
Out[155]:
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
In [1]: x = np.arange(16, dtype=float).reshape(4,4)
In [2]: x[0,0] = np.inf
In [3]: x[0,1] = np.nan
@doctest float
In [4]: x
Out[4]:
array([[ inf, nan, 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
""",
]
# skip local-file depending first example:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
IPythonDirective('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print('All OK? Check figures in _static/')
| apache-2.0 |
mjsauvinen/P4UL | pyRaster/tif2NumpyTile.py | 1 | 1956 | #!/usr/bin/env python3
import sys
import argparse
import numpy as np
from mapTools import *
from utilities import filesFromList, writeLog
from plotTools import addImagePlot
import matplotlib.pyplot as plt
'''
Author: Mikko Auvinen
mikko.auvinen@helsinki.fi
University of Helsinki &
Finnish Meteorological Institute
'''
#==========================================================#
parser = argparse.ArgumentParser(prog='tif2NumpyTile.py')
parser.add_argument("-f", "--filename",type=str, help="Input tif-image file name.")
parser.add_argument("-fo", "--fileout",type=str, help="Output npz file name.")
parser.add_argument("-r", "--reso",type=float, help="Resolution of the tif-image.")
parser.add_argument("-xo", "--xorig",type=float, nargs=2,default=[0.,0.],\
help="Coords [N,E] of the tif-images top-left corner. Default=[0,0]")
parser.add_argument("-p", "--printOn", help="Print the numpy array data.",\
action="store_true", default=False)
parser.add_argument("-pp", "--printOnly", help="Only print the numpy array data. Don't save.",\
action="store_true", default=False)
parser.add_argument("-s", "--scale",type=float, default=1.,\
help="Scale factor for the output. Default=1.")
args = parser.parse_args()
writeLog( parser, args, args.printOnly )
#==========================================================#
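# Example invocation (file name, resolution and origin are illustrative):
# ./tif2NumpyTile.py -f dem.tif -fo dem.npz -r 2.0 -xo 6678000. 25500000. -p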
# Renaming, nothing more.
filename = args.filename
fileout = args.fileout
reso = args.reso
ROrig = args.xorig
printOn = args.printOn
printOnly = args.printOnly
sc = args.scale
R = openTifAsNumpy(filename)
dPx = np.array([sc*reso, sc*reso])
Rdict = {'R' : R, 'GlobOrig' : ROrig, 'gridRot' : 0., 'dPx' : dPx}
if( not printOnly ):
print(' Writing file {} ... '.format(fileout) )
saveTileAsNumpyZ( fileout, Rdict)
print(' ... done! ')
if( printOn or printOnly ):
pfig = plt.figure(num=1, figsize=(10.,10.))
pfig = addImagePlot( pfig, R, fileout, gridOn=True )
plt.show()
| mit |
jwiggins/scikit-image | skimage/future/graph/rag.py | 2 | 14784 | import networkx as nx
import numpy as np
from numpy.lib.stride_tricks import as_strided
from scipy import ndimage as ndi
import math
from ... import draw, measure, segmentation, util, color
try:
from matplotlib import colors
from matplotlib import cm
except ImportError:
pass
def min_weight(graph, src, dst, n):
"""Callback to handle merging nodes by choosing minimum weight.
Returns either the weight between (`src`, `n`) or (`dst`, `n`)
in `graph` or the minimum of the two when both exist.
Parameters
----------
graph : RAG
The graph under consideration.
src, dst : int
        The vertices in `graph` to be merged.
n : int
A neighbor of `src` or `dst` or both.
Returns
-------
weight : float
The weight between (`src`, `n`) or (`dst`, `n`) in `graph` or the
minimum of the two when both exist.
"""
# cover the cases where n only has edge to either `src` or `dst`
default = {'weight': np.inf}
w1 = graph[n].get(src, default)['weight']
w2 = graph[n].get(dst, default)['weight']
return min(w1, w2)
def _add_edge_filter(values, graph):
"""Create edge in `graph` between central element of `values` and the rest.
Add an edge between the middle element in `values` and
all other elements of `values` into `graph`. ``values[len(values) // 2]``
is expected to be the central value of the footprint used.
Parameters
----------
values : array
The array to process.
graph : RAG
The graph to add edges in.
Returns
-------
0 : float
Always returns 0. The return value is required so that `generic_filter`
can put it in the output array, but it is ignored by this filter.
"""
values = values.astype(int)
center = values[len(values) // 2]
for value in values:
if value != center and not graph.has_edge(center, value):
graph.add_edge(center, value)
return 0.
class RAG(nx.Graph):
"""
The Region Adjacency Graph (RAG) of an image, subclasses
    `networkx.Graph <http://networkx.github.io/documentation/latest/reference/classes.graph.html>`_
Parameters
----------
label_image : array of int
An initial segmentation, with each region labeled as a different
integer. Every unique value in ``label_image`` will correspond to
a node in the graph.
connectivity : int in {1, ..., ``label_image.ndim``}, optional
The connectivity between pixels in ``label_image``. For a 2D image,
a connectivity of 1 corresponds to immediate neighbors up, down,
left, and right, while a connectivity of 2 also includes diagonal
neighbors. See `scipy.ndimage.generate_binary_structure`.
data : networkx Graph specification, optional
Initial or additional edges to pass to the NetworkX Graph
constructor. See `networkx.Graph`. Valid edge specifications
include edge list (list of tuples), NumPy arrays, and SciPy
sparse matrices.
**attr : keyword arguments, optional
Additional attributes to add to the graph.
"""
def __init__(self, label_image=None, connectivity=1, data=None, **attr):
super(RAG, self).__init__(data, **attr)
if self.number_of_nodes() == 0:
self.max_id = 0
else:
self.max_id = max(self.nodes_iter())
if label_image is not None:
fp = ndi.generate_binary_structure(label_image.ndim, connectivity)
ndi.generic_filter(
label_image,
function=_add_edge_filter,
footprint=fp,
mode='nearest',
output=as_strided(np.empty((1,), dtype=np.float_),
shape=label_image.shape,
strides=((0,) * label_image.ndim)),
extra_arguments=(self,))
def merge_nodes(self, src, dst, weight_func=min_weight, in_place=True,
extra_arguments=[], extra_keywords={}):
"""Merge node `src` and `dst`.
The new combined node is adjacent to all the neighbors of `src`
and `dst`. `weight_func` is called to decide the weight of edges
incident on the new node.
Parameters
----------
src, dst : int
Nodes to be merged.
weight_func : callable, optional
Function to decide edge weight of edges incident on the new node.
            For each neighbor `n` of `src` and `dst`, `weight_func` will be
called as follows: `weight_func(src, dst, n, *extra_arguments,
**extra_keywords)`. `src`, `dst` and `n` are IDs of vertices in the
RAG object which is in turn a subclass of
`networkx.Graph`.
in_place : bool, optional
If set to `True`, the merged node has the id `dst`, else merged
node has a new id which is returned.
extra_arguments : sequence, optional
The sequence of extra positional arguments passed to
`weight_func`.
extra_keywords : dictionary, optional
The dict of keyword arguments passed to the `weight_func`.
Returns
-------
id : int
The id of the new node.
Notes
-----
If `in_place` is `False` the resulting node has a new id, rather than
`dst`.
"""
src_nbrs = set(self.neighbors(src))
dst_nbrs = set(self.neighbors(dst))
neighbors = (src_nbrs | dst_nbrs) - set([src, dst])
if in_place:
new = dst
else:
new = self.next_id()
self.add_node(new)
for neighbor in neighbors:
w = weight_func(self, src, new, neighbor, *extra_arguments,
**extra_keywords)
self.add_edge(neighbor, new, weight=w)
self.node[new]['labels'] = (self.node[src]['labels'] +
self.node[dst]['labels'])
self.remove_node(src)
if not in_place:
self.remove_node(dst)
return new
def add_node(self, n, attr_dict=None, **attr):
"""Add node `n` while updating the maximum node id.
.. seealso:: :func:`networkx.Graph.add_node`."""
super(RAG, self).add_node(n, attr_dict, **attr)
self.max_id = max(n, self.max_id)
def add_edge(self, u, v, attr_dict=None, **attr):
"""Add an edge between `u` and `v` while updating max node id.
.. seealso:: :func:`networkx.Graph.add_edge`."""
super(RAG, self).add_edge(u, v, attr_dict, **attr)
self.max_id = max(u, v, self.max_id)
def copy(self):
"""Copy the graph with its max node id.
.. seealso:: :func:`networkx.Graph.copy`."""
g = super(RAG, self).copy()
g.max_id = self.max_id
return g
def next_id(self):
"""Returns the `id` for the new node to be inserted.
The current implementation returns one more than the maximum `id`.
Returns
-------
id : int
The `id` of the new node to be inserted.
"""
return self.max_id + 1
def _add_node_silent(self, n):
"""Add node `n` without updating the maximum node id.
This is a convenience method used internally.
.. seealso:: :func:`networkx.Graph.add_node`."""
super(RAG, self).add_node(n)
def rag_mean_color(image, labels, connectivity=2, mode='distance',
sigma=255.0):
"""Compute the Region Adjacency Graph using mean colors.
Given an image and its initial segmentation, this method constructs the
corresponding Region Adjacency Graph (RAG). Each node in the RAG
represents a set of pixels within `image` with the same label in `labels`.
The weight between two adjacent regions represents how similar or
dissimilar two regions are depending on the `mode` parameter.
Parameters
----------
image : ndarray, shape(M, N, [..., P,] 3)
Input image.
labels : ndarray, shape(M, N, [..., P,])
The labelled image. This should have one dimension less than
        `image`. If `image` has dimensions `(M, N, 3)`, `labels` should have
dimensions `(M, N)`.
connectivity : int, optional
Pixels with a squared distance less than `connectivity` from each other
are considered adjacent. It can range from 1 to `labels.ndim`. Its
behavior is the same as `connectivity` parameter in
`scipy.ndimage.generate_binary_structure`.
mode : {'distance', 'similarity'}, optional
The strategy to assign edge weights.
'distance' : The weight between two adjacent regions is the
:math:`|c_1 - c_2|`, where :math:`c_1` and :math:`c_2` are the mean
colors of the two regions. It represents the Euclidean distance in
their average color.
        'similarity' : The weight between two adjacent regions is
:math:`e^{-d^2/sigma}` where :math:`d=|c_1 - c_2|`, where
:math:`c_1` and :math:`c_2` are the mean colors of the two regions.
It represents how similar two regions are.
sigma : float, optional
Used for computation when `mode` is "similarity". It governs how
close to each other two colors should be, for their corresponding edge
weight to be significant. A very large value of `sigma` could make
any two colors behave as though they were similar.
Returns
-------
out : RAG
The region adjacency graph.
Examples
--------
>>> from skimage import data, segmentation
>>> from skimage.future import graph
>>> img = data.astronaut()
>>> labels = segmentation.slic(img)
>>> rag = graph.rag_mean_color(img, labels)
References
----------
.. [1] Alain Tremeau and Philippe Colantoni
"Regions Adjacency Graph Applied To Color Image Segmentation"
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.11.5274
"""
graph = RAG(labels, connectivity=connectivity)
for n in graph:
graph.node[n].update({'labels': [n],
'pixel count': 0,
'total color': np.array([0, 0, 0],
dtype=np.double)})
for index in np.ndindex(labels.shape):
current = labels[index]
graph.node[current]['pixel count'] += 1
graph.node[current]['total color'] += image[index]
for n in graph:
graph.node[n]['mean color'] = (graph.node[n]['total color'] /
graph.node[n]['pixel count'])
for x, y, d in graph.edges_iter(data=True):
diff = graph.node[x]['mean color'] - graph.node[y]['mean color']
diff = np.linalg.norm(diff)
if mode == 'similarity':
d['weight'] = math.e ** (-(diff ** 2) / sigma)
elif mode == 'distance':
d['weight'] = diff
else:
raise ValueError("The mode '%s' is not recognised" % mode)
return graph
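def _rag_merge_sketch():
    """Illustrative sketch only, not part of the original module API: build a
    RAG from a SLIC over-segmentation and merge the two most similar regions.
    The test image and default SLIC parameters are assumptions chosen purely
    for demonstration.
    """
    from skimage import data
    img = data.astronaut()
    labels = segmentation.slic(img)
    rag = rag_mean_color(img, labels)
    # the lowest-weight edge joins the two regions with the closest mean color
    src, dst, _ = min(rag.edges_iter(data=True), key=lambda e: e[2]['weight'])
    rag.merge_nodes(src, dst, weight_func=min_weight)
    return rag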
def draw_rag(labels, rag, img, border_color=None, node_color='#ffff00',
edge_color='#00ff00', colormap=None, thresh=np.inf,
desaturate=False, in_place=True):
"""Draw a Region Adjacency Graph on an image.
Given a labelled image and its corresponding RAG, draw the nodes and edges
of the RAG on the image with the specified colors. Nodes are marked by
the centroids of the corresponding regions.
Parameters
----------
labels : ndarray, shape (M, N)
The labelled image.
rag : RAG
The Region Adjacency Graph.
img : ndarray, shape (M, N, 3)
Input image.
border_color : colorspec, optional
Any matplotlib colorspec.
node_color : colorspec, optional
Any matplotlib colorspec. Yellow by default.
edge_color : colorspec, optional
Any matplotlib colorspec. Green by default.
colormap : colormap, optional
Any matplotlib colormap. If specified the edges are colormapped with
the specified color map.
thresh : float, optional
Edges with weight below `thresh` are not drawn, or considered for color
mapping.
desaturate : bool, optional
Convert the image to grayscale before displaying. Particularly helps
visualization when using the `colormap` option.
in_place : bool, optional
If set, the RAG is modified in place. For each node `n` the function
will set a new attribute ``rag.node[n]['centroid']``.
Returns
-------
out : ndarray, shape (M, N, 3)
The image with the RAG drawn.
Examples
--------
>>> from skimage import data, segmentation
>>> from skimage.future import graph
>>> img = data.coffee()
>>> labels = segmentation.slic(img)
>>> g = graph.rag_mean_color(img, labels)
>>> out = graph.draw_rag(labels, g, img)
"""
if not in_place:
rag = rag.copy()
if desaturate:
img = color.rgb2gray(img)
img = color.gray2rgb(img)
out = util.img_as_float(img, force_copy=True)
cc = colors.ColorConverter()
edge_color = cc.to_rgb(edge_color)
node_color = cc.to_rgb(node_color)
# Handling the case where one node has multiple labels
# offset is 1 so that regionprops does not ignore 0
offset = 1
map_array = np.arange(labels.max() + 1)
for n, d in rag.nodes_iter(data=True):
for label in d['labels']:
map_array[label] = offset
offset += 1
rag_labels = map_array[labels]
regions = measure.regionprops(rag_labels)
for (n, data), region in zip(rag.nodes_iter(data=True), regions):
data['centroid'] = region['centroid']
if border_color is not None:
border_color = cc.to_rgb(border_color)
out = segmentation.mark_boundaries(out, rag_labels, color=border_color)
if colormap is not None:
edge_weight_list = [d['weight'] for x, y, d in
rag.edges_iter(data=True) if d['weight'] < thresh]
norm = colors.Normalize()
norm.autoscale(edge_weight_list)
smap = cm.ScalarMappable(norm, colormap)
for n1, n2, data in rag.edges_iter(data=True):
if data['weight'] >= thresh:
continue
r1, c1 = map(int, rag.node[n1]['centroid'])
r2, c2 = map(int, rag.node[n2]['centroid'])
line = draw.line(r1, c1, r2, c2)
if colormap is not None:
out[line] = smap.to_rgba([data['weight']])[0][:-1]
else:
out[line] = edge_color
circle = draw.circle(r1, c1, 2)
out[circle] = node_color
return out
| bsd-3-clause |
cyliustack/sofa | bin/sofa_analyze.py | 1 | 50661 | import argparse
import matplotlib
matplotlib.use('agg')
import csv
import json
import multiprocessing as mp
import os
import random
import re
import sys
from functools import partial
from operator import attrgetter, itemgetter
import networkx as nx
import numpy as np
import pandas as pd
import time
from sofa_aisi import *
from sofa_common import *
from sofa_config import *
from sofa_print import *
from matplotlib import pyplot as plt
import grpc
import potato_pb2
import potato_pb2_grpc
import socket
import random
import subprocess
from sofa_ml import hsg_v2
def random_generate_color():
rand = lambda: random.randint(0, 255)
return '#%02X%02X%02X' % (64, rand(), rand())
def get_top_k_events(cfg, df, topk):
topk_events=[]
gby = df.groupby(['name'])
df_agg = gby.aggregate(np.sum)
df_agg_sorted = df_agg.sort_values(by=['duration'],ascending=False)
#memcpy = ['copyKind_1_','copyKind_2_','copyKind_8_']
if cfg.verbose:
print("Top %d Events: "%topk)
print(df_agg_sorted[['duration']][0:topk])
eventName = df_agg_sorted[df_agg_sorted.columns[0:0]].head(topk).index.values.tolist()
return eventName
# input: pfv(performance feature vector), Pandas.DataFrame
# output: hint, docker_image
def get_hint(potato_server, features):
if len(features) > 0:
pfv = potato_pb2.PerformanceFeatureVector()
for i in range(len(features)):
name = features.iloc[i]['name']
value = features.iloc[i]['value']
#print('%s%s%s' % (str(i).ljust(10), name.ljust(30), ('%.3lf'%value).ljust(20)))
pfv.name.append(name)
pfv.value.append(value)
#print('Wait for response from POTATO server...')
myhostname = socket.gethostname()
channel = grpc.insecure_channel(potato_server)
stub = potato_pb2_grpc.HintStub(channel)
request = potato_pb2.HintRequest( hostname = myhostname,
pfv = pfv)
response = stub.Hint(request)
hint = response.hint
docker_image = response.docker_image
else:
hint = 'There is no pfv to get hints.'
docker_image = 'NA'
return hint, docker_image
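# Illustrative call; the server address and feature values are assumptions and
# do not come from this file:
#   features = pd.DataFrame({'name': ['gpu_sm_util', 'cpu_util'],
#                            'value': [35.0, 72.0]}, columns=['name', 'value'])
#   hint, docker_image = get_hint('10.0.0.5:50051', features)
# get_hint only needs a reachable POTATO gRPC endpoint ('host:port') and a
# DataFrame with 'name'/'value' columns like the one built in sofa_analyze().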
def concurrency_breakdown(logdir, cfg, df_mpstat, df_cpu, df_gpu, df_nvsmi, df_bandwidth, features):
if cfg.verbose:
print_title('Concurrency Breakdown Analysis')
total_elapsed_time = {'usr':0, 'sys':0, 'gpu':0, 'iow':0, 'idl':0}
elapsed_time_ratio = {'usr':0, 'sys':0, 'gpu':0, 'iow':0, 'idl':0}
total_interval_vector = []
total_performace_vector = []
if len(df_mpstat) == 0:
print_warning(cfg, 'no mpstat and perf traces!')
return features
t_begin = df_mpstat.iloc[0]['timestamp']
t_end = df_mpstat.iloc[-1]['timestamp']
t = t_begin
sample_time = (1 / float(cfg.sys_mon_rate))
while t < t_end:
t = t + sample_time
if cfg.roi_end > 0 and (t < cfg.roi_begin or t > cfg.roi_end):
continue
window_begin = t - sample_time
window_end = t
if len(df_cpu) > 0:
if df_cpu.iloc[0].timestamp > window_end:
continue
cond1 = (df_cpu['timestamp'] > window_begin)
cond2 = (df_cpu['timestamp'] <= window_end)
df_cpu_interval = df_cpu[ cond1 & cond2 ]
num_gpus = len(list(set(df_nvsmi['deviceId'])))
cond1 = (df_nvsmi['timestamp'] > window_begin)
cond2 = (df_nvsmi['timestamp'] <= window_end)
sm = df_nvsmi['event'] == int(0)
df_nvsmi_interval = df_nvsmi[ cond1 & cond2 & sm ]
cond1 = (df_mpstat['timestamp'] > window_begin)
cond2 = (df_mpstat['timestamp'] <= window_end)
df_mpstat_interval = df_mpstat[ cond1 & cond2 ]
cond1 = (df_bandwidth['timestamp'] > window_begin)
cond2 = (df_bandwidth['timestamp'] <= window_end)
tx = df_bandwidth['event'] == float(0)
rx = df_bandwidth['event'] == float(1)
df_tx_interval = df_bandwidth[ cond1 & cond2 & tx ]
df_rx_interval = df_bandwidth[ cond1 & cond2 & rx ]
mp_usr = []
mp_sys = []
mp_idl = []
mp_iow = []
usr = []
sys = []
irq = []
cpu_max = 0
cpu_min = 100
for i in range(len(df_mpstat_interval)):
ratios = df_mpstat_interval.iloc[i]['name'].split(':')[1].split('|')
#print(ratios)
mp_usr.append(sample_time*int(ratios[1])/100.0)
mp_sys.append(sample_time*int(ratios[2])/100.0)
mp_idl.append(sample_time*int(ratios[3])/100.0)
mp_iow.append(sample_time*int(ratios[4])/100.0)
usr.append(int(ratios[1]))
sys.append(int(ratios[2]))
irq.append(int(ratios[5]))
cpu_tmp = int(ratios[1]) + int(ratios[2]) + int(ratios[5])
if cpu_tmp > cpu_max:
cpu_max = cpu_tmp
if cpu_tmp < cpu_min:
cpu_min = cpu_tmp
mp_usr = np.asarray(mp_usr)
mp_sys = np.asarray(mp_sys)
mp_idl = np.asarray(mp_idl)
mp_iow = np.asarray(mp_iow)
usr = np.asarray(usr)
sys = np.asarray(sys)
irq = np.asarray(irq)
elapsed_time = {'usr':0, 'sys':0, 'gpu':0, 'iow':0, 'idl':0}
if len(df_mpstat_interval) > 0:
elapsed_time['usr'] = mp_usr.max()
elapsed_time['sys'] = mp_sys.max()
elapsed_time['gpu'] = df_nvsmi_interval['duration'].max() * 0.01 * sample_time
elapsed_time['iow'] = mp_iow.max()
#print('gput,usrt = ', elapsed_time['gpu'], elapsed_time['usr'])
dominator = max(elapsed_time, key=elapsed_time.get)
#if elapsed_time['gpu'] > 0.1 :
# dominator = 'gpu'
if elapsed_time[dominator] > sample_time * int(cfg.is_idle_threshold)/100:
total_elapsed_time[dominator] = total_elapsed_time[dominator] + sample_time
else:
total_elapsed_time['idl'] += sample_time
if num_gpus > 0:
time_gpu_avg = df_nvsmi_interval['duration'].sum() * 0.01 * sample_time / num_gpus
else:
time_gpu_avg = 0
interval_vector = [mp_usr.max(),
mp_sys.max(),
mp_iow.max(),
mp_idl.max(),
time_gpu_avg,
df_tx_interval['bandwidth'].sum(),
df_rx_interval['bandwidth'].sum()]
total_interval_vector.append(tuple(interval_vector))
if num_gpus > 0:
sm_avg = df_nvsmi_interval['duration'].sum() / int(len(list(set(df_nvsmi_interval['deviceId']))))
else:
sm_avg = 0
performace_vector = [window_end,
df_nvsmi_interval['duration'].max(),
sm_avg,
df_nvsmi_interval['duration'].min(),
round((usr.mean() + sys.mean() + irq.mean()), 0),
cpu_max,
cpu_min]
total_performace_vector.append(tuple(performace_vector))
total_all_elapsed_time = sum(total_elapsed_time.values())
if total_all_elapsed_time > 0 :
elapsed_time_ratio['usr'] = 100 * total_elapsed_time['usr'] / total_all_elapsed_time
elapsed_time_ratio['sys'] = 100 * total_elapsed_time['sys'] / total_all_elapsed_time
elapsed_time_ratio['gpu'] = 100 * total_elapsed_time['gpu'] / total_all_elapsed_time
elapsed_time_ratio['idl'] = 100 * total_elapsed_time['idl'] / total_all_elapsed_time
elapsed_time_ratio['iow'] = 100 * total_elapsed_time['iow'] / total_all_elapsed_time
if cfg.verbose:
print('Elapsed Time = %.1lf ' % total_all_elapsed_time)
print('USR = %.1lf %%' % elapsed_time_ratio['usr'])
print('SYS = %.1lf %%' % elapsed_time_ratio['sys'])
if num_gpus > 0:
print('GPU = %.1lf %%' % elapsed_time_ratio['gpu'])
print('IDL = %.1lf %%' % elapsed_time_ratio['idl'])
print('IOW = %.1lf %%' % elapsed_time_ratio['iow'])
if cfg.spotlight_gpu:
elapsed_hotspot_time = cfg.roi_end - cfg.roi_begin
else:
elapsed_hotspot_time = 0
df = pd.DataFrame({ 'name':['elapsed_usr_time_ratio', 'elapsed_sys_time_ratio', 'elapsed_gpu_time_ratio',
'elapsed_iow_time_ratio', 'elapsed_hotspot_time'],
'value':[elapsed_time_ratio['usr'], elapsed_time_ratio['sys'], elapsed_time_ratio['gpu'],
elapsed_time_ratio['iow'], elapsed_hotspot_time ] },
columns=['name','value'])
features = pd.concat([features, df])
if len(total_performace_vector) > 0:
performance_table = pd.DataFrame(total_performace_vector, columns = ['time', 'max_gpu_util', 'avg_gpu_util', 'min_gpu_util', 'cpu_util', 'cpu_max', 'cpu_min'])
performance_table.to_csv('%s/performance.csv' % logdir)
vector_table = pd.DataFrame(total_interval_vector, columns = ['usr' , 'sys', 'iow', 'idl','gpu', 'net_tx', 'net_rx'])
pearson = vector_table.corr(method ='pearson').round(2)
if cfg.verbose:
print('Correlation Table :')
print(pearson)
df = pd.DataFrame({ 'name':['corr_gpu_usr', 'corr_gpu_sys', 'corr_gpu_iow', 'corr_gpu_ntx', 'corr_gpu_nrx'], 'value':[pearson['gpu'].usr, pearson['gpu'].sys, pearson['gpu'].iow, pearson['gpu'].net_tx, pearson['gpu'].net_rx]}, columns=['name','value'])
features = pd.concat([features, df])
return features
def payload_sum(df):
print((len(df)))
class Event:
def __init__(self, name, ttype, timestamp, duration):
self.name = name
self.ttype = ttype # 0 for begin, 1 for end
self.timestamp = timestamp
self.duration = duration
def __repr__(self):
return repr((self.name, self.ttype, self.timestamp, self.duration))
def nvsmi_profile(logdir, cfg, df_nvsmi, features):
if not cfg.cluster_ip and cfg.verbose:
print_title('SM & MEM & ENCODE/DECODE Profiling')
if cfg.spotlight_gpu:
if cfg.roi_end == 0 :
print_warning(cfg, 'spotlight_gpu has no effects.')
else:
cond1 = (df_nvsmi['timestamp'] > cfg.roi_begin)
cond2 = (df_nvsmi['timestamp'] <= cfg.roi_end)
df_nvsmi = df_nvsmi[ cond1 & cond2 ]
sm_start = df_nvsmi.iloc[0].timestamp
sm_end = df_nvsmi.iloc[-1].timestamp
SM_time = sm_end - sm_start
result = df_nvsmi.groupby(['deviceId','event'])['duration'].mean()
result = result.astype(int)
gpu_sm_util = df_nvsmi.groupby(['event'])['duration'].mean()[0]
gpu_mem_util = df_nvsmi.groupby(['event'])['duration'].mean()[1]
if cfg.nvsmi_data:
gpu_enc_util = df_nvsmi.groupby(['event'])['duration'].mean()[2]
gpu_dec_util = df_nvsmi.groupby(['event'])['duration'].mean()[3]
else:
gpu_enc_util = 0
gpu_dec_util = 0
sm = df_nvsmi['event'] == int(0)
mem = df_nvsmi['event'] == int(1)
enc = df_nvsmi['event'] == int(2)
dec = df_nvsmi['event'] == int(3)
gpunum = list(set(df_nvsmi['deviceId']))
res = pd.DataFrame([], columns=['sm', 'mem', 'enc', 'dec'])
sm_q = pd.DataFrame([], columns=['Q1', 'Q2', 'Q3', 'Avg'])
mem_q = pd.DataFrame([], columns=['Q1', 'Q2', 'Q3', 'Avg'])
for i in gpunum:
gpuid = df_nvsmi['deviceId'] == int(i)
gpudata = [round(df_nvsmi[sm & gpuid]['duration'].mean(), 2),
round(df_nvsmi[mem & gpuid]['duration'].mean(), 2),
round(df_nvsmi[enc & gpuid]['duration'].mean(), 2),
round(df_nvsmi[dec & gpuid]['duration'].mean(), 2)]
smdata = [round(df_nvsmi[sm & gpuid]['duration'].quantile(0.25), 2),
round(df_nvsmi[sm & gpuid]['duration'].quantile(0.5), 2),
round(df_nvsmi[sm & gpuid]['duration'].quantile(0.75), 2),
round(df_nvsmi[sm & gpuid]['duration'].mean(), 2)]
memdata = [round(df_nvsmi[mem & gpuid]['duration'].quantile(0.25), 2),
round(df_nvsmi[mem & gpuid]['duration'].quantile(0.5), 2),
round(df_nvsmi[mem & gpuid]['duration'].quantile(0.75), 2),
round(df_nvsmi[mem & gpuid]['duration'].mean(), 2)]
gpu_tmp = pd.DataFrame([gpudata], columns=['sm', 'mem', 'enc', 'dec'], index=[i])
sm_tmp = pd.DataFrame([smdata], columns=['Q1', 'Q2', 'Q3', 'Avg'], index=[i])
mem_tmp = pd.DataFrame([memdata], columns=['Q1', 'Q2', 'Q3', 'Avg'], index=[i])
res = pd.concat([res, gpu_tmp])
sm_q = pd.concat([sm_q, sm_tmp])
mem_q = pd.concat([mem_q, mem_tmp])
res.index.name = 'gpu_id'
sm_q.index.name = 'gpu_id'
mem_q.index.name = 'gpu_id'
if not cfg.cluster_ip and cfg.verbose:
print('GPU Utilization (%):')
print(res)
print('\nGPU SM Quartile (%):')
print(sm_q)
print('\nGPU MEM Quartile (%):')
print(mem_q)
print('Overall Average SM Utilization (%): ', int(gpu_sm_util))
print('Overall Average MEM Utilization (%): ', int(gpu_mem_util))
print('Overall Average ENC Utilization (%): ', int(gpu_enc_util))
print('Overall Average DEC Utilization (%): ', int(gpu_dec_util))
print('Overall Active GPU Time (s): %.3lf' % (SM_time * gpu_sm_util/100.0))
df = pd.DataFrame({'name':['gpu_sm_util_q2', 'gpu_sm_util_q3', 'gpu_sm_util', 'gpu_mem_util_q2', 'gpu_mem_util_q3', 'gpu_mem_util'],
'value':[df_nvsmi[sm & gpuid]['duration'].quantile(0.5),
df_nvsmi[sm & gpuid]['duration'].quantile(0.75),
int(gpu_sm_util),
df_nvsmi[mem & gpuid]['duration'].quantile(0.5),
df_nvsmi[mem & gpuid]['duration'].quantile(0.75),
int(gpu_mem_util),
]},
columns=['name','value'])
features = pd.concat([features, df])
return features
def gpu_profile(logdir, cfg, df_gpu, features):
if cfg.verbose:
print_title('GPU Profiling')
print('Per-GPU time (s):')
groups = df_gpu.groupby("deviceId")["duration"]
gpu_time = 0
for key, item in groups:
gpuid = int(float(key))
per_gpu_time = groups.get_group(key).sum()
if cfg.verbose:
print("[%d]: %lf" % (gpuid, per_gpu_time))
gpu_time = gpu_time + per_gpu_time
num_gpus = len(groups)
kernel_time = 0
grouped_df = df_gpu.groupby("copyKind")["duration"]
for key, item in grouped_df:
if key == 0:
kernel_time = grouped_df.get_group(key).sum()
nccl_time = 0
grouped_df = df_gpu.groupby("name")["duration"]
for key, item in grouped_df:
#print("[%s]: %lf" % (key, grouped_df.get_group(key).sum()))
if key.find("nccl") != -1:
nccl_time = nccl_time + grouped_df.get_group(key).sum()
features = comm_profile(logdir, cfg, df_gpu, features)
get_top_k_events(cfg, df_gpu, 10)
df = pd.DataFrame({'name':['gpu_time', 'num_gpus', 'kernel_time', 'nccl_time'],
'value':[gpu_time, num_gpus, kernel_time, nccl_time] },
columns=['name','value'])
features = pd.concat([features, df])
return features
def strace_profile(logdir, cfg, df, features):
print_title('STRACE Profiling:')
return features
def net_profile(logdir, cfg, df, features):
if not cfg.cluster_ip:
print_title("Network Profiling:")
grouped_df = df.groupby("name")["duration"]
net_time = 0
n_packets = 0
for key, item in grouped_df:
#print("[%s]: %lf" % (key, grouped_df.get_group(key).sum()))
if key.find("network:tcp:") != -1:
net_time = net_time + grouped_df.get_group(key).sum()
n_packets = n_packets + 1
#print(("total network time (s) = %.3lf" % net_time))
#print(("total amount of network packets = %d" % n_packets))
# total network packet
packet_num_matrix = df.groupby(['pkt_src','pkt_dst','payload']).size().unstack(level=1, fill_value=0)
# total network traffic
packet_sum_matrix = df.groupby(['pkt_src','pkt_dst'])["payload"].sum().unstack(level=1, fill_value=0)
# ================ change pandas table columns and index name ====
rename_index = packet_sum_matrix.index.tolist()
rename_index2 = packet_num_matrix.index.tolist()
rename_columns = packet_sum_matrix.columns.tolist()
rename_columns2 = packet_num_matrix.columns.tolist()
def zero(s):
if s[0:2] == '00':
s = s[2]
elif (s[0] == '0') and (s[1] != '0'):
s = s[1:3]
return(s)
def check_str(rename_list):
rename_list_new = []
for j in rename_list:
j = str(int(j))
a = j[-9:-6]
b = j[-6:-3]
c = j[-3:]
j = j[:-9] + '.' + zero(a) + '.' + zero(b) + '.' + zero(c)
rename_list_new.append(j)
return(rename_list_new)
def check_str2(rename_list):
rename_columns_2 = []
for i in rename_list:
i = str(int(i[0]))
a = i[-9:-6]
b = i[-6:-3]
c = i[-3:]
i = i[:-9] + '.' + zero(a) + '.' + zero(b) + '.' + zero(c)
rename_columns_2.append(i)
return(rename_columns_2)
rename_index_new = check_str(rename_index)
rename_index_new = dict(zip(rename_index, rename_index_new))
rename_index2_new = check_str2(rename_index2)
rename_index2_final = list(set(rename_index2_new))
rename_index2_final.sort(key=rename_index2_new.index)
rename_columns_new = check_str(rename_columns)
rename_columns_new = dict(zip(rename_columns, rename_columns_new))
rename_columns2_new = check_str(rename_columns2)
rename_columns2_new = dict(zip(rename_columns2, rename_columns2_new))
# rename here
packet_sum_matrix = packet_sum_matrix.rename(columns=rename_columns_new)
packet_num_matrix = packet_num_matrix.rename(columns=rename_columns2_new)
packet_sum_matrix = packet_sum_matrix.rename(index=rename_index_new)
packet_num_matrix.index.set_levels(rename_index2_final , level = 0, inplace = True)
if cfg.verbose:
print("total amount of network traffic : ", convertbyte(df['payload'].sum()), '\n', packet_sum_matrix.to_string(), "\n")
print("total amount of network packets = %d\n" % packet_num_matrix.sum().sum() ,packet_num_matrix.to_string(), "\n")
network_value = []
src = []
dst = []
final = []
for index in packet_sum_matrix.index:
for column in packet_sum_matrix.columns:
src.append(index)
dst.append(column)
network_value.append(packet_sum_matrix[column][index])
record = list(zip(src, dst, network_value))
record.sort(key=lambda tup:tup[2], reverse=True)
for src, dst, value in record:
if value == 0:
pass
else:
item = [src, dst, convertbyte(value), round(value / df['payload'].sum(), 2)]
final.append(item)
summary = pd.DataFrame(final, columns=['Source', 'Destination', 'Amount', 'Percentage of a Node'])
summary.to_csv(logdir + 'netrank.csv',
mode='w',
header=True,
index=False)
df = pd.DataFrame({'name':['net_time'],
'value':[net_time] },
columns=['name','value'])
features = pd.concat([features, df])
return features
def convertbyte(B):
B = int(B)
KB = float(1024)
MB = float(KB ** 2) # 1,048,576
GB = float(KB ** 3) # 1,073,741,824
TB = float(KB ** 4) # 1,099,511,627,776
if B < KB:
return '{} Bytes'.format(B)
elif KB <= B < MB:
return '{0:.2f} KB'.format(B/KB)
elif MB <= B < GB:
return '{0:.2f} MB'.format(B/MB)
elif GB <= B < TB:
return '{0:.2f} GB'.format(B/GB)
elif TB <= B:
return '{0:.2f} TB'.format(B/TB)
def convertbytes(B):
B = float(B)
KB = float(1024)
MB = float(KB ** 2) # 1,048,576
GB = float(KB ** 3) # 1,073,741,824
TB = float(KB ** 4) # 1,099,511,627,776
if B < KB:
return '{0:.2f} B/s'.format(B)
elif KB <= B < MB:
return '{0:.2f} KB/s'.format(B/KB)
elif MB <= B < GB:
return '{0:.2f} MB/s'.format(B/MB)
elif GB <= B < TB:
return '{0:.2f} GB/s'.format(B/GB)
elif TB <= B:
return '{0:.2f} TB/s'.format(B/TB)
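# Worked examples (illustrative): convertbyte(1536) returns '1.50 KB' and
# convertbytes(3 * 1024 ** 2) returns '3.00 MB/s'; both helpers step through
# the 1024-based unit ladder and format the value to two decimal places.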
def netbandwidth_profile(logdir, cfg, df, features):
if not cfg.cluster_ip and cfg.verbose:
print_title('Network Bandwidth Profiling:')
tx = df['event'] == float(0)
rx = df['event'] == float(1)
bw_tx_q1 = df[tx]['bandwidth'].quantile(0.25)
bw_tx_q2 = df[tx]['bandwidth'].quantile(0.5)
bw_tx_q3 = df[tx]['bandwidth'].quantile(0.75)
bw_tx_mean = int(df[tx]['bandwidth'].mean())
bw_rx_q1 = df[rx]['bandwidth'].quantile(0.25)
bw_rx_q2 = df[rx]['bandwidth'].quantile(0.5)
bw_rx_q3 = df[rx]['bandwidth'].quantile(0.75)
bw_rx_mean = int(df[rx]['bandwidth'].mean())
with open('%s/netstat.txt' % logdir) as f:
lines = f.readlines()
first_line = lines[0]
last_line = lines[-1]
tx_begin = first_line.split(',')[1]
rx_begin = first_line.split(',')[2]
tx_end = last_line.split(',')[1]
rx_end = last_line.split(',')[2]
tx_amount = int(last_line.split(',')[1]) - int(first_line.split(',')[1])
rx_amount = int(last_line.split(',')[2]) - int(first_line.split(',')[2])
if not cfg.cluster_ip:
bw_tx_q1 = df[tx]['bandwidth'].quantile(0.25)
bw_tx_q2 = df[tx]['bandwidth'].quantile(0.5)
bw_tx_q3 = df[tx]['bandwidth'].quantile(0.75)
bw_tx_mean = int(df[tx]['bandwidth'].mean())
bw_rx_q1 = df[rx]['bandwidth'].quantile(0.25)
bw_rx_q2 = df[rx]['bandwidth'].quantile(0.5)
bw_rx_q3 = df[rx]['bandwidth'].quantile(0.75)
bw_rx_mean = int(df[rx]['bandwidth'].mean())
if cfg.verbose:
print('Amount of Network Traffic : %s' % (convertbyte(tx_amount + rx_amount)))
print('Amount of tx : %s' % convertbyte(tx_amount))
print('Amount of rx : %s' % convertbyte(rx_amount))
print('Bandwidth Quartile :')
print('Q1 tx : %s, rx : %s' % ( convertbytes(bw_tx_q1), convertbytes(bw_rx_q1)))
print('Q2 tx : %s, rx : %s' % ( convertbytes(bw_tx_q2), convertbytes(bw_rx_q2)))
print('Q3 tx : %s, rx : %s' % ( convertbytes(bw_tx_q3), convertbytes(bw_rx_q3)))
print('Avg tx : %s, rx : %s'% ( convertbytes(bw_tx_mean), convertbytes(bw_rx_mean)))
#network chart part
all_time = df[tx]['timestamp'].tolist()
all_tx = df[tx]['bandwidth'].tolist()
all_rx = df[rx]['bandwidth'].tolist()
fig = plt.figure(dpi=128, figsize=(16, 14))
plt.plot(all_time, all_tx, c='red', alpha=0.5, label='tx')
plt.plot(all_time, all_rx, c='blue', alpha=0.5, label='rx')
plt.legend(loc='upper right')
plt.title("Network Report", fontsize=18)
plt.xlabel('Timestamp (s)', fontsize=16)
plt.ylabel("Bandwidth (bytes)", fontsize=16)
fig.savefig("%s/network_report.pdf" % logdir, bbox_inches='tight')
if not cfg.cluster_ip and cfg.verbose:
print('Network Bandwidth Chart is saved at %s/network_report.pdf' %logdir)
df_feature = pd.DataFrame({ 'name':['bw_tx_q2', 'bw_tx_q3', 'bw_rx_q2', 'bw_rx_q3'],
'value':[bw_tx_q2, bw_tx_q3, bw_rx_q2, bw_rx_q3] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
def blktrace_latency_profile(logdir, cfg, df, features):
with open('%s/btt.txt' % logdir) as f:
lines = f.readlines()
for i, line in enumerate(lines):
if '==================== All Devices ====================' in line:
start = i
if '==================== Device Merge Information ====================' in line:
end = i
break
bttoutput_result = lines[start:end]
df_offset = pd.read_table('%s/offset_all.txt' % logdir, delim_whitespace=True, names=('time', 'start', 'end'))
time = df_offset['time'].tolist()
start_b = df_offset['start'].tolist()
end_b = df_offset['end'].tolist()
fig = plt.figure(dpi=128, figsize=(16, 14))
plt.plot(time, start_b, c='red', marker='o', alpha=0.3, label='Start block')
plt.legend(loc='upper right')
plt.title("Block Offset Report", fontsize=18)
plt.xlabel('Timestamp (s)', fontsize=16)
plt.ylabel("Block Number", fontsize=16)
fig.savefig("%s/offset_of_device_report.pdf" % logdir, bbox_inches='tight')
print('Offset of Device Report is saved at %s/offset_of_device_report.pdf' %logdir)
if cfg.verbose:
print_title('Storage Profiling:')
        print('Blktrace Latency (s):')
for btt in bttoutput_result:
print(btt[:-1])
blktrace_latency = df['event'] == 'C'
blktrace_latency_q1 = df[blktrace_latency]['duration'].quantile(0.25)
blktrace_latency_q2 = df[blktrace_latency]['duration'].quantile(0.5)
blktrace_latency_q3 = df[blktrace_latency]['duration'].quantile(0.75)
blktrace_latency_mean = df[blktrace_latency]['duration'].mean()
df_feature = pd.DataFrame({ 'name':['blktrace_latency_q1','blktrace_latency_q2','blktrace_latency_q3'],
'value': [blktrace_latency_q1, blktrace_latency_q2, blktrace_latency_q3] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
def diskstat_profile(logdir, cfg, df, features):
#diskstat_dev = list(set(df['dev']))
diskstat_r_q1 = df.groupby('dev')['d_read'].quantile(0.25)
diskstat_w_q1 = df.groupby('dev')['d_write'].quantile(0.25)
diskstat_q1 = df.groupby('dev')['d_disk_total'].quantile(0.25)
diskstat_r_q2 = df.groupby('dev')['d_read'].quantile(0.5)
diskstat_w_q2 = df.groupby('dev')['d_write'].quantile(0.5)
diskstat_q2 = df.groupby('dev')['d_disk_total'].quantile(0.5)
diskstat_r_q3 = df.groupby('dev')['d_read'].quantile(0.75)
diskstat_w_q3 = df.groupby('dev')['d_write'].quantile(0.75)
diskstat_q3 = df.groupby('dev')['d_disk_total'].quantile(0.75)
diskstat_r_avg = df.groupby('dev')['d_read'].mean()
diskstat_w_avg = df.groupby('dev')['d_write'].mean()
diskstat_avg = df.groupby('dev')['d_disk_total'].mean()
diskstat_r_iops = df.groupby('dev')['r_iops'].mean()
diskstat_w_iops = df.groupby('dev')['w_iops'].mean()
diskstat_iops = df.groupby('dev')['iops'].mean()
diskstat_wait = df.groupby('dev')['await_time'].mean()
diskstat_table = pd.concat([diskstat_r_q1, diskstat_r_q2, diskstat_r_q3, diskstat_r_avg,
diskstat_w_q1, diskstat_w_q2, diskstat_w_q3, diskstat_w_avg,
diskstat_q1, diskstat_q2, diskstat_q3, diskstat_avg,
diskstat_r_iops, diskstat_w_iops, diskstat_iops,
diskstat_wait], axis=1, sort=False)
diskstat_columns = ['Q1 throughput(Read)', 'Q2 throughput(Read)', 'Q3 throughput(Read)', 'Avg throughput(Read)',
'Q1 throughput(Write)', 'Q2 throughput(Write)', 'Q3 throughput(Write)', 'Avg throughput(Write)',
'Q1 throughput(R+W)', 'Q2 throughput(R+W)', 'Q3 throughput(R+W)', 'Avg throughput(R+W)',
'Avg IOPS(Read)', 'Avg IOPS(Write)', 'Avg IOPS(R+W)', 'Avg Await time(ms)']
diskstat_table.columns = diskstat_columns
diskstat_dev = diskstat_table.index.format()
final_table = pd.DataFrame(columns=diskstat_columns)
for j, dev in enumerate(diskstat_dev):
tmp_list = []
for i in diskstat_columns[:-4]:
tmp_list.append(convertbytes(diskstat_table.iloc[j][i]))
for i in diskstat_columns[-4:-1]:
tmp_list.append('%d' % int(diskstat_table.iloc[j][i]))
tmp_list.append('%.3lf ms' % diskstat_table.iloc[j][-1])
tmp_table = pd.DataFrame([tuple(tmp_list)],
columns=diskstat_columns,
index=[dev])
final_table = pd.concat([final_table, tmp_table])
if cfg.verbose:
print_title('DISKSTAT Profiling:')
print('Disk Throughput Quartile :')
print(final_table.T)
df_feature = pd.DataFrame({ 'name':['diskstat_q1','diskstat_q2','diskstat_q3'],
'value': [diskstat_q1.mean(), diskstat_q2.mean(), diskstat_q3.mean()] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
def cpu_profile(logdir, cfg, df):
if cfg.verbose:
print_title('CPU Profiling:')
print('elapsed_time (s) = %.6lf' % cfg.elapsed_time)
grouped_df = df.groupby("deviceId")["duration"]
total_exec_time = 0
for key, item in grouped_df:
print(("[%d]: %lf" % (key, grouped_df.get_group(key).sum())))
total_exec_time = total_exec_time + grouped_df.get_group(key).sum()
print("total execution time (s) = %.3lf" % total_exec_time)
cpu_detail_profile_df = df[['timestamp','duration','name']]
cpu_detail_profile_df = cpu_detail_profile_df.sort_values(by=['duration'], ascending=False)
cpu_detail_profile_df['ratio(%)'] = cpu_detail_profile_df['duration']/total_exec_time * 100
cpu_detail_profile_df = cpu_detail_profile_df[['timestamp','ratio(%)','duration','name']]
print(cpu_detail_profile_df[:20].to_string(index=False))
def vmstat_profile(logdir, cfg, df, features):
_,_,_,_,_,_,df['si'],df['so'],df['bi'],df['bo'],df['in'],df['cs'],_,_,_,_,_=df['name'].str.split('|').str
for col_name in ('si','so','bi','bo','in','cs'):
df[col_name] = df[col_name].str[3:]
vmstat_traces = df[['si','so','bi','bo','in','cs']].astype(float)
vm_bi = vmstat_traces['bi'].mean()
vm_bo = vmstat_traces['bo'].mean()
vm_cs = vmstat_traces['cs'].mean()
vm_in = vmstat_traces['in'].mean()
if cfg.verbose:
print_title('VMSTAT Profiling:')
        print('average bi/s: %d' % int(vm_bi))
        print('average bo/s: %d' % int(vm_bo))
        print('average cs/s: %d' % int(vm_cs))
        print('average in/s: %d' % int(vm_in))
df_feature = pd.DataFrame({ 'name':['vm_bi', 'vm_bo', 'vm_cs', 'vm_in' ],
'value':[vm_bi, vm_bo, vm_cs, vm_in] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
def mpstat_profile(logdir, cfg, df, features):
if not cfg.cluster_ip and cfg.verbose:
print_title('MPSTAT Profiling:')
num_cores = int(df['deviceId'].max() + 1)
df_summary = pd.DataFrame( np.zeros((num_cores,5)), columns=['USR','SYS','IDL','IOW','IRQ'])
_,_,_,_,_,df['USR'],df['SYS'],df['IDL'],df['IOW'],df['IRQ'],_ = df["name"].str.split('|').str
df[['USR','SYS','IDL','IOW','IRQ']] = df[['USR','SYS','IDL','IOW','IRQ']].astype(float)
df["dt_all"] = np.where(df["IDL"]==100, 0.1, df["duration"]/((100-df["IDL"])/100.0))
df["t_USR"] = df['dt_all'] * df['USR']/100.0
df["t_SYS"] = df['dt_all'] * df['SYS']/100.0
df["t_IDL"] = df['dt_all'] * df['IDL']/100.0
df["t_IOW"] = df['dt_all'] * df['IOW']/100.0
df["t_IRQ"] = df['dt_all'] * df['IRQ']/100.0
dfs=[]
for i in range(num_cores):
dfs.append(df.loc[df['deviceId'] == float(i)])
for index,dff in enumerate(dfs):
df_summary.iloc[index]['USR'] = dff['t_USR'].sum()
df_summary.iloc[index]['SYS'] = dff['t_SYS'].sum()
df_summary.iloc[index]['IDL'] = dff['t_IDL'].sum()
df_summary.iloc[index]['IRQ'] = dff['t_IRQ'].sum()
df_summary.iloc[index]['IOW'] = dff['t_IOW'].sum()
if not cfg.cluster_ip and cfg.verbose:
print('CPU Utilization (%):')
print('core\tUSR\tSYS\tIDL\tIOW\tIRQ')
for i in range(len(df_summary)):
t_sum = df_summary.iloc[i].sum()
if not cfg.cluster_ip and cfg.verbose:
print('%3d\t%3d\t%3d\t%3d\t%3d\t%3d'%(i,int(100.0*df_summary.iloc[i]['USR']/t_sum),
int(100.0*df_summary.iloc[i]['SYS']/t_sum),
int(100.0*df_summary.iloc[i]['IDL']/t_sum),
int(100.0*df_summary.iloc[i]['IOW']/t_sum),
int(100.0*df_summary.iloc[i]['IRQ']/t_sum) ))
if not cfg.cluster_ip and cfg.verbose:
print('CPU Time (s):')
print('core\tUSR\tSYS\tIDL\tIOW\tIRQ')
for i in range(len(df_summary)):
t_sum = df_summary.iloc[i].sum()
if not cfg.cluster_ip and cfg.verbose:
print('%3d\t%.2lf\t%.2lf\t%.2lf\t%.2lf\t%.2lf'%(i,
df_summary.iloc[i]['USR'],
df_summary.iloc[i]['SYS'],
df_summary.iloc[i]['IDL'],
df_summary.iloc[i]['IOW'],
df_summary.iloc[i]['IRQ'] ))
total_cpu_time = df_summary[['USR','SYS','IRQ']].sum().sum()
cpu_util = int(100*total_cpu_time / (num_cores*cfg.elapsed_time))
if not cfg.cluster_ip and cfg.verbose:
print('Active CPU Time (s): %.3lf' % total_cpu_time)
print('Active CPU ratio (%%): %3d' % cpu_util)
df_feature = pd.DataFrame({ 'name':['num_cores', 'cpu_util'],
'value':[num_cores, cpu_util] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
def sofa_analyze(cfg):
print_main_progress('SOFA analyzing...')
filein = []
df_cpu = pd.DataFrame([], columns=cfg.columns)
df_gpu = pd.DataFrame([], columns=cfg.columns)
df_net = pd.DataFrame([], columns=cfg.columns)
df_mpstat = pd.DataFrame([], columns=cfg.columns)
df_vmstat = pd.DataFrame([], columns=cfg.columns)
df_bandwidth = pd.DataFrame([], columns=cfg.columns)
df_blktrace = pd.DataFrame([], columns=cfg.columns)
df_diskstat = pd.DataFrame([], columns=cfg.columns)
df_nvsmi = pd.DataFrame([], columns=cfg.columns)
iter_summary = None
logdir = cfg.logdir
with open(logdir+'/misc.txt') as f:
lines = f.readlines()
elapsed_time = float(lines[0].split()[1])
vcores = int(lines[2].split()[1])
cfg.elapsed_time = float(lines[0].split()[1])
filein_gpu = logdir + "gputrace.csv"
filein_cpu = logdir + "cputrace.csv"
filein_net = logdir + "nettrace.csv"
filein_vmstat = logdir + "vmstat.csv"
filein_mpstat = logdir + "mpstat.csv"
filein_strace = logdir + "strace.csv"
filein_nvsmi = logdir + "nvsmi_trace.csv"
filein_bandwidth = logdir + "netstat.csv"
filein_blktrace = logdir + "blktrace.csv"
filein_diskstat = logdir + "diskstat_vector.csv"
if os.path.isfile('%s/nvlink_topo.txt' % logdir):
with open(logdir + 'nvlink_topo.txt') as f:
lines = f.readlines()
if len(lines) > 0:
title = lines[0]
num_gpus = 1
for word in title.split():
if re.match(r'GPU', word) != None :
num_gpus = num_gpus + 1
print_info(cfg,'# of GPUs: ' + str(num_gpus) )
edges = []
if len(lines) >= num_gpus+1:
for i in range(num_gpus):
connections = lines[1+i].split()
for j in range(len(connections)):
if connections[j] == 'NV1' or connections[j] == 'NV2':
edges.append((i,j-1))
#print('%d connects to %d' % (i, j-1))
ring_found = False
G = nx.DiGraph(edges)
# Try to find ring with its length of num_gpus
for cycle in nx.simple_cycles(G):
if len(cycle) == num_gpus:
if cfg.verbose:
print('One of the recommended ring having length of %d' % len(cycle))
ring_found = True
os.system("mkdir -p sofalog/sofa_hints/")
xring_order = ','.join(map(str, cycle))
with open("sofalog/sofa_hints/xring_order.txt", "w") as f:
f.write('export CUDA_VISIBLE_DEVICES=' + xring_order)
break
# Try to find ring with its length of num_gpus/2
if not ring_found:
for cycle in nx.simple_cycles(G):
if len(cycle) == num_gpus/2:
print(("One of the recommended ring having length of %d" % len(cycle) ))
ring_found = True
os.system("mkdir -p sofalog/sofa_hints/")
xring_order = ','.join(map(str, cycle))
with open("sofalog/sofa_hints/xring_order.txt", "w") as f:
f.write('export CUDA_VISIBLE_DEVICES=' + xring_order)
break
# Construct Performance Features
features = pd.DataFrame({'name':['elapsed_time'], 'value':[cfg.elapsed_time]}, columns=['name','value'])
try:
df_nvsmi = pd.read_csv(filein_nvsmi)
if not df_nvsmi.empty and cfg.spotlight_gpu:
state = 0
sm_high = 0
trigger = 10
for i in range(len(df_nvsmi)):
if df_nvsmi.iloc[i].event == 0 and df_nvsmi.iloc[i].deviceId == 0 :
if df_nvsmi.iloc[i].duration >= 50:
sm_high = min(trigger, sm_high + 1)
if df_nvsmi.iloc[i].duration < 10:
sm_high = max(0, sm_high - 1)
if state == 0 and sm_high == trigger:
state = 1
cfg.roi_begin = df_nvsmi.iloc[i].timestamp
elif state == 1 and sm_high == 0:
state = 0
cfg.roi_end = df_nvsmi.iloc[i].timestamp
#print('sm_high=%d state=%d' % (sm_high, state))
if cfg.roi_end - cfg.roi_begin < 0:
cfg.roi_end = 0
cfg.roi_begin = 0
except IOError:
print_warning(cfg, "nvsmi_trace.csv is not found")
try:
df_cpu = pd.read_csv(filein_cpu)
if not df_cpu.empty:
if cfg.verbose:
cpu_profile(logdir, cfg, df_cpu)
if cfg.enable_swarms and len(df_cpu) > cfg.num_swarms:
df_cpu, swarms = hsg_v2(cfg, df_cpu)
except IOError as e:
df_cpu = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_cpu)
try:
df_strace = pd.read_csv(filein_strace)
if not df_strace.empty:
features = strace_profile(logdir, cfg, df_strace, features)
except IOError as e:
df_strace = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_strace)
try:
df_net = pd.read_csv(filein_net)
if not df_net.empty:
features = net_profile(logdir, cfg, df_net, features)
except IOError as e:
df_net = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_net)
try:
df_bandwidth = pd.read_csv(filein_bandwidth)
if not df_bandwidth.empty:
features = netbandwidth_profile(logdir, cfg, df_bandwidth, features)
except IOError as e:
df_bandwidth = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_bandwidth)
try:
df_blktrace = pd.read_csv(filein_blktrace)
if not df_blktrace.empty:
features = blktrace_latency_profile(logdir, cfg, df_blktrace, features)
except IOError as e:
df_blktrace = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_blktrace)
try:
df_diskstat = pd.read_csv(filein_diskstat)
if not df_diskstat.empty:
features = diskstat_profile(logdir, cfg, df_diskstat, features)
except IOError as e:
df_diskstat = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_diskstat)
try:
df_vmstat = pd.read_csv(filein_vmstat)
if not df_vmstat.empty:
features = vmstat_profile(logdir, cfg, df_vmstat, features)
except IOError as e:
df_vmstat = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_vmstat)
try:
df_mpstat = pd.read_csv(filein_mpstat)
if not df_mpstat.empty:
features = mpstat_profile(logdir, cfg, df_mpstat, features)
except IOError as e:
df_mpstat = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_mpstat)
try:
df_nvsmi = pd.read_csv(filein_nvsmi)
features = nvsmi_profile(logdir, cfg, df_nvsmi, features)
except IOError:
print_warning(cfg, "nvsmi_trace.csv is not found")
try:
df_gpu = pd.read_csv(filein_gpu)
if not df_gpu.empty:
features = gpu_profile(logdir, cfg, df_gpu, features)
except IOError:
df_gpu = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found. If there is no need to profile GPU, just ignore it." % filein_gpu)
try:
if len(df_mpstat)>0:
df_nvsmi.append(df_mpstat.iloc[0])
features = concurrency_breakdown(logdir, cfg, df_mpstat, df_cpu, df_gpu, df_nvsmi, df_bandwidth, features)
except IOError as e:
print_warning(cfg, "Some files are not found, which are needed for concurrency_breakdown analysis")
if cfg.enable_aisi:
selected_pattern, iter_summary, features = sofa_aisi(logdir, cfg, df_cpu, df_gpu, df_strace, df_mpstat, features)
if 'IS_SOFA_ON_HAIHUB' not in os.environ or os.environ['IS_SOFA_ON_HAIHUB'] == 'no':
print_title('Final Performance Features')
print('%s%s%s%s' % ('ID'.ljust(10),'Feature'.ljust(30),'Value'.ljust(20),'Unit'.ljust(20)) )
for i in range(len(features)):
name = features.iloc[i]['name']
value = features.iloc[i]['value']
print('%s%s%s' % (str(i).ljust(10), name.ljust(30), ('%.3lf'%value).ljust(20)))
if cfg.spotlight_gpu:
try:
print('Elapsed hotspot time: %.3lf' % features[features.name=='elapsed_hotspot_time'].value)
except:
                print_warning(cfg, 'elapsed_hotspot_time is not defined.')
if cfg.potato_server:
if cfg.potato_server.find(':') == -1:
cfg.potato_server = cfg.potato_server + ':50051'
hint, docker_image = get_hint(cfg.potato_server, features)
df_report = pd.read_json(hint, orient='table')
file_potato_report = cfg.logdir + 'potato_report.html'
# Export report to HTML file.
df_report.to_html(file_potato_report )
with open(file_potato_report, 'a') as f:
f.write('<head><link rel=stylesheet type="text/css" href="potato_report.css"></head>')
print_title('POTATO Feedback')
print('%s%s%s%s' % ('ID'.ljust(5), 'Metric'.ljust(20), 'Value'.ljust(10), 'Reference-Value'.ljust(30) ) )
for i in range(len(df_report)):
metric = df_report.iloc[i]['Metric']
if metric != 'hybrid_suggestion':
value = df_report.iloc[i]['Value']
ref_value = df_report.iloc[i]['ReferenceValue']
print('%s%s%s%s' % (str(i).ljust(5), metric.ljust(20), ('%.3lf'%value).ljust(20), str(ref_value).ljust(30)))
print('\n')
print_hint('General Suggestions:')
for i in range(len(df_report)):
metric = df_report.iloc[i]['Metric']
if metric != 'hybrid_suggestion':
suggestion = df_report.iloc[i]['Suggestion']
print('%d. %s' % (i, suggestion))
print('\n')
print_hint('Framework-specific Optimization Suggestions:')
for i in range(len(df_report)):
metric = df_report.iloc[i]['Metric']
if metric == 'hybrid_suggestion':
suggestion = df_report.iloc[i]['Suggestion']
print('%d. %s' % (i, suggestion))
#print(df_report[['Metric', 'Value', 'Reference Value']])
#print(df_report[['Suggestion']])
#print('Tag of optimal image recommended from POTATO: ' + highlight(docker_image))
print('\n')
print_hint('Please re-launch KubeFlow Jupyter-notebook to have suggested images or resources if necessary.')
sofa_home = os.path.dirname(os.path.realpath(__file__))
subprocess.Popen(
['bash', '-c', 'cp %s/../sofaboard/* %s;' % (sofa_home, cfg.logdir)])
subprocess.Popen(['sleep', '2'])
print('\n\n')
print('Complete!!')
def cluster_analyze(cfg):
if cfg.verbose:
print_title('Cluster Network Profiling :')
cluster = cfg.cluster_ip.split(',')
summary_net = pd.DataFrame([], columns=['Source', 'Destination', 'Amount', 'Percentage of a Node'])
summary_compute = pd.DataFrame([], columns=['gpu_sm_util','gpu_mem_util','cpu_util'])
summary_band = pd.DataFrame([], columns=['Q1', 'Q2', 'Q3', 'Avg'])
all = []
for i, ip in enumerate(cluster):
features = pd.DataFrame({'name':['elapsed_time'],
'value':[cfg.elapsed_time]},
columns=['name','value'])
node = 'node ' + str(i)
if cfg.verbose:
print('node ' + str(i) + ' is ' + ip)
logdir = tmp_dir[0:-1] + '-' + ip + '/'
filein_net = logdir + "nettrace.csv"
filein_mpstat = logdir + "mpstat.csv"
filein_nvsmi = logdir + "nvsmi_trace.csv"
filein_bandwidth = logdir + "netstat.csv"
with open(logdir+'/misc.txt') as f:
lines = f.readlines()
elapsed_time = float(lines[0].split()[1])
vcores = int(lines[2].split()[1])
cfg.elapsed_time = float(lines[0].split()[1])
try:
df_net = pd.read_csv(filein_net)
features = net_profile(logdir, cfg, df_net, features)
except IOError as e:
df_net = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_net)
try:
df_mpstat = pd.read_csv(filein_mpstat)
features = mpstat_profile(logdir, cfg, df_mpstat, features)
except IOError as e:
df_mpstat = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_mpstat)
try:
df_nvsmi = pd.read_csv(filein_nvsmi)
features = nvsmi_profile(logdir, cfg, df_nvsmi, features)
except IOError:
print_warning(cfg, "nvsmi_trace.csv is not found")
try:
df_bandwidth = pd.read_csv(filein_bandwidth)
features = netbandwidth_profile(logdir, cfg, df_bandwidth, features)
except IOError as e:
df_bandwidth = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_bandwidth)
sm = int(features[features['name'] == 'gpu_sm_util']['value'])
mem = int(features[features['name'] == 'gpu_mem_util']['value'])
cpu = int(features[features['name'] == 'cpu_util']['value'])
sm_mem_cpu = [sm, mem, cpu]
compute_tmp = pd.DataFrame([sm_mem_cpu], columns = ['gpu_sm_util', 'gpu_mem_util', 'cpu_util'])
summary_compute = pd.concat([summary_compute, pd.concat([compute_tmp], keys=[node])])
net_tmp = pd.read_csv(logdir + "netrank.csv")
summary_net = pd.concat([summary_net, pd.concat([net_tmp], keys=[node])])
# for bandwidth report
tx = df_bandwidth['event'] == float(0)
rx = df_bandwidth['event'] == float(1)
tx_tmp = [convertbytes(df_bandwidth[tx]['bandwidth'].quantile(0.25)),
convertbytes(df_bandwidth[tx]['bandwidth'].quantile(0.5)),
convertbytes(df_bandwidth[tx]['bandwidth'].quantile(0.75)),
convertbytes(df_bandwidth[tx]['bandwidth'].mean())]
rx_tmp = [convertbytes(df_bandwidth[rx]['bandwidth'].quantile(0.25)),
convertbytes(df_bandwidth[rx]['bandwidth'].quantile(0.5)),
convertbytes(df_bandwidth[rx]['bandwidth'].quantile(0.75)),
convertbytes(df_bandwidth[rx]['bandwidth'].mean())]
band_tmp = pd.DataFrame([tx_tmp], columns = ['Q1', 'Q2', 'Q3', 'Avg'], index = ['tx'])
rx_pd = pd.DataFrame([rx_tmp], columns = ['Q1', 'Q2', 'Q3', 'Avg'], index = ['rx'])
band_tmp = pd.concat([band_tmp, rx_pd])
summary_band = pd.concat([summary_band, pd.concat([band_tmp], keys=[node])])
if cfg.verbose:
with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also
print('Ranked Network Traffic : \n', summary_net, '\n')
print('Cluster Bandwidth Quartile: \n', summary_band)
print_title('Cluster Computation Profiling:')
print(summary_compute)
| apache-2.0 |
equialgo/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`,
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
Insight-book/data-science-from-scratch | first-edition/code/gradient_descent.py | 53 | 5895 | from __future__ import division
from collections import Counter
from linear_algebra import distance, vector_subtract, scalar_multiply
import math, random
def sum_of_squares(v):
"""computes the sum of squared elements in v"""
return sum(v_i ** 2 for v_i in v)
def difference_quotient(f, x, h):
return (f(x + h) - f(x)) / h
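# difference_quotient is the forward-difference approximation
# (f(x + h) - f(x)) / h, which approaches the true derivative f'(x)
# as h shrinks toward zero.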
def plot_estimated_derivative():
def square(x):
return x * x
def derivative(x):
return 2 * x
derivative_estimate = lambda x: difference_quotient(square, x, h=0.00001)
# plot to show they're basically the same
import matplotlib.pyplot as plt
x = range(-10,10)
plt.plot(x, map(derivative, x), 'rx') # red x
plt.plot(x, map(derivative_estimate, x), 'b+') # blue +
plt.show() # purple *, hopefully
def partial_difference_quotient(f, v, i, h):
# add h to just the i-th element of v
w = [v_j + (h if j == i else 0)
for j, v_j in enumerate(v)]
return (f(w) - f(v)) / h
def estimate_gradient(f, v, h=0.00001):
return [partial_difference_quotient(f, v, i, h)
for i, _ in enumerate(v)]
def step(v, direction, step_size):
"""move step_size in the direction from v"""
return [v_i + step_size * direction_i
for v_i, direction_i in zip(v, direction)]
def sum_of_squares_gradient(v):
return [2 * v_i for v_i in v]
def safe(f):
"""define a new function that wraps f and return it"""
def safe_f(*args, **kwargs):
try:
return f(*args, **kwargs)
except:
return float('inf') # this means "infinity" in Python
return safe_f
#
#
# minimize / maximize batch
#
#
def minimize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):
"""use gradient descent to find theta that minimizes target function"""
step_sizes = [100, 10, 1, 0.1, 0.01, 0.001, 0.0001, 0.00001]
theta = theta_0 # set theta to initial value
target_fn = safe(target_fn) # safe version of target_fn
value = target_fn(theta) # value we're minimizing
while True:
gradient = gradient_fn(theta)
next_thetas = [step(theta, gradient, -step_size)
for step_size in step_sizes]
# choose the one that minimizes the error function
next_theta = min(next_thetas, key=target_fn)
next_value = target_fn(next_theta)
# stop if we're "converging"
if abs(value - next_value) < tolerance:
return theta
else:
theta, value = next_theta, next_value
def negate(f):
"""return a function that for any input x returns -f(x)"""
return lambda *args, **kwargs: -f(*args, **kwargs)
def negate_all(f):
"""the same when f returns a list of numbers"""
return lambda *args, **kwargs: [-y for y in f(*args, **kwargs)]
def maximize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):
return minimize_batch(negate(target_fn),
negate_all(gradient_fn),
theta_0,
tolerance)
#
# minimize / maximize stochastic
#
def in_random_order(data):
"""generator that returns the elements of data in random order"""
indexes = [i for i, _ in enumerate(data)] # create a list of indexes
random.shuffle(indexes) # shuffle them
for i in indexes: # return the data in that order
yield data[i]
def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
data = zip(x, y)
theta = theta_0 # initial guess
alpha = alpha_0 # initial step size
min_theta, min_value = None, float("inf") # the minimum so far
iterations_with_no_improvement = 0
# if we ever go 100 iterations with no improvement, stop
while iterations_with_no_improvement < 100:
value = sum( target_fn(x_i, y_i, theta) for x_i, y_i in data )
if value < min_value:
# if we've found a new minimum, remember it
# and go back to the original step size
min_theta, min_value = theta, value
iterations_with_no_improvement = 0
alpha = alpha_0
else:
# otherwise we're not improving, so try shrinking the step size
iterations_with_no_improvement += 1
alpha *= 0.9
# and take a gradient step for each of the data points
for x_i, y_i in in_random_order(data):
gradient_i = gradient_fn(x_i, y_i, theta)
theta = vector_subtract(theta, scalar_multiply(alpha, gradient_i))
return min_theta
def maximize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
return minimize_stochastic(negate(target_fn),
negate_all(gradient_fn),
x, y, theta_0, alpha_0)
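# Illustrative use of minimize_stochastic on a one-parameter least squares fit
# of y ~ theta * x. The two helpers below exist only for this sketch; they are
# not part of linear_algebra or any other module in this repository.
def example_squared_error(x_i, y_i, theta):
    return (y_i - theta[0] * x_i) ** 2
def example_squared_error_gradient(x_i, y_i, theta):
    return [-2 * x_i * (y_i - theta[0] * x_i)]
def example_stochastic_fit():
    x = range(-10, 11)
    y = [3 * x_i for x_i in x]               # exact relationship y = 3x
    theta_0 = [random.random()]
    theta = minimize_stochastic(example_squared_error,
                                example_squared_error_gradient,
                                x, y, theta_0, 0.01)
    return theta                              # should end up close to [3.0]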
if __name__ == "__main__":
print "using the gradient"
v = [random.randint(-10,10) for i in range(3)]
tolerance = 0.0000001
while True:
#print v, sum_of_squares(v)
gradient = sum_of_squares_gradient(v) # compute the gradient at v
next_v = step(v, gradient, -0.01) # take a negative gradient step
if distance(next_v, v) < tolerance: # stop if we're converging
break
v = next_v # continue if we're not
print "minimum v", v
print "minimum value", sum_of_squares(v)
print
print "using minimize_batch"
v = [random.randint(-10,10) for i in range(3)]
v = minimize_batch(sum_of_squares, sum_of_squares_gradient, v)
print "minimum v", v
print "minimum value", sum_of_squares(v)
| unlicense |
Dapid/pywt | demo/dwt_signal_decomposition.py | 1 | 1789 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
import matplotlib.pyplot as plt
import pywt
ecg = np.load(os.path.join('data', 'ecg.npy'))
data1 = np.concatenate((np.arange(1, 400),
np.arange(398, 600),
np.arange(601, 1024)))
x = np.linspace(0.082, 2.128, num=1024)[::-1]
data2 = np.sin(40 * np.log(x)) * np.sign((np.log(x)))
mode = pywt.MODES.sp1
def plot_signal_decomp(data, w, title):
"""Decompose and plot a signal S.
S = An + Dn + Dn-1 + ... + D1
"""
w = pywt.Wavelet(w)
a = data
ca = []
cd = []
for i in range(5):
(a, d) = pywt.dwt(a, w, mode)
ca.append(a)
cd.append(d)
rec_a = []
rec_d = []
for i, coeff in enumerate(ca):
coeff_list = [coeff, None] + [None] * i
rec_a.append(pywt.waverec(coeff_list, w))
for i, coeff in enumerate(cd):
coeff_list = [None, coeff] + [None] * i
rec_d.append(pywt.waverec(coeff_list, w))
fig = plt.figure()
ax_main = fig.add_subplot(len(rec_a) + 1, 1, 1)
ax_main.set_title(title)
ax_main.plot(data)
ax_main.set_xlim(0, len(data) - 1)
for i, y in enumerate(rec_a):
ax = fig.add_subplot(len(rec_a) + 1, 2, 3 + i * 2)
ax.plot(y, 'r')
ax.set_xlim(0, len(y) - 1)
ax.set_ylabel("A%d" % (i + 1))
for i, y in enumerate(rec_d):
ax = fig.add_subplot(len(rec_d) + 1, 2, 4 + i * 2)
ax.plot(y, 'g')
ax.set_xlim(0, len(y) - 1)
ax.set_ylabel("D%d" % (i + 1))
plot_signal_decomp(data1, 'coif5', "DWT: Signal irregularity")
plot_signal_decomp(data2, 'sym5', "DWT: Frequency and phase change - Symmlets5")
plot_signal_decomp(ecg, 'sym5', "DWT: Ecg sample - Symmlets5")
plt.show()
| mit |
keflavich/APEX_CMZ_H2CO | plot_codes/parameter_comparisons.py | 2 | 20997 | import matplotlib
import paths
matplotlib.rc_file(paths.pcpath('pubfiguresrc'))
import os
import pylab as pl
from astropy import table
from paths import analysispath
import numpy as np
from astropy import coordinates
from astropy import units as u
import heating
pcfittable = table.Table.read(os.path.join(analysispath,
'fitted_line_parameters_Chi2Constraints.ipac'),
format='ascii.ipac')
lolim = pcfittable['tmax1sig_chi2'] > 340
maps = np.char.startswith(pcfittable['Source_Name'], 'Map')
ok = ~np.isnan(pcfittable['tmin1sig_chi2']) & (pcfittable['width'] < 40) & (pcfittable['h2coratio321303']/pcfittable['eh2coratio321303'] > 5) & pcfittable['is_good'].astype('bool')
flags = {'is_map': maps,
'is_lolim': lolim,
'is_ok': ok}
# Don't plot these for now...
pcfittable = pcfittable[(~lolim) & ok]
maps = np.char.startswith(pcfittable['Source_Name'], 'Map')
lolim_conservative = pcfittable['tmax1sig_chi2'] > 150
fig4 = pl.figure(4)
fig4.clf()
ax = fig4.add_subplot(1,3,1)
ax.errorbar(pcfittable['temperature_chi2'], pcfittable['density_chi2'],
yerr=[pcfittable['density_chi2']-pcfittable['dmin1sig_chi2'],
pcfittable['dmax1sig_chi2']-pcfittable['density_chi2']],
xerr=[pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'],
pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2']],
linestyle='none', marker='s', linewidth=1, alpha=0.5)
ax2 = fig4.add_subplot(1,3,2)
# Don't do this any more: it relies on having the RADEX fits, which we don't.
#ax2.errorbar(pcfittable['temperature_chi2'], pcfittable['temperature'],
# yerr=pcfittable['etemperature'],
# xerr=[pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'],
# pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2']],
# linestyle='none', marker='s', linewidth=1, alpha=0.5)
ax2.plot([0,300],[0,300],'k--',linewidth=2,alpha=0.5)
fig5 = pl.figure(5)
fig5.clf()
ax5 = fig5.gca()
ax5.errorbar(coordinates.Angle(pcfittable['GLON']*u.deg).wrap_at(180*u.deg).value[maps],
pcfittable['temperature_chi2'][maps],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[maps],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[maps]],
capsize=0, markeredgecolor='none',
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='r')
ax5.set_ylim(0,150)
ax5.set_ylabel("Temperature (K)")
ax5.set_xlabel("Galactic Longitude ($^{\\circ}$)")
fig5.savefig(paths.fpath('chi2_temperature_vs_glon_byfield.pdf'),
bbox_inches='tight')
ax5.errorbar(coordinates.Angle(pcfittable['GLON']*u.deg).wrap_at(180*u.deg).value[~maps],
pcfittable['temperature_chi2'][~maps],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[~maps],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[~maps]],
capsize=0, markeredgecolor='none',
linestyle='none', marker='s', linewidth=1, alpha=0.5)
fig5.savefig(paths.fpath('chi2_temperature_vs_glon_fieldsandsources.pdf'),
bbox_inches='tight')
fig6 = pl.figure(6)
fig6.clf()
ax6 = fig6.gca()
mask = maps&~lolim_conservative
ax6.errorbar(pcfittable['higaldusttem'][mask],
pcfittable['temperature_chi2'][mask],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[mask],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[mask]],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='r', capsize=0)
ax6.plot([15,30],[15,30],'k--')
mask = maps&lolim_conservative
ax6.plot(pcfittable['higaldusttem'][mask],
pcfittable['tmin1sig_chi2'][mask],
marker='^',
markersize=10,
markeredgecolor='none',
color='r',
alpha=0.5,
linestyle='none')
ax6.set_xlabel("HiGal Dust Temperature (K)")
ax6.set_ylabel("H$_2$CO Temperature (K)")
ax6.set_ylim(0,200)
ax6.set_xlim(15,30)
fig6.savefig(paths.fpath('chi2_temperature_vs_higaltemperature_byfield.pdf'),
bbox_inches='tight')
mask = (~maps)&(~lolim_conservative)
ax6.errorbar(pcfittable['higaldusttem'][mask],
pcfittable['temperature_chi2'][mask],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[mask],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[mask]],
capsize=0,
markeredgecolor='none',
markersize=10,
linestyle='none', marker='s', linewidth=0.5, alpha=0.5, color='b')
mask = (~maps)&lolim_conservative
ax6.plot(pcfittable['higaldusttem'][mask],
pcfittable['tmin1sig_chi2'][mask],
marker='^',
markersize=10,
markeredgecolor='none',
color='b',
alpha=0.5,
linestyle='none')
ax6.set_ylim(10,150)
ax6.set_xlim(15,30)
fig6.savefig(paths.fpath('chi2_temperature_vs_higaltemperature_fieldsandsources_notitle.pdf'),
bbox_inches='tight')
ax6.set_title("Hand-selected regions")
fig6.savefig(paths.fpath('chi2_temperature_vs_higaltemperature_fieldsandsources.pdf'),
bbox_inches='tight')
fig7 = pl.figure(7)
fig7.clf()
ax7 = fig7.gca()
mask = maps&~lolim_conservative
ax7.errorbar(pcfittable['width'][mask]*(8*np.log(2))**0.5,
pcfittable['temperature_chi2'][mask],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[mask],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[mask]],
capsize=0,
markersize=10,
markeredgecolor='none',
linestyle='none', marker='s', linewidth=0.5, alpha=0.6, color='r')
mask = maps&lolim_conservative
ax7.plot(pcfittable['width'][mask]*(8*np.log(2))**0.5,
pcfittable['tmin1sig_chi2'][mask],
marker='^',
markersize=10,
markeredgecolor='none',
color='r',
alpha=0.4,
linestyle='none')
linewidths = np.linspace(0,pcfittable['width'].max())*u.km/u.s
ax7.plot(linewidths*2.35, [heating.tkin_all(10**4*u.cm**-3, sigma, 10*u.pc,
5*u.km/u.s/u.pc, 30*u.K)
for sigma in linewidths],
linestyle='--', color='k', label='$n=10^4$ cm$^{-3}$', zorder=-5)
ax7.plot(linewidths*2.35, [heating.tkin_all(10**4*u.cm**-3, sigma, 10*u.pc,
1*u.km/u.s/u.pc, 30*u.K)
for sigma in linewidths],
linestyle='--', color='r', label='$n=10^4$ cm$^{-3}$, $dv/dr=1$', zorder=-5, linewidth=2, alpha=0.5)
ax7.plot(linewidths*2.35, [heating.tkin_all(10**4*u.cm**-3, sigma, 20*u.pc,
5*u.km/u.s/u.pc, 30*u.K)
for sigma in linewidths],
linestyle='--', color='b', label='$n=10^4$ cm$^{-3}$, $L=20$ pc', zorder=-5, alpha=0.5, linewidth=2)
ax7.plot(linewidths*2.35, [heating.tkin_all(10**5*u.cm**-3, sigma, 10*u.pc,
5*u.km/u.s/u.pc, 30*u.K)
for sigma in linewidths],
linestyle=':', color='k', label='$n=10^5$ cm$^{-3}$', zorder=-5)
ax7.plot(linewidths*2.35, [heating.tkin_all(10**6*u.cm**-3, sigma, 10*u.pc,
5*u.km/u.s/u.pc, 30*u.K)
for sigma in linewidths],
linestyle='-.', color='k', label='$n=10^6$ cm$^{-3}$', zorder=-5)
ax7.plot(linewidths*2.35, [heating.tkin_all(10**5*u.cm**-3, sigma, 10*u.pc,
5*u.km/u.s/u.pc, 30*u.K, crir=1e-15*u.s**-1)
for sigma in linewidths],
linestyle='-', color='g', label='$n=10^5$ cm$^{-3}$, $\zeta_{CR}=10^{-15}$ s$^{-1}$', zorder=-10, alpha=0.25, linewidth=4)
ax7.plot(linewidths*2.35, [heating.tkin_all(10**5*u.cm**-3, sigma, 10*u.pc,
5*u.km/u.s/u.pc, 30*u.K, crir=1e-14*u.s**-1)
for sigma in linewidths],
linestyle=':', color='purple', label='$n=10^5$ cm$^{-3}$, $\zeta_{CR}=10^{-14}$ s$^{-1}$', zorder=-10, alpha=0.25, linewidth=4)
box = ax7.get_position()
ax7.set_position([box.x0, box.y0, box.width * 0.7, box.height])
ax7.legend(loc='center left', fontsize=16, bbox_to_anchor=(1.0, 0.75))
ax7.set_xlabel("Line FWHM (km s$^{-1}$)")
ax7.set_ylabel("Temperature (K)")
ax7.set_ylim(10,150)
fig7.savefig(paths.fpath('chi2_temperature_vs_linewidth_byfield.pdf'),
bbox_inches='tight')
mask = (~maps)&(~lolim_conservative)
ax7.errorbar(pcfittable['width'][mask]*(8*np.log(2))**0.5,
pcfittable['temperature_chi2'][mask],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[mask],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[mask]],
capsize=0,
markeredgecolor='none',
markersize=10,
linestyle='none', marker='s', linewidth=0.5, alpha=0.6, color='b')
mask = (~maps)&lolim_conservative
ax7.plot(pcfittable['width'][mask]*(8*np.log(2))**0.5,
pcfittable['tmin1sig_chi2'][mask],
marker='^',
markersize=10,
markeredgecolor='none',
color='b',
alpha=0.4,
linestyle='none')
ax7.set_ylim(10,150)
fig7.savefig(paths.fpath('chi2_temperature_vs_linewidth_fieldsandsources.pdf'),
bbox_inches='tight')
fig8 = pl.figure(8)
fig8.clf()
ax8 = fig8.gca()
ax8.errorbar(pcfittable['ampH2CO'][maps],
pcfittable['temperature_chi2'][maps],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[maps],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[maps]],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='r')
ax8.set_xlabel("H2CO Peak Amplitude")
ax8.set_ylabel("Temperature (K)")
fig8.savefig(paths.fpath('chi2_temperature_vs_h2coamp_byfield.pdf'),
bbox_inches='tight')
ax8.errorbar(pcfittable['ampH2CO'][~maps],
pcfittable['temperature_chi2'][~maps],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[~maps],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[~maps]],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='b')
fig8.savefig(paths.fpath('chi2_temperature_vs_h2coamp_fieldsandsources.pdf'),
bbox_inches='tight')
fig9 = pl.figure(9)
fig9.clf()
ax9 = fig9.gca()
ax9.set_xscale('log')
ax9.errorbar(pcfittable['higalcolumndens'][maps],
pcfittable['temperature_chi2'][maps],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[maps],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[maps]],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='r')
ax9.set_xlabel("Hi-Gal Fitted Column Density")
ax9.set_ylabel("Temperature (K)")
fig9.savefig(paths.fpath('chi2_temperature_vs_higalcolumn_byfield.pdf'),
bbox_inches='tight')
ax9.errorbar(pcfittable['higalcolumndens'][~maps],
pcfittable['temperature_chi2'][~maps],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[~maps],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[~maps]],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='b')
fig9.savefig(paths.fpath('chi2_temperature_vs_higalcolumn_fieldsandsources.pdf'),
bbox_inches='tight')
fig10 = pl.figure(10)
fig10.clf()
ax10 = fig10.gca()
ax10.errorbar(pcfittable['width'][maps]*(8*np.log(2))**0.5,
pcfittable['h2coratio321303'][maps],
yerr=pcfittable['eh2coratio321303'][maps],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='r')
ax10.set_xlabel("Line FWHM (km s$^{-1}$)")
ax10.set_ylabel("Ratio 321/303")
fig10.savefig(paths.fpath('ratio_vs_linewidth_byfield.pdf'),
bbox_inches='tight')
ax10.errorbar(pcfittable['width'][~maps]*(8*np.log(2))**0.5,
pcfittable['h2coratio321303'][~maps],
yerr=pcfittable['eh2coratio321303'][~maps],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='b')
fig10.savefig(paths.fpath('ratio_vs_linewidth_fieldsandsources.pdf'),
bbox_inches='tight')
fig11 = pl.figure(11)
fig11.clf()
ax11 = fig11.gca()
ax11.errorbar(pcfittable['higaldusttem'][maps],
pcfittable['h2coratio321303'][maps],
yerr=pcfittable['eh2coratio321303'][maps],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='r')
ax11.set_ylim(0,200)
ax11.set_xlim(15,30)
ax11.set_xlabel("HiGal Fitted Temperature")
ax11.set_ylabel("Ratio 321/303")
fig11.savefig(paths.fpath('ratio_vs_higaltemperature_byfield.pdf'),
bbox_inches='tight')
ax11.errorbar(pcfittable['higaldusttem'][~maps],
pcfittable['h2coratio321303'][~maps],
yerr=pcfittable['eh2coratio321303'][~maps],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='b')
fig11.savefig(paths.fpath('ratio_vs_higaltemperature_fieldsandsources.pdf'),
bbox_inches='tight')
# RADEX fitting has been removed
#fig12 = pl.figure(12)
#fig12.clf()
#ax = fig12.add_subplot(1,1,1)
#ax.errorbar(pcfittable['temperature_chi2'], pcfittable['temperature'],
# yerr=pcfittable['etemperature'],
# xerr=[pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'],
# pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2']],
# linestyle='none', marker='s', linewidth=1, alpha=0.5)
#ax.plot([0,300],[0,300],'k--',linewidth=2,alpha=0.5)
#ax.set_title("DEBUG: RADEX+pyspeckit-fitted temperature vs. $\\chi^2$ temperature")
#ax.set_xlabel("$\\chi^2$ Temperature")
#ax.set_ylabel("RADEX+pyspeckit Temperature")
#ax.axis([0,350,0,350])
fig13 = pl.figure(13)
fig13.clf()
ax13 = fig13.gca()
ax13.errorbar(pcfittable['area'][maps],
pcfittable['temperature_chi2'][maps],
yerr=[pcfittable['temperature_chi2'][maps]-pcfittable['tmin1sig_chi2'][maps],
pcfittable['tmax1sig_chi2'][maps]-pcfittable['temperature_chi2'][maps]],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='r')
ax13.set_xlabel("Area (square degrees)")
ax13.set_ylabel("Temperature (K)")
ax13.set_xscale('log')
fig13.savefig(paths.fpath('temperature_vs_area_byfield.pdf'),
bbox_inches='tight')
ax13.errorbar(pcfittable['area'][~maps],
pcfittable['temperature_chi2'][~maps],
yerr=[pcfittable['temperature_chi2'][~maps]-pcfittable['tmin1sig_chi2'][~maps],
pcfittable['tmax1sig_chi2'][~maps]-pcfittable['temperature_chi2'][~maps]],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='b')
fig13.savefig(paths.fpath('temperature_vs_area_fieldsandsources.pdf'),
bbox_inches='tight')
fig14 = pl.figure(14)
fig14.clf()
ax14 = fig14.gca()
ax14.errorbar(pcfittable['higalcolumndens'][maps],
pcfittable['temperature_chi2'][maps],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[maps],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[maps]],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='r')
#ax14.plot([15,30],[15,30],'k--')
ax14.set_xlabel("HiGal Fitted Column Density")
ax14.set_ylabel("Temperature (K)")
fig14.savefig(paths.fpath('chi2_temperature_vs_higaldustcol_byfield.pdf'),
bbox_inches='tight')
ax14.errorbar(pcfittable['higalcolumndens'][~maps],
pcfittable['temperature_chi2'][~maps],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[~maps],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[~maps]],
linestyle='none', marker='s', linewidth=1, alpha=0.5, color='b')
fig14.savefig(paths.fpath('chi2_temperature_vs_higaldustcol_fieldsandsources.pdf'),
bbox_inches='tight')
# pcfittable[np.abs(pcfittable['temperature_chi2']-pcfittable['higaldusttem'])/pcfittable['higaldusttem'] < 1.5].pprint()
fig15 = pl.figure(15)
fig15.clf()
ax15 = fig15.gca()
mask = maps&~lolim_conservative
ax15.errorbar(pcfittable['tkin_turb'][mask],
pcfittable['temperature_chi2'][mask],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[mask],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[mask]],
capsize=0,
markersize=10,
markeredgecolor='none',
linestyle='none', marker='s', linewidth=0.5, alpha=0.6, color='r')
mask = maps&lolim_conservative
ax15.plot(pcfittable['tkin_turb'][mask],
pcfittable['tmin1sig_chi2'][mask],
marker='^',
markersize=10,
markeredgecolor='none',
color='r',
alpha=0.4,
linestyle='none')
mask = (maps) & (~lolim_conservative) & ((pcfittable['tmin1sig_chi2'] > pcfittable['tkin_turb']) | (pcfittable['tmax1sig_chi2'] < pcfittable['tkin_turb']))
ax15.plot(pcfittable['tkin_turb'][mask],
pcfittable['temperature_chi2'][mask],
marker='s',
markersize=15,
markeredgecolor='r',
markerfacecolor='none',
markeredgewidth=0.5,
alpha=0.4,
linestyle='none')
mask = (maps) & (lolim_conservative) & ((pcfittable['tmin1sig_chi2'] > pcfittable['tkin_turb']))
ax15.plot(pcfittable['tkin_turb'][mask],
pcfittable['tmin1sig_chi2'][mask],
marker='^',
markersize=15,
markeredgecolor='r',
markerfacecolor='none',
markeredgewidth=0.5,
alpha=0.4,
linestyle='none')
# Sources with T_predicted >> T_measured
#high_badpredictions = (pcfittable['tkin_turb'] > pcfittable['tmax1sig_chi2'])&(~lolim_conservative)
#high_badpredictions = (pcfittable['tkin_turb'] > 120)&(~lolim_conservative)
#for row,is_map in zip(pcfittable[high_badpredictions], maps[high_badpredictions]):
# xy = np.array((row['tkin_turb'], row['temperature_chi2']))
# ax15.annotate("{0}_{1}".format(row['Source_Name'], row['ComponentID']),
# xy,
# xytext=xy-(15, 7),
# color='r' if is_map else 'b'
# )
ax15.plot([0,200], [0,200], 'k--', alpha=0.5, zorder=-5)
ax15.set_xlabel("Turbulence-driven Temperature (K)")
ax15.set_ylabel("H$_2$CO Temperature (K)")
ax15.set_ylim(10,150)
ax15.set_xlim(10,180)
fig15.savefig(paths.fpath('chi2_temperature_vs_turbulenttemperature_byfield.pdf'),
bbox_inches='tight')
mask = (~maps)&(~lolim_conservative)
ax15.errorbar(pcfittable['tkin_turb'][mask],
pcfittable['temperature_chi2'][mask],
yerr=[(pcfittable['temperature_chi2']-pcfittable['tmin1sig_chi2'])[mask],
(pcfittable['tmax1sig_chi2']-pcfittable['temperature_chi2'])[mask]],
capsize=0,
markeredgecolor='none',
markersize=10,
linestyle='none', marker='s', linewidth=0.5, alpha=0.6, color='b')
mask = (~maps)&lolim_conservative
ax15.plot(pcfittable['tkin_turb'][mask],
pcfittable['tmin1sig_chi2'][mask],
marker='^',
markersize=10,
markeredgecolor='none',
color='b',
alpha=0.4,
linestyle='none')
mask = (~maps) & (~lolim_conservative) & ((pcfittable['tmin1sig_chi2'] > pcfittable['tkin_turb']) | (pcfittable['tmax1sig_chi2'] < pcfittable['tkin_turb']))
ax15.plot(pcfittable['tkin_turb'][mask],
pcfittable['temperature_chi2'][mask],
marker='s',
markersize=15,
markeredgecolor='b',
markerfacecolor='none',
markeredgewidth=0.5,
alpha=0.4,
linestyle='none')
mask = (~maps) & (lolim_conservative) & ((pcfittable['tmin1sig_chi2'] > pcfittable['tkin_turb']))
ax15.plot(pcfittable['tkin_turb'][mask],
pcfittable['tmin1sig_chi2'][mask],
marker='^',
markersize=15,
markeredgecolor='b',
markerfacecolor='none',
markeredgewidth=0.5,
alpha=0.4,
linestyle='none')
ax15.set_ylim(10,150)
fig15.savefig(paths.fpath('chi2_temperature_vs_turbulenttemperature_fieldsandsources_notitle.pdf'),
bbox_inches='tight')
ax15.set_title("Hand-selected regions")
fig15.savefig(paths.fpath('chi2_temperature_vs_turbulenttemperature_fieldsandsources.pdf'),
bbox_inches='tight')
| bsd-3-clause |
ilastikdev/opengm | src/interfaces/python/examples/potts_gui.py | 14 | 1100 | import numpy
import opengm
import vigra
import matplotlib.pyplot as plt
import matplotlib.cm as cm
gradScale = 0.1
energyNotEqual = 0.2
sigma=0.2
resizeFactor=2
img=vigra.impex.readImage('lena.bmp')
shape=img.shape
imgLab=vigra.colors.transform_RGB2Lab(img)
shape=(shape[0]*resizeFactor,shape[1]*resizeFactor)
imgLab=vigra.sampling.resize(imgLab, shape,order=3)
gradMag=vigra.filters.gaussianGradientMagnitude(imgLab,gradScale)
unaries=numpy.zeros([shape[0],shape[1],2])
unaries[:,:,1]=numpy.exp(-1.0*gradMag[:,:,0]*sigma)
unaries[:,:,0]=1.0-unaries[:,:,1]
regularizer=opengm.PottsFunction([2,2],0.0,energyNotEqual)
gm=opengm.grid2d2Order(unaries=unaries,regularizer=regularizer,order='numpy',operator='adder')
inf=opengm.inference.GraphCut(gm)
inf.infer()
argmin=inf.arg().reshape(shape[0:2])
plt.figure(1)
ax=plt.subplot(2,1,1)
plt.imshow(unaries[:,:,1].T, interpolation="nearest")
plt.set_cmap(cm.copper)
plt.colorbar()
ax.set_title('costs / unaries label=1')
ax=plt.subplot(2,1,2)
plt.imshow(argmin.T, interpolation="nearest")
plt.colorbar()
ax.set_title('argmin')
plt.show()
| mit |
spbguru/repo1 | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_wxagg.py | 70 | 9051 | from __future__ import division
"""
backend_wxagg.py
A wxPython backend for Agg. This uses the GUI widgets written by
Jeremy O'Donoghue (jeremy@o-donoghue.com) and the Agg backend by John
Hunter (jdhunter@ace.bsd.uchicago.edu)
Copyright (C) 2003-5 Jeremy O'Donoghue, John Hunter, Illinois Institute of
Technology
License: This work is licensed under the matplotlib license( PSF
compatible). A copy should be included with this source code.
"""
import wx
import matplotlib
from matplotlib.figure import Figure
from backend_agg import FigureCanvasAgg
import backend_wx
from backend_wx import FigureManager, FigureManagerWx, FigureCanvasWx, \
FigureFrameWx, DEBUG_MSG, NavigationToolbar2Wx, error_msg_wx, \
draw_if_interactive, show, Toolbar, backend_version
class FigureFrameWxAgg(FigureFrameWx):
def get_canvas(self, fig):
return FigureCanvasWxAgg(self, -1, fig)
def _get_toolbar(self, statbar):
if matplotlib.rcParams['toolbar']=='classic':
toolbar = NavigationToolbarWx(self.canvas, True)
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2WxAgg(self.canvas)
toolbar.set_status_bar(statbar)
else:
toolbar = None
return toolbar
class FigureCanvasWxAgg(FigureCanvasAgg, FigureCanvasWx):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually)
lives inside a frame instantiated by a FigureManagerWx. The parent
window probably implements a wxSizer to control the displayed
control size - but we give a hint as to our preferred minimum
size.
"""
def draw(self, drawDC=None):
"""
Render the figure using agg.
"""
DEBUG_MSG("draw()", 1, self)
FigureCanvasAgg.draw(self)
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC)
def blit(self, bbox=None):
"""
Transfer the region of the agg buffer defined by bbox to the display.
If bbox is None, the entire buffer is transferred.
"""
if bbox is None:
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self.gui_repaint()
return
l, b, w, h = bbox.bounds
r = l + w
t = b + h
x = int(l)
y = int(self.bitmap.GetHeight() - t)
srcBmp = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destDC = wx.MemoryDC()
destDC.SelectObject(self.bitmap)
destDC.BeginDrawing()
destDC.Blit(x, y, int(w), int(h), srcDC, x, y)
destDC.EndDrawing()
destDC.SelectObject(wx.NullBitmap)
srcDC.SelectObject(wx.NullBitmap)
self.gui_repaint()
filetypes = FigureCanvasAgg.filetypes
def print_figure(self, filename, *args, **kwargs):
# Use pure Agg renderer to draw
FigureCanvasAgg.print_figure(self, filename, *args, **kwargs)
# Restore the current view; this is needed because the
# artist contains methods rely on particular attributes
# of the rendered figure for determining things like
# bounding boxes.
if self._isDrawn:
self.draw()
class NavigationToolbar2WxAgg(NavigationToolbar2Wx):
def get_canvas(self, frame, fig):
return FigureCanvasWxAgg(frame, -1, fig)
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# in order to expose the Figure constructor to the pylab
# interface we need to create the figure here
DEBUG_MSG("new_figure_manager()", 3, None)
backend_wx._create_wx_app()
FigureClass = kwargs.pop('FigureClass', Figure)
fig = FigureClass(*args, **kwargs)
frame = FigureFrameWxAgg(num, fig)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show()
return figmgr
#
# agg/wxPython image conversion functions (wxPython <= 2.6)
#
def _py_convert_agg_to_wx_image(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Image. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
image = wx.EmptyImage(int(agg.width), int(agg.height))
image.SetData(agg.tostring_rgb())
if bbox is None:
# agg => rgb -> image
return image
else:
# agg => rgb -> image => bitmap => clipped bitmap => image
return wx.ImageFromBitmap(_clipped_image_as_bitmap(image, bbox))
def _py_convert_agg_to_wx_bitmap(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgb -> image => bitmap
return wx.BitmapFromImage(_py_convert_agg_to_wx_image(agg, None))
else:
# agg => rgb -> image => bitmap => clipped bitmap
return _clipped_image_as_bitmap(
_py_convert_agg_to_wx_image(agg, None),
bbox)
def _clipped_image_as_bitmap(image, bbox):
"""
Convert the region of a wx.Image bounded by bbox to a wx.Bitmap.
"""
l, b, width, height = bbox.get_bounds()
r = l + width
t = b + height
srcBmp = wx.BitmapFromImage(image)
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destBmp = wx.EmptyBitmap(int(width), int(height))
destDC = wx.MemoryDC()
destDC.SelectObject(destBmp)
destDC.BeginDrawing()
x = int(l)
y = int(image.GetHeight() - t)
destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
destDC.EndDrawing()
srcDC.SelectObject(wx.NullBitmap)
destDC.SelectObject(wx.NullBitmap)
return destBmp
#
# agg/wxPython image conversion functions (wxPython >= 2.8)
#
def _py_WX28_convert_agg_to_wx_image(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Image. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgb -> image
image = wx.EmptyImage(int(agg.width), int(agg.height))
image.SetData(agg.tostring_rgb())
return image
else:
# agg => rgba buffer -> bitmap => clipped bitmap => image
return wx.ImageFromBitmap(_WX28_clipped_agg_as_bitmap(agg, bbox))
def _py_WX28_convert_agg_to_wx_bitmap(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgba buffer -> bitmap
return wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),
agg.buffer_rgba(0, 0))
else:
# agg => rgba buffer -> bitmap => clipped bitmap
return _WX28_clipped_agg_as_bitmap(agg, bbox)
def _WX28_clipped_agg_as_bitmap(agg, bbox):
"""
    Convert the region of the agg buffer bounded by bbox to a wx.Bitmap.
Note: agg must be a backend_agg.RendererAgg instance.
"""
l, b, width, height = bbox.get_bounds()
r = l + width
t = b + height
srcBmp = wx.BitmapFromBufferRGBA(int(agg.width), int(agg.height),
agg.buffer_rgba(0, 0))
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destBmp = wx.EmptyBitmap(int(width), int(height))
destDC = wx.MemoryDC()
destDC.SelectObject(destBmp)
destDC.BeginDrawing()
x = int(l)
y = int(int(agg.height) - t)
destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
destDC.EndDrawing()
srcDC.SelectObject(wx.NullBitmap)
destDC.SelectObject(wx.NullBitmap)
return destBmp
def _use_accelerator(state):
"""
Enable or disable the WXAgg accelerator, if it is present and is also
compatible with whatever version of wxPython is in use.
"""
global _convert_agg_to_wx_image
global _convert_agg_to_wx_bitmap
if getattr(wx, '__version__', '0.0')[0:3] < '2.8':
# wxPython < 2.8, so use the C++ accelerator or the Python routines
if state and _wxagg is not None:
_convert_agg_to_wx_image = _wxagg.convert_agg_to_wx_image
_convert_agg_to_wx_bitmap = _wxagg.convert_agg_to_wx_bitmap
else:
_convert_agg_to_wx_image = _py_convert_agg_to_wx_image
_convert_agg_to_wx_bitmap = _py_convert_agg_to_wx_bitmap
else:
# wxPython >= 2.8, so use the accelerated Python routines
_convert_agg_to_wx_image = _py_WX28_convert_agg_to_wx_image
_convert_agg_to_wx_bitmap = _py_WX28_convert_agg_to_wx_bitmap
# try to load the WXAgg accelerator
try:
import _wxagg
except ImportError:
_wxagg = None
# if it's present, use it
_use_accelerator(True)
| gpl-3.0 |
MartinDelzant/scikit-learn | examples/classification/plot_lda.py | 70 | 2413 | """
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LinearDiscriminantAnalysis(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="Linear Discriminant Analysis with shrinkage", color='r')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="Linear Discriminant Analysis", color='g')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('Linear Discriminant Analysis vs. \
shrinkage Linear Discriminant Analysis (1 discriminative feature)')
plt.show()
| bsd-3-clause |
jmetzen/scikit-learn | examples/svm/plot_oneclass.py | 80 | 2338 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.PuBu)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='darkred')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='palevioletred')
s = 40
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white', s=s)
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='blueviolet', s=s)
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='gold', s=s)
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
pdamodaran/yellowbrick | yellowbrick/text/dispersion.py | 1 | 10916 | # yellowbrick.text.dispersion
# Implementations of lexical dispersions for text visualization.
#
# Author: Larry Gray
# Created: 2018-06-21 10:06
#
# Copyright (C) 2018 District Data Labs
# For license information, see LICENSE.txt
#
# ID: dispersion.py [] lwgray@gmail.com $
"""
Implementation of lexical dispersion for text visualization
"""
##########################################################################
## Imports
##########################################################################
from collections import defaultdict
import itertools
from yellowbrick.text.base import TextVisualizer
from yellowbrick.style.colors import resolve_colors
from yellowbrick.exceptions import YellowbrickValueError
import numpy as np
##########################################################################
## Dispersion Plot Visualizer
##########################################################################
class DispersionPlot(TextVisualizer):
"""
    DispersionPlotVisualizer allows for visualization of the lexical dispersion
    of words in a corpus. Lexical dispersion is a measure of a word's
    homogeneity across the parts of a corpus. This plot marks each occurrence
    of a target word by its offset, in words, from the beginning of the corpus.
Parameters
----------
target_words : list
A list of target words whose dispersion across a corpus passed at fit
will be visualized.
ax : matplotlib axes, default: None
The axes to plot the figure on.
labels : list of strings
The names of the classes in the target, used to create a legend.
Labels must match names of classes in sorted order.
colors : list or tuple of colors
Specify the colors for each individual class
colormap : string or matplotlib cmap
Qualitative colormap for discrete target
ignore_case : boolean, default: False
        If True, the target words are matched against the corpus case-insensitively.
annotate_docs : boolean, default: False
Specify whether document boundaries will be displayed. Vertical lines
are positioned at the end of each document.
kwargs : dict
Pass any additional keyword arguments to the super class.
These parameters can be influenced later on in the visualization
process, but can and should be set as early as possible.
"""
# NOTE: cannot be np.nan
NULL_CLASS = None
def __init__(self, target_words, ax=None, colors=None, ignore_case=False,
annotate_docs=False, labels=None, colormap=None, **kwargs):
super(DispersionPlot, self).__init__(ax=ax, **kwargs)
self.labels = labels
self.colors = colors
self.colormap = colormap
self.target_words = target_words
self.ignore_case = ignore_case
self.annotate_docs = annotate_docs
def _compute_dispersion(self, text, y):
self.boundaries_ = []
offset = 0
if y is None:
y = itertools.repeat(None)
for doc, target in zip(text, y):
for word in doc:
if self.ignore_case:
word = word.lower()
# NOTE: this will find all indices if duplicate words are supplied
# In the case that word is not in target words, any empty list is
# returned and no data will be yielded
offset += 1
for y_coord in (self.indexed_words_ == word).nonzero()[0]:
y_coord = int(y_coord)
yield (offset, y_coord, target)
if self.annotate_docs:
self.boundaries_.append(offset)
self.boundaries_ = np.array(self.boundaries_, dtype=int)
def _check_missing_words(self, points):
for index in range(len(self.indexed_words_)):
            if index not in points[:, 1]:
                raise YellowbrickValueError((
                    "The indexed word '{}' is not found in "
                    "this corpus"
                ).format(self.indexed_words_[index]))
def fit(self, X, y=None, **kwargs):
"""
The fit method is the primary drawing input for the dispersion
visualization.
Parameters
----------
X : list or generator
Should be provided as a list of documents or a generator
that yields a list of documents that contain a list of
words in the order they appear in the document.
y : ndarray or Series of length n
An optional array or series of target or class values for
instances. If this is specified, then the points will be colored
according to their class.
kwargs : dict
Pass generic arguments to the drawing method
Returns
-------
self : instance
Returns the instance of the transformer/visualizer
"""
if y is not None:
self.classes_ = np.unique(y)
elif y is None and self.labels is not None:
self.classes_ = np.array([self.labels[0]])
else:
self.classes_ = np.array([self.NULL_CLASS])
# Create an index (e.g. the y position) for the target words
self.indexed_words_ = np.flip(self.target_words, axis=0)
if self.ignore_case:
self.indexed_words_ = np.array([w.lower() for w in self.indexed_words_])
# Stack is used to create a 2D array from the generator
try:
points_target = np.stack(self._compute_dispersion(X, y))
except ValueError:
raise YellowbrickValueError((
"No indexed words were found in the corpus"
))
points = np.stack(zip(points_target[:,0].astype(int),
points_target[:,1].astype(int)))
self.target = points_target[:,2]
self._check_missing_words(points)
self.draw(points, self.target)
return self
def draw(self, points, target=None, **kwargs):
"""
Called from the fit method, this method creates the canvas and
draws the plot on it.
Parameters
----------
kwargs: generic keyword arguments.
"""
# Resolve the labels with the classes
labels = self.labels if self.labels is not None else self.classes_
if len(labels) != len(self.classes_):
raise YellowbrickValueError((
"number of supplied labels ({}) does not "
"match the number of classes ({})"
).format(len(labels), len(self.classes_)))
# Create the color mapping for the labels.
color_values = resolve_colors(
            n_colors=len(labels), colormap=self.colormap, colors=self.colors)
colors = dict(zip(labels, color_values))
# Transform labels into a map of class to label
labels = dict(zip(self.classes_, labels))
# Define boundaries with a vertical line
if self.annotate_docs:
for xcoords in self.boundaries_:
self.ax.axvline(x=xcoords, color='lightgray', linestyle='dashed')
series = defaultdict(lambda: {'x':[], 'y':[]})
if target is not None:
for point, t in zip(points, target):
label = labels[t]
series[label]['x'].append(point[0])
series[label]['y'].append(point[1])
else:
label = self.classes_[0]
for x, y in points:
series[label]['x'].append(x)
series[label]['y'].append(y)
for label, points in series.items():
self.ax.scatter(points['x'], points['y'], marker='|',
c=colors[label], zorder=100, label=label)
self.ax.set_yticks(list(range(len(self.indexed_words_))))
self.ax.set_yticklabels(self.indexed_words_)
def finalize(self, **kwargs):
"""
The finalize method executes any subclass-specific axes
finalization steps. The user calls poof & poof calls finalize.
Parameters
----------
kwargs: generic keyword arguments.
"""
self.ax.set_ylim(-1, len(self.indexed_words_))
self.ax.set_title("Lexical Dispersion Plot")
self.ax.set_xlabel("Word Offset")
self.ax.grid(False)
# Add the legend outside of the figure box.
if not all(self.classes_ == np.array([self.NULL_CLASS])):
box = self.ax.get_position()
self.ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
self.ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
##########################################################################
## Quick Method
##########################################################################
def dispersion(words, corpus, y=None, ax=None, colors=None, colormap=None,
labels=None, annotate_docs=False, ignore_case=False, **kwargs):
""" Displays lexical dispersion plot for words in a corpus
    This helper function is a quick wrapper to utilize the DispersionPlot
    Visualizer for one-off analysis.
Parameters
----------
words : list
A list of words whose dispersion will be examined within a corpus
y : ndarray or Series of length n
An optional array or series of target or class values for
instances. If this is specified, then the points will be colored
according to their class.
corpus : list
Should be provided as a list of documents that contain
a list of words in the order they appear in the document.
ax : matplotlib axes, default: None
The axes to plot the figure on.
labels : list of strings
The names of the classes in the target, used to create a legend.
Labels must match names of classes in sorted order.
colors : list or tuple of colors
Specify the colors for each individual class
colormap : string or matplotlib cmap
Qualitative colormap for discrete target
annotate_docs : boolean, default: False
Specify whether document boundaries will be displayed. Vertical lines
are positioned at the end of each document.
ignore_case : boolean, default: False
        If True, the target words are matched against the corpus case-insensitively.
kwargs : dict
Pass any additional keyword arguments to the super class.
Returns
-------
ax: matplotlib axes
Returns the axes that the plot was drawn on
"""
# Instantiate the visualizer
visualizer = DispersionPlot(
words, ax=ax, colors=colors, colormap=colormap,
ignore_case=ignore_case, labels=labels,
annotate_docs=annotate_docs, **kwargs
)
# Fit and transform the visualizer (calls draw)
visualizer.fit(corpus, y, **kwargs)
# Return the axes object on the visualizer
return visualizer.ax
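# A minimal usage sketch of the quick method above (not part of the public
# API). The corpus and target words are made up; each document is simply a
# list of tokens in the order they appear.
def _example_dispersion():
    corpus = [
        "the cat sat on the mat".split(),
        "the dog sat on the log".split(),
        "the cat chased the dog".split(),
    ]
    return dispersion(["cat", "dog"], corpus, annotate_docs=True)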
| apache-2.0 |
robcarver17/systematictradingexamples | plots_for_perhaps/compareoptmethods.py | 1 | 22426 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot, show, xticks, xlabel, ylabel, legend, yscale, title, savefig, rcParams, figure, hist, text, bar, subplots
import Image
def file_process(filename):
fig = plt.gcf()
fig.set_size_inches(18.5,10.5)
fig.savefig("/home/rob/%s.png" % filename,dpi=300)
fig.savefig("/home/rob/%sLOWRES.png" % filename,dpi=50)
Image.open("/home/rob/%s.png" % filename).convert('L').save("/home/rob/%s.jpg" % filename)
Image.open("/home/rob/%sLOWRES.png" % filename).convert('L').save("/home/rob/%sLOWRES.jpg" % filename)
"""
compare:
handcrafting
bootstrapped
one shot
equal weights
market cap weights
"""
import pandas as pd
from datetime import datetime as dt
def read_ts_csv(fname, dindex="Date"):
data=pd.read_csv(fname)
dateindex=[dt.strptime(dx, "%d/%m/%y") for dx in list(data[dindex])]
data.index=dateindex
del(data[dindex])
return data
def calc_asset_returns(rawdata, tickers):
asset_returns=pd.concat([get_monthly_tr(tickname, rawdata) for tickname in tickers], axis=1)
asset_returns.columns=tickers
return asset_returns
def get_monthly_tr(tickname, rawdata):
total_returns=rawdata[tickname+"_TR"]
return (total_returns / total_returns.shift(1)) - 1.0
def portfolio_return(asset_returns, cash_weights):
index_returns=asset_returns.cumsum().ffill().diff()
cash_align = cash_weights.reindex(asset_returns.index, method="ffill")
cash_align[np.isnan(index_returns)]=0.0
cash_align[np.isnan(cash_align)]=0.0
vols=pd.ewmstd(asset_returns, span=100, min_periods=1)
riskweights=pd.DataFrame(cash_align.values / vols.values, index=vols.index)
riskweights.columns=asset_returns.columns
riskweights[np.isnan(riskweights)]=0.0
def _rowfix(x):
if all([y==0.0 for y in x]):
return x
sumx=sum(x)
return [y/sumx for y in x]
riskweights = riskweights.apply(_rowfix, axis=1)
portfolio_returns=asset_returns*riskweights
portfolio_returns[np.isnan(portfolio_returns)]=0.0
portfolio_returns=portfolio_returns.sum(axis=1)
return portfolio_returns
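## Illustrative only: two assets with made-up random daily returns and a fixed
## 50/50 cash weighting throughout. This just shows the expected shapes of the
## inputs to portfolio_return; the asset names are hypothetical.
def example_portfolio_return():
    dates = pd.date_range(pd.datetime(2000, 1, 1), periods=500)
    asset_returns = pd.DataFrame(np.random.randn(500, 2) * 0.01, index=dates,
                                 columns=["SP500", "US10"])
    cash_weights = pd.DataFrame([[0.5, 0.5]] * 500, index=dates,
                                columns=["SP500", "US10"])
    return portfolio_return(asset_returns, cash_weights)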
import matplotlib.pyplot as plt
from scipy import stats
import pandas as pd
import numpy as np
from datetime import datetime as dt
import datetime
from scipy.optimize import minimize
from copy import copy
import random
def correlation_matrix(returns):
"""
Calcs a correlation matrix using weekly returns from a pandas time series
We use weekly returns because otherwise end of day effects, especially over time zones, give
unrealistically low correlations
"""
asset_index=returns.cumsum().ffill()
asset_index=asset_index.resample('1W') ## Only want index, fill method is irrelevant
asset_index = asset_index - asset_index.shift(1)
return asset_index.corr().values
def create_dull_pd_matrix(dullvalue=0.0, dullname="A", startdate=pd.datetime(1970,1,1).date(), enddate=datetime.datetime.now().date(), index=None):
"""
create a single valued pd matrix
"""
if index is None:
index=pd.date_range(startdate, enddate)
dullvalue=np.array([dullvalue]*len(index))
ans=pd.DataFrame(dullvalue, index, columns=[dullname])
return ans
def addem(weights):
## Used for constraints
return 1.0 - sum(weights)
def variance(weights, sigma):
## returns the variance (NOT standard deviation) given weights and sigma
return (np.matrix(weights)*sigma*np.matrix(weights).transpose())[0,0]
def neg_SR(weights, sigma, mus):
    ## Returns minus the Sharpe Ratio (as we're minimising)
    """
    Minus the Sharpe ratio of a portfolio with the given weights, covariance
    matrix sigma and mean return vector mus (no annualisation applied)
    """
estreturn=(np.matrix(weights)*mus)[0,0]
std_dev=(variance(weights,sigma)**.5)
return -estreturn/std_dev
def sigma_from_corr(std, corr):
sigma=std*corr*std
return sigma
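## Worked example with made-up numbers: two assets, each with a standard
## deviation of 0.10, correlation 0.5, and expected returns of 0.05. Equal
## standard deviations are used so the element-wise product in sigma_from_corr
## gives the usual covariance matrix. With equal weights the Sharpe ratio is
## 0.05 / sqrt(0.0075) = 0.577, so neg_SR returns roughly -0.577.
def example_neg_SR():
    std = np.array([[0.1, 0.1]])
    corr = np.array([[1.0, 0.5], [0.5, 1.0]])
    sigma = sigma_from_corr(std, corr)
    mus = np.array([[0.05], [0.05]])
    return neg_SR([0.5, 0.5], sigma, mus)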
def basic_opt(std,corr,mus):
number_assets=mus.shape[0]
sigma=sigma_from_corr(std, corr)
start_weights=[1.0/number_assets]*number_assets
## Constraints - positive weights, adding to 1.0
bounds=[(0.0,1.0)]*number_assets
cdict=[{'type':'eq', 'fun':addem}]
return minimize(neg_SR_riskfree, start_weights, (sigma, mus), method='SLSQP', bounds=bounds, constraints=cdict, tol=0.00001)
def neg_SR_riskfree(weights, sigma, mus, riskfree=0.005):
    ## Returns minus the Sharpe Ratio (as we're minimising)
    """
    Minus the Sharpe ratio over a risk free rate, given weights, covariance
    matrix sigma and mean return vector mus
    """
estreturn=(np.matrix(weights)*mus)[0,0] - riskfree
std_dev=(variance(weights,sigma)**.5)
return -estreturn/std_dev
def equalise_vols(returns, default_vol):
"""
Normalises returns so they have the in sample vol of defaul_vol (annualised)
Assumes daily returns
"""
factors=(default_vol/16.0)/returns.std(axis=0)
facmat=create_dull_pd_matrix(dullvalue=factors, dullname=returns.columns, index=returns.index)
norm_returns=returns*facmat
norm_returns.columns=returns.columns
return norm_returns
def offdiag_matrix(offvalue, nlength):
identity=np.diag([1.0]*nlength)
for x in range(nlength):
for y in range(nlength):
if x!=y:
identity[x][y]=offvalue
return identity
def get_avg_corr(sigma):
new_sigma=copy(sigma)
np.fill_diagonal(new_sigma,np.nan)
return np.nanmean(new_sigma)
def nearest_to_listvals(x, lvalues=[0.0, 0.25, 0.5, 0.75, 0.9]):
## return x rounded to nearest of lvalues
if len(lvalues)==1:
return lvalues[0]
d1=abs(x - lvalues[0])
d2=abs(x - lvalues[1])
if d1<d2:
return lvalues[0]
newlvalues=lvalues[1:]
return nearest_to_listvals(x, newlvalues)
def handcrafted(returns, equalisevols=True, default_vol=0.2):
"""
Handcrafted optimiser
"""
count_assets=len(returns.columns)
try:
assert equalisevols is True
assert count_assets<=3
except:
raise Exception("Handcrafting only works with equalised vols and 3 or fewer assets")
if count_assets<3:
## Equal weights
return [1.0/count_assets]*count_assets
est_corr=returns.corr().values
c1=nearest_to_listvals(est_corr[0][1])
c2=nearest_to_listvals(est_corr[0][2])
c3=nearest_to_listvals(est_corr[1][2])
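    ## HANDCRAFTED_WTS is assumed to be defined elsewhere in this module: a
    ## DataFrame of candidate weight rows with columns c1, c2, c3 (the rounded
    ## correlations) and w1, w2, w3 (the corresponding weights)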
wts_to_use=HANDCRAFTED_WTS[(HANDCRAFTED_WTS.c1==c1) & (HANDCRAFTED_WTS.c2==c2) & (HANDCRAFTED_WTS.c3==c3)].irow(0)
return [wts_to_use.w1, wts_to_use.w2, wts_to_use.w3]
def opt_shrinkage(returns, shrinkage_factors, equalisevols=True, default_vol=0.2):
"""
Returns the optimal portfolio for the dataframe returns using shrinkage
shrinkage_factors is a tuple, shrinkage of mean and correlation
If equalisevols=True then normalises returns to have same standard deviation; the weights returned
will be 'risk weightings'
"""
if equalisevols:
use_returns=equalise_vols(returns, default_vol)
else:
use_returns=returns
(shrinkage_mean, shrinkage_corr)=shrinkage_factors
## Sigma matrix
## Use correlation and then convert back to variance
est_corr=use_returns.corr().values
avg_corr=get_avg_corr(est_corr)
prior_corr=offdiag_matrix(avg_corr, est_corr.shape[0])
sigma_corr=shrinkage_corr*prior_corr+(1-shrinkage_corr)*est_corr
cov_vector=use_returns.std().values
sigma=cov_vector*sigma_corr*cov_vector
## mus vector
avg_return=np.mean(use_returns.mean())
est_mus=np.array([use_returns[asset_name].mean() for asset_name in use_returns.columns], ndmin=2).transpose()
prior_mus=np.array([avg_return for asset_name in use_returns.columns], ndmin=2).transpose()
mus=shrinkage_mean*prior_mus+(1-shrinkage_mean)*est_mus
## Starting weights
number_assets=use_returns.shape[1]
start_weights=[1.0/number_assets]*number_assets
## Constraints - positive weights, adding to 1.0
bounds=[(0.0,1.0)]*number_assets
cdict=[{'type':'eq', 'fun':addem}]
ans=minimize(neg_SR, start_weights, (sigma, mus), method='SLSQP', bounds=bounds, constraints=cdict, tol=0.00001)
return ans['x']
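## Illustrative only, with made-up random returns for three assets. Shrinkage
## factors of (1.0, 1.0) ignore the sample estimates entirely (equal means and
## the average correlation everywhere), so the weights come out very close to
## one third each; (0.0, 0.0) would use the raw sample estimates instead.
def example_opt_shrinkage():
    dates = pd.date_range(pd.datetime(2000, 1, 1), periods=1000)
    returns = pd.DataFrame(np.random.randn(1000, 3) * 0.01, index=dates,
                           columns=["A", "B", "C"])
    return opt_shrinkage(returns, shrinkage_factors=(1.0, 1.0))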
def handcraft_equal(returns):
"""
dynamic handcrafting, equal weights only
"""
## RETURNS Correlation matrix
use_returns=equalise_vols(returns, default_vol=16.0)
## Sigma matrix = correlations
sigma=use_returns.cov()
sigma[sigma<0.0]=0.0
ungroupedreturns=dict([(x,returns[x]) for x in returns.columns])
tree_data=hc_sigma(sigma, ungroupedreturns)
    tree_data=grouping_tree(tree_data, sigma)
weights=tree_to_weights(tree_data)
return weights
def hc_sigma(ungrouped_sigma, ungroupedreturns, groupdata=None):
"""
handcraft weights from sigma matrix
Algo:
- Find pair of assets with highest correlation
- Form them into a new group with equal weights
- The group becomes like a new asset
- Once we only have two assets left, stop.
Need to
"""
if len(ungroupedreturns)==1:
return groupdata[1]
if groupdata is None:
## first run
## groupdata stores grouping information
## To begin with each group just consists of one asset
groupdata=[[],list(ungrouped_sigma.columns)]
groupedreturns=dict()
## iteration
while len(ungroupedreturns)>0:
## current_sigma consists of the correlation of things we currently have
if len(ungroupedreturns)==1:
idx_list=[0]
else:
idx_list=find_highest_corr(ungrouped_sigma)
name_list=tuple([ungrouped_sigma.columns[idx] for idx in idx_list])
## pair those things up
(ungrouped_sigma, ungroupedreturns, groupedreturns,
groupdata)=group_assets(ungrouped_sigma, ungroupedreturns, groupedreturns, groupdata, idx_list, name_list)
new_returns=pd.concat(groupedreturns, axis=1)
new_sigma=new_returns.corr()
## recursive
return hc_sigma(new_sigma, groupedreturns, groupdata=[[],groupdata[0]])
def find_highest_corr(sigmat):
new_sigmat=copy(sigmat.values)
np.fill_diagonal(new_sigmat, -100.0)
(i,j)=np.unravel_index(new_sigmat.argmax(), new_sigmat.shape)
return (i,j)
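## For example, with this made-up correlation matrix the largest off-diagonal
## entry is between "A" and "B", so find_highest_corr returns (0, 1):
def example_find_highest_corr():
    corr = pd.DataFrame([[1.0, 0.9, 0.1],
                         [0.9, 1.0, 0.2],
                         [0.1, 0.2, 1.0]],
                        index=["A", "B", "C"], columns=["A", "B", "C"])
    return find_highest_corr(corr)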
def group_assets(ungrouped_sigma, ungroupedreturns, groupedreturns, groupdata, idx_list, name_list):
"""
Group assets
"""
todelete=[]
names=[]
grouping=[]
group_returns=[]
weights=[1.0/len(idx_list)]*len(idx_list) ## could have more complex thing here...
for (itemweight,idx, iname) in zip(weights,idx_list, name_list):
gi=groupdata[1][idx]
grouping.append(gi)
gri=ungroupedreturns.pop(iname)
group_returns.append(gri*itemweight)
names.append(gri.name)
ungrouped_sigma=ungrouped_sigma.drop(iname, axis=0)
ungrouped_sigma=ungrouped_sigma.drop(iname, axis=1)
todelete.append(idx)
groupdata[0].append(grouping)
gr_returns=pd.concat(group_returns, axis=1)
gr_returns=gr_returns.sum(axis=1)
gr_returns.name="[%s]" % "+".join(names)
print "Pairing %s" % ", ".join(names)
groupedreturns[gr_returns.name]=gr_returns
groupdata[1]=[element for eindex, element in enumerate(groupdata[1]) if eindex not in todelete]
return (ungrouped_sigma, ungroupedreturns, groupedreturns,
groupdata)
def grouping_tree(tree_data, sigma):
"""
Group branches of 2 into larger if possible
"""
pass
def corrs_in_group(group, sigma):
asset_list=sum(group, [])
    littlesigma=sigma.loc[asset_list, asset_list]
    return littlesigma
def corr_from_leaf(leaf, sigma):
return sigma[leaf[0]][leaf[1]]
def tree_to_weights(tree_data):
"""
convert a tree into weights
"""
pass
def markosolver(returns, equalisemeans=False, equalisevols=True, default_vol=0.2, default_SR=1.0):
"""
Returns the optimal portfolio for the dataframe returns
If equalisemeans=True then assumes all assets have same return if False uses the asset means
If equalisevols=True then normalises returns to have same standard deviation; the weights returned
will be 'risk weightings'
Note if usemeans=True and equalisevols=True effectively assumes all assets have same sharpe ratio
"""
if equalisevols:
use_returns=equalise_vols(returns, default_vol)
else:
use_returns=returns
## Sigma matrix
sigma=use_returns.cov().values
## Expected mean returns
est_mus=[use_returns[asset_name].mean() for asset_name in use_returns.columns]
missingvals=[np.isnan(x) for x in est_mus]
if equalisemeans:
## Don't use the data - Set to the average Sharpe Ratio
mus=[default_vol*default_SR]*returns.shape[1]
else:
mus=est_mus
mus=np.array(mus, ndmin=2).transpose()
## Starting weights
number_assets=use_returns.shape[1]
start_weights=[1.0/number_assets]*number_assets
## Constraints - positive weights, adding to 1.0
bounds=[(0.0,1.0)]*number_assets
cdict=[{'type':'eq', 'fun':addem}]
ans=minimize(neg_SR, start_weights, (sigma, mus), method='SLSQP', bounds=bounds, constraints=cdict, tol=0.00001)
wts=ans['x']
return wts
def bootstrap_portfolio(returns_to_bs, monte_carlo=200, monte_length=250, equalisemeans=False, equalisevols=True, default_vol=0.2, default_SR=1.0):
"""
Given dataframe of returns; returns_to_bs, performs a bootstrap optimisation
We run monte_carlo numbers of bootstraps
Each one contains monte_length days drawn randomly, with replacement
(so *not* block bootstrapping)
The other arguments are passed to the optimisation function markosolver
    Note - doesn't deal gracefully with missing data. Will end up downweighting assets depending on how
    much data is missing in each bootstrap. You'll need to think about how to solve this problem.
"""
weightlist=[]
for unused_index in range(monte_carlo):
bs_idx=[int(random.uniform(0,1)*len(returns_to_bs)) for i in range(monte_length)]
returns=returns_to_bs.iloc[bs_idx,:]
weight=markosolver(returns, equalisemeans=equalisemeans, equalisevols=equalisevols, default_vol=default_vol, default_SR=default_SR)
weightlist.append(weight)
    ### We can take an average here; only because our weights always add up to 1. If that isn't true
    ### then you will need to do some kind of renormalisation
theweights_mean=list(np.mean(weightlist, axis=0))
return theweights_mean
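## Illustrative sketch (an assumption, not part of the original script): one simple
## way to soften the missing-data caveat above is to bootstrap only over rows where
## every asset has data. The helper name below is hypothetical.
def bootstrap_portfolio_complete_rows(returns_to_bs, **kwargs):
    ## drop any row with a missing value, then bootstrap as usual
    complete_rows=returns_to_bs.dropna()
    return bootstrap_portfolio(complete_rows, **kwargs)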
def optimise_over_periods(data, date_method, fit_method, rollyears=20, equalisemeans=False, equalisevols=True,
monte_carlo=100, monte_length=None, shrinkage_factors=(0.5, 0.5),
weightdf=None):
"""
Do an optimisation
Returns data frame of weights
    Note if fitting in sample the weights will be identical in every period (somewhat boring)
    Doesn't deal with e.g. missing data in certain subperiods
"""
if monte_length is None:
monte_length=int(len(data.index)*.1)
## Get the periods
fit_periods=generate_fitting_dates(data, date_method, rollyears=rollyears)
## Do the fitting
## Build up a list of weights, which we'll concat
weight_list=[]
for fit_tuple in fit_periods:
## Fit on the slice defined by first two parts of the tuple
period_subset_data=data[fit_tuple[0]:fit_tuple[1]]
## Can be slow, if bootstrapping, so indicate where we are
print "Fitting data for %s to %s" % (str(fit_tuple[2]), str(fit_tuple[3]))
if fit_method=="one_period":
weights=markosolver(period_subset_data, equalisemeans=equalisemeans, equalisevols=equalisevols)
elif fit_method=="bootstrap":
weights=bootstrap_portfolio(period_subset_data, equalisemeans=equalisemeans,
equalisevols=equalisevols, monte_carlo=monte_carlo,
monte_length=monte_length)
elif fit_method=="shrinkage":
weights=opt_shrinkage(period_subset_data, shrinkage_factors=shrinkage_factors, equalisevols=equalisevols)
elif fit_method=="fixed":
weights=[float(weightdf[weightdf.Country==ticker].Weight.values) for ticker in list(period_subset_data.columns)]
else:
raise Exception("Fitting method %s unknown" % fit_method)
## We adjust dates slightly to ensure no overlaps
dindex=[fit_tuple[2]+datetime.timedelta(seconds=1), fit_tuple[3]-datetime.timedelta(seconds=1)]
## create a double row to delineate start and end of test period
weight_row=pd.DataFrame([weights]*2, index=dindex, columns=data.columns)
weight_list.append(weight_row)
weight_df=pd.concat(weight_list, axis=0)
return weight_df
"""
Now we need to do this with expanding or rolling window
"""
"""
Generate the date tuples
"""
def generate_fitting_dates(data, date_method, rollyears=20):
"""
    generate a list of 4-tuples, one element for each year in the data
each tuple contains [fit_start, fit_end, period_start, period_end] datetime objects
the last period will be a 'stub' if we haven't got an exact number of years
date_method can be one of 'in_sample', 'expanding', 'rolling'
if 'rolling' then use rollyears variable
"""
start_date=data.index[0]
end_date=data.index[-1]
## generate list of dates, one year apart, including the final date
yearstarts=list(pd.date_range(start_date, end_date, freq="12M"))+[end_date]
## loop through each period
periods=[]
for tidx in range(len(yearstarts))[1:-1]:
## these are the dates we test in
period_start=yearstarts[tidx]
period_end=yearstarts[tidx+1]
## now generate the dates we use to fit
if date_method=="in_sample":
fit_start=start_date
elif date_method=="expanding":
fit_start=start_date
elif date_method=="rolling":
yearidx_to_use=max(0, tidx-rollyears)
fit_start=yearstarts[yearidx_to_use]
else:
raise Exception("don't recognise date_method %s" % date_method)
if date_method=="in_sample":
fit_end=end_date
elif date_method in ['rolling', 'expanding']:
fit_end=period_start
else:
raise Exception("don't recognise date_method %s " % date_method)
periods.append([fit_start, fit_end, period_start, period_end])
## give the user back the list of periods
return periods
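## For example (illustrative only; actual dates depend on the data): with
## date_method="expanding" each element of the returned list looks like
## [fit_start, fit_end, period_start, period_end], and fit_end equals period_start.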
rawdata=read_ts_csv("/home/rob/workspace/systematictradingexamples/plots_for_perhaps/MSCI_data.csv")
refdata=pd.read_csv("/home/rob/workspace/systematictradingexamples/plots_for_perhaps/MSCI_ref.csv")
tickers=list(refdata[(refdata.EmorDEV=="DEV") & (refdata.Type=="Country")].Country.values) #mom 12bp
#tickers=list(refdata[refdata.Type=="Country"].Country.values) #mom 12bp
fix_hcweights=pd.read_csv("/home/rob/workspace/systematictradingexamples/plots_for_perhaps/devhcweights.csv")
fix_capweights=pd.read_csv("/home/rob/workspace/systematictradingexamples/plots_for_perhaps/devcapweights.csv")
fix_eqweights=pd.DataFrame(dict(Country=tickers, Weight=[1.0/len(tickers)]*len(tickers)))
data=calc_asset_returns(rawdata, tickers)
### IDEA: to bootstrap the results
### Repeatedly draw from 'data' to make new pseudo series
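## Illustrative sketch of that idea (not used below; the function name is hypothetical).
## It reuses the row-resampling idiom from bootstrap_portfolio above and assumes
## pandas and random are already imported in this script.
def make_pseudo_series(returns, length=None):
    ## draw row indices with replacement and return a new pseudo returns dataframe
    if length is None:
        length=len(returns)
    pseudo_idx=[int(random.uniform(0,1)*len(returns)) for notUsed in range(length)]
    return returns.iloc[pseudo_idx,:]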
oneperiodweights=optimise_over_periods(data, "expanding", "one_period", equalisemeans=False, equalisevols=True)
#bootstrapweights=optimise_over_periods(data, "expanding", "bootstrap", equalisemeans=True, equalisevols=True)
exposthcweights=optimise_over_periods(data, "expanding", "fixed", weightdf=fix_hcweights, equalisemeans=True, equalisevols=True)
equalweights=optimise_over_periods(data, "expanding", "fixed", weightdf=fix_eqweights, equalisemeans=True, equalisevols=True)
marketcapweights=optimise_over_periods(data, "expanding", "fixed", weightdf=fix_capweights, equalisemeans=True, equalisevols=True)
index_returns=(1.0+data).cumprod().ffill()
last_return=index_returns.irow(-1).values
last_return=pd.DataFrame(np.array([last_return]*len(data)), data.index)
last_return.columns=data.columns
index_returns = index_returns / last_return
marketcapweights = marketcapweights.reindex(index_returns.index, method="ffill")
marketcapweights=marketcapweights*index_returns
marketcapweights=marketcapweights.ffill()
## portfolio, take out missing weights
p1=portfolio_return(data, oneperiodweights)[pd.datetime(1994,1,1):]
#p2=portfolio_return(data, bootstrapweights)
p3=portfolio_return(data, exposthcweights)[pd.datetime(1994,1,1):]
p4=portfolio_return(data, equalweights)[pd.datetime(1994,1,1):]
p5=portfolio_return(data, marketcapweights)[pd.datetime(1994,1,1):]
drag1=p3 - p1
drag2=p4 - p5
def stats(x):
ann_mean=x.mean()*12
ann_std = x.std()*(12**.5)
geo_mean = ann_mean - (ann_std**2)/2.0
sharpe = geo_mean / ann_std
return (ann_mean, ann_std, geo_mean, sharpe)
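## Note: stats() assumes monthly returns (hence the 12 and sqrt(12) annualisation)
## and uses the usual approximation geometric mean ~ arithmetic mean - variance/2.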
print stats(p1)
print stats(p3)
print stats(p4)
print stats(p5)
toplot=pd.concat([p1, p3, p4, p5], axis=1)
toplot.columns=["Optimised", "Handcraft", "Equal", "Market Cap"]
toplot.cumsum().plot()
show()
p1.cumsum().plot(color="black", ls="solid")
p3.cumsum().plot(color="gray", ls="solid")
p4.cumsum().plot(color="black", ls="dashed")
p5.cumsum().plot(color="gray", ls="dashed")
legend( ["Optimised", "Handcraft", "Equal", "Market Cap"], loc="upper left")
frame=plt.gca()
#frame.get_yaxis().set_visible(False)
rcParams.update({'font.size': 18})
file_process("compareoptmethods")
show()
drag1.cumsum().plot(color="gray", ls="solid")
legend( [ "Handcraft vs MktCap"], loc="upper left")
frame=plt.gca()
#frame.get_yaxis().set_visible(False)
rcParams.update({'font.size': 18})
file_process("compareoptmethodstracking")
show()
| gpl-2.0 |
nhuntwalker/astroML | book_figures/chapter1/fig_moving_objects.py | 3 | 1911 | """
SDSS Moving Object Data
-----------------------
Figure 1.8.
The orbital semimajor axis vs. the orbital inclination angle diagram for the
first 10,000 catalog entries from the SDSS Moving Object Catalog (after
applying several quality cuts). The gaps at approximately 2.5, 2.8, and 3.3 AU
are called the Kirkwood gaps and are due to orbital resonances with Jupiter.
The several distinct clumps are called asteroid families and represent remnants
from collisions of larger asteroids.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from matplotlib import pyplot as plt
from astroML.datasets import fetch_moving_objects
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Fetch the moving object data
data = fetch_moving_objects(Parker2008_cuts=True)
# Use only the first 10000 points
data = data[:10000]
a = data['aprime']
sini = data['sin_iprime']
#------------------------------------------------------------
# Plot the results
fig, ax = plt.subplots(figsize=(5, 3.75))
ax.plot(a, sini, '.', markersize=2, color='black')
ax.set_xlim(2.0, 3.6)
ax.set_ylim(-0.01, 0.31)
ax.set_xlabel('Semimajor Axis (AU)')
ax.set_ylabel('Sine of Inclination Angle')
plt.show()
| bsd-2-clause |
waidyanatha/pingsam | visualize.py | 1 | 8668 | import numpy as np
import datetime as dtm
from dateutil import rrule
import pandas as pd
import csv
import matplotlib.pylab as plt
import sys, os
#lets first create the csv file
#
#change this to actual csv file name
pingfile="weeklylogs.csv"
#paramters @plotinterval = 10 minutes
plotinterval = 10
#csv file columns
col_seq=0
col_pingtime=1
col_domain=2
col_state=3
#
########## FUNCTION TO SYNTHESIZE MISSING DATA POINTS ##########
#
def synth_data(synthdf, interval):
    #create a temporary dataframe to hold the synthesized data
tmpdf = pd.DataFrame(columns=['seqnum', 'pingdatetime', 'domain', 'statenow'])
    #first check we have a non-empty dataframe
    if not synthdf.empty:
        #pick the originating TS data point
        synthdf = synthdf.sort_values(by='pingdatetime')
#check if first timestamp starts at 00:00:00; if not add a dumy record
startseqnum = synthdf.index[0]
startpingdt = synthdf.iloc[0]['pingdatetime']
startdomain = synthdf.iloc[0]['domain']
startstate = synthdf.iloc[0]['statenow']
#loop through each TS data point to synthetically add new TS points
#to fill the gap between two consecutive data points
for i, row in synthdf.iterrows():
            #initiate the synthesized data point counter at the origin
nextdatapoint = 0
pingdt_plus_interval = startpingdt
            #stepwise loop to add synthesized points from the relative origin to the next TS data point
while row['pingdatetime'] > pingdt_plus_interval + dtm.timedelta(minutes = interval) :
nextdatapoint += 1
pingdt_plus_interval = startpingdt + dtm.timedelta(minutes = nextdatapoint*interval)
tmpdf.loc[len(tmpdf.index)] = [startseqnum,pingdt_plus_interval,startdomain,startstate]
startseqnum = i
startpingdt = row['pingdatetime']
startstate = row['statenow']
        #after looping through all the TS datapoints check if a non-empty dataframe was created
if not tmpdf.empty:
tmpdf = pd.concat([tmpdf,synthdf])
tmpdf = tmpdf.set_index('seqnum')
    #whether null or not return a dataframe with synthesized TS data
tmpdf.dropna(thresh=2)
return tmpdf
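#
# Illustrative usage sketch (names are hypothetical; assumes a dataframe shaped like
# the per-domain slices built in the main section below):
# filled_df = synth_data(domain_df, plotinterval)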
#
########## PLOT HISTOGRAM TO FIGURE ##########
#
def plot_hist_to_fig(histdf, dname):
    #get date range of the plot to use in the suptitle
begdt = histdf['pingdatetime'].min().date()
findt = histdf['pingdatetime'].max().date()
#create a new x-axis index using dataframe index; starting from 1 instead of 0
histdf['pingdate'] = histdf['pingdatetime'].apply(lambda x: x.date())
downdf = pd.DataFrame(columns=['xlabel','pingdate', 'downcount'])
datelist = list(histdf.pingdate.unique())
for uniquedate in datelist:
xlabel = str('{:02d}'.format(uniquedate.month))+'-'+str('{:02d}'.format(uniquedate.day))
downcount = len(histdf[(histdf.statenow == '0') & (histdf.pingdate == uniquedate)])
totalcount = len(histdf[(histdf.pingdate == uniquedate)])
downdf.loc[len(downdf.index)] = [xlabel, uniquedate,100*downcount//totalcount]
downdf = downdf.as_matrix()
#x-axis values are in the newly generated xvalues column
xl = np.array(downdf[:,0])
x = np.array(downdf[:,1])
    #y-axis values (1 or 0) are in the dataframe statenow column
y = np.array(downdf[:,2])
histfig, ax = plt.subplots()
ax.bar(x,y,color='red',width=0.5, align="center")
#to give enough spacing for the suptitle; otherwise overlaps with title
histfig.subplots_adjust(top=0.87)
# plt.figure(figsize=(8,6), dpi=150)
#beautify the plot and name the labels, titles
ax.set_title('Percentage of time Server Failed each Day', fontsize=14, fontweight='bold', color='gray')
histfig.suptitle(dname+'\n'+str(begdt)+' --- '+str(findt), fontsize=10, color='blue')
ax.set_xlabel('Month-Day', fontsize=12, color='gray')
    ax.set_ylabel('Failure Rate (%)', fontsize=12, color='gray')
plt.yticks(fontsize=10, color='gray', rotation='horizontal')
plt.xticks(x, xl, fontsize=10, color='gray', rotation='vertical')
ax.grid(True)
return histfig
#
########## PLOT DOWN TIMES FREQUENCY TO FIGURE ##########
#
def plot_freq_to_fig(plotdf, dname):
    #get date range of the plot to use in the suptitle
begdt = plotdf['pingdatetime'].min().date()
findt = plotdf['pingdatetime'].max().date()
failrate = 100-(sum(100*plotdf['statenow'].astype(int))/len(plotdf))
failrate = failrate.astype(float)
#create a new x-axis index using dataframe index; starting from 1 instead of 0
plotdf['xvalues'] = range(1,len(plotdf)+1)
plotdf = plotdf.as_matrix()
#x-axis values are in the newly generated xvalues column
x = np.array(plotdf[:,3].astype(int))
    #y-axis values (1 or 0) are in the dataframe statenow column
y = np.array(plotdf[:,2].astype(int))
    #setup to capture the plot into a figure
plotfig = plt.figure(num=None, figsize=(8, 6), dpi=150, facecolor='y', edgecolor='k')
ax = plotfig.add_subplot(311)
ax.fill_between(x, 0, y, color='green')
ax.plot(x,y,color='green',lw=2)
#to give enough spacing for the suptitle; otherwise overlaps with title
plotfig.subplots_adjust(top=0.87)
#beautify the plot and name the labels, titles
ax.set_title('Frequency of Server Access Failure ('+str(failrate)+'%)', fontsize=14, fontweight='bold', color='gray')
plotfig.suptitle(dname+'\n'+str(begdt)+' --- '+str(findt), fontsize=10, color='blue')
    ax.set_xlabel('Attempted Machine Access Times', fontsize=12, color='gray')
ax.set_ylabel('Machine State', fontsize=12, color='gray')
plt.yticks(y, ['UP','DOWN'], fontsize=10, color='gray', rotation='vertical')
plt.xticks(fontsize=10, color='gray', rotation='horizontal')
plt.ylim(0,1.1)
plt.xlim(0,x.max()+10)
ax.grid(True)
return plotfig
#
############# MAIN ################################
#
print("Complile data from file the log files")
#os.system('./analytics.sh')
print("Reading data from file "+pingfile)
with open(pingfile, 'rb') as f:
data = [i.split(",") for i in f.read().split()]
df = pd.DataFrame(data, columns=['seqnum', 'pingdatetime', 'domain', 'statenow'])
for index, row in df.iterrows():
row[col_pingtime] = dtm.datetime.strptime(row[col_pingtime], '%Y-%m-%d:%H:%M:%S')
    #to avoid duplicate data, round the ping time down to the minute
row[col_pingtime] = row[col_pingtime].replace(second = 0)
#format pingdatetime as proper datetime, set it as the indext and then order them
df['pingdatetime'] = pd.to_datetime(df['pingdatetime'])
df.sort_values(by='pingdatetime')
df = df.set_index('seqnum')
#begin processing for each unique domain
print(str(len(df.index))+" data rows added to the dataframe, ready for processing ...")
print ('-----------------------------------------------------')
for thedomain in df.domain.unique():
    #insert synthesized data points
dompingdf = df[df['domain']==thedomain]
print("Begin data synthesis for "+thedomain+" with data rows = "+str(len(dompingdf.index)))
amenddf = synth_data(dompingdf,plotinterval)
if not amenddf.empty:
        #output the synthesized dataframe to output file
        print(str(len(amenddf.index))+" data rows of synthesized data added to "+thedomain )
        amenddf['pingdatetime'] = pd.to_datetime(amenddf.pingdatetime)
        amenddf = amenddf.sort_values(by='pingdatetime')
amenddf.index = range(0,len(amenddf))
print('writing data to file: ./data/syndata_'+thedomain+'.csv')
amenddf.to_csv('./data/syndata_'+thedomain+'.csv')
#plot timeseries with function (need to add if conditions to check if function returns valid fig)
fig = plot_freq_to_fig(amenddf, thedomain)
fig.savefig('./plots/freqplot_'+thedomain+'.png', bbox_inches='tight')
print ('frequency plot created in file: ./plots/freqplot_'+thedomain+'.png')
fig = plot_hist_to_fig(amenddf, thedomain)
fig.savefig('./plots/histplot_'+thedomain+'.png', bbox_inches='tight')
print ('histogram plot created in file: ./plots/histplot_'+thedomain+'.png')
print ('process complete for '+thedomain)
print ('-----------------------------------------------------')
else:
print ("Warning: no syntheseized data was added to: "+thedomain)
print ('-----------------------------------------------------')
print ('End processing data for visualization !!! ')
| mit |
pbauman/libmesh | doc/statistics/libmesh_pagehits.py | 1 | 10542 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
# Import stuff for working with dates
from datetime import datetime
from matplotlib.dates import date2num
# Hits/month, pages, and gigabytes served.
# To get the Google analytics data:
# .) Go to analytics.google.com.
# .) There should be (as of July 2017) a "Google Analytics Home" box at the top left of the dashboard.
# .) Click the "Audience Overview" link at the bottom right corner of this box.
# .) Adjust date range to previous month.
# .) Record the number of "Pageviews" in the "Hits" column below.
# The data below are from the libmesh.github.io site, which uses the
# number UA-24978333-1.
#
# Note: we do not have control over the analytics for the
# https://www.github.com/libMesh/libmesh page. If you look at the page
# source, analytics code UA-3769691-2 appears, but if I try to add
# this property in my analytics account, Google assigns me the number
# UA-24978333-{2,3,...} (where the last digit may change depending on
# how many times you tried to add/remove this property in the
# Analytics Dashboard) and there does not seem to be a straightforward
# way of inserting this code into the source. There have been some
# README.md based hacks for doing this in the past, but I don't think
# they are particularly reliable...
# Hits, pages, GB served
data = [
# 'Jan 2003', 616, 616, 0
# 'Feb 2003', 2078, 2078, 0,
# 'Mar 2003', 3157, 3157, 0,
# 'Apr 2003', 7800, 7800, 0,
# 'May 2003', 4627, 4627, 0,
# 'Jun 2003', 6156, 6156, 0,
# 'Jul 2003', 6389, 6389, 0,
# 'Aug 2003', 10136, 10136, 0,
# 'Sep 2003', 8871, 8871, 0,
# 'Oct 2003', 9703, 9703, 0,
# 'Nov 2003', 9802, 9802, 0,
# 'Dec 2003', 9123, 9123, 0,
# 'Jan 2004', 13599, 13599, 0,
# 'Feb 2004', 11018, 11018, 0,
# 'Mar 2004', 11713, 11713, 0,
# 'Apr 2004', 14995, 14995, 0,
# 'May 2004', 11285, 11285, 0,
# 'Jun 2004', 12974, 12974, 0,
# 'Jul 2004', 12939, 12939, 0,
# 'Aug 2004', 9708, 9708, 0,
# 'Sep 2004', 7994, 7994, 0,
# 'Oct 2004', 6920, 6920, 0,
# 'Nov 2004', 10261, 10261, 0,
# 'Dec 2004', 7483, 7483, 0,
# 'Jan 2005', 3184, 3184, 0,
# 'Feb 2005', 37733, 14077, .4373,
# 'Mar 2005', 43927, 16408, .5637,
# 'Apr 2005', 29792, 8518, .2890,
# 'May 2005', 51288, 17629, .5689,
# 'Jun 2005', 40617, 16599, .5379,
# 'Jul 2005', 29944, 10006, .3363,
# 'Aug 2005', 39592, 14556, .4577,
# 'Sep 2005', 57638, 14666, .4881,
# 'Oct 2005', 48336, 17976, .5749,
# 'Nov 2005', 49563, 15308, .5810,
# 'Dec 2005', 90863, 40736, .9415,
# 'Jan 2006', 46723, 13487, .5662,
# 'Feb 2006', 62285, 26567, .8229,
# 'Mar 2006', 47446, 14711, .6534,
# 'Apr 2006', 90314, 29635, .9762,
# 'May 2006', 68209, 20998, .7949,
# 'Jun 2006', 50495, 17128, .6881,
# 'Jul 2006', 42387, 10958, .6016,
# 'Aug 2006', 55658, 11793, .6174,
# 'Sep 2006', 54919, 20591, .9056,
# 'Oct 2006', 52916, 17944, .9015,
# 'Nov 2006', 55382, 19833, .9439,
# 'Dec 2006', 54265, 22688, .9162,
# 'Jan 2007', 53813, 19881, 1.0 ,
# 'Feb 2007', 52434, 17920, .9472,
# 'Mar 2007', 61530, 21172, 1.2,
# 'Apr 2007', 125578, 77539, 1.3,
# 'May 2007', 182764, 129596, 1.6,
# 'Jun 2007', 115730, 38571, 1.7,
# 'Jul 2007', 121054, 42757, 1.8,
# 'Aug 2007', 81192, 28187, 1.3,
# 'Sep 2007', 143553, 39734, 2.3,
# 'Oct 2007', 110449, 42111, 2.4,
# 'Nov 2007', 128307, 57851, 2.3,
# 'Dec 2007', 80584, 42631, 2.0,
# 'Jan 2008', 69623, 34155, 2.0,
# 'Feb 2008', 144881, 111751, 2.5,
# 'Mar 2008', 69801, 29211, 1.9,
# 'Apr 2008', 74023, 31149, 2.0,
# 'May 2008', 63123, 23277, 1.8,
# 'Jun 2008', 66055, 25418, 2.1,
# 'Jul 2008', 60046, 22082, 2.0,
# 'Aug 2008', 60206, 24543, 2.0,
# 'Sep 2008', 53057, 18635, 1.6,
# 'Oct 2008', 64828, 27042, 2.1,
# 'Nov 2008', 72406, 29767, 2.3,
# 'Dec 2008', 76248, 31690, 2.3,
# 'Jan 2009', 73002, 29744, 2.0,
# 'Feb 2009', 70801, 29156, 2.1,
# 'Mar 2009', 78200, 31139, 2.1,
# 'Apr 2009', 70888, 26182, 1.7,
# 'May 2009', 67263, 26210, 1.8,
# 'Jun 2009', 73146, 31328, 2.6,
# 'Jul 2009', 77828, 33711, 2.4,
# 'Aug 2009', 64378, 28542, 1.9,
# 'Sep 2009', 76167, 33484, 2.2,
# 'Oct 2009', 95727, 41062, 2.8,
# 'Nov 2009', 88042, 38869, 2.5,
# 'Dec 2009', 76148, 37609, 2.3,
# 'Jan 2010', 268856, 45983, 3.2,
# 'Feb 2010', 208210, 42680, 3.0,
# 'Mar 2010', 116263, 42660, 2.6,
# 'Apr 2010', 102493, 32942, 2.4,
# 'May 2010', 117023, 37107, 2.5,
# 'Jun 2010', 128589, 38019, 2.5,
# 'Jul 2010', 87183, 34026, 2.2,
# 'Aug 2010', 99161, 33199, 2.5,
# 'Sep 2010', 81657, 32305, 2.5,
# 'Oct 2010', 98236, 42091, 3.4,
# 'Nov 2010', 115603, 48695, 3.4,
# 'Dec 2010', 105030, 45570, 3.4,
# 'Jan 2011', 133476, 43549, 3.1,
# 'Feb 2011', 34483, 15002, 1.1,
# 'Mar 2011', 0, 0, 0.0,
# 'Apr 2011', 0, 0, 0.0,
# 'May 2011', 0, 0, 0.0,
# 'Jun 2011', 0, 0, 0.0,
# 'Jul 2011', 0, 0, 0.0,
'Aug 2011', 10185, 0, 0.0, # New "Pageviews" data from google analytics, does not seem comparable to sf.net pagehits data
'Sep 2011', 10305, 0, 0.0,
'Oct 2011', 14081, 0, 0.0,
'Nov 2011', 13397, 0, 0.0,
'Dec 2011', 13729, 0, 0.0,
'Jan 2012', 11050, 0, 0.0,
'Feb 2012', 12779, 0, 0.0,
'Mar 2012', 12970, 0, 0.0,
'Apr 2012', 13051, 0, 0.0,
'May 2012', 11857, 0, 0.0,
'Jun 2012', 12584, 0, 0.0,
'Jul 2012', 12995, 0, 0.0,
'Aug 2012', 13204, 0, 0.0,
'Sep 2012', 13170, 0, 0.0,
'Oct 2012', 13335, 0, 0.0,
'Nov 2012', 11337, 0, 0.0,
'Dec 2012', 10108, 0, 0.0, # libmesh switched to github on December 10, 2012
'Jan 2013', 13029, 0, 0.0,
'Feb 2013', 10420, 0, 0.0,
'Mar 2013', 13400, 0, 0.0,
'Apr 2013', 14416, 0, 0.0,
'May 2013', 13875, 0, 0.0,
'Jun 2013', 13747, 0, 0.0,
'Jul 2013', 14019, 0, 0.0,
'Aug 2013', 10828, 0, 0.0,
'Sep 2013', 9969, 0, 0.0,
'Oct 2013', 13083, 0, 0.0,
'Nov 2013', 12938, 0, 0.0,
'Dec 2013', 9079, 0, 0.0,
'Jan 2014', 9736, 0, 0.0,
'Feb 2014', 11824, 0, 0.0,
'Mar 2014', 10861, 0, 0.0,
'Apr 2014', 12711, 0, 0.0,
'May 2014', 11177, 0, 0.0,
'Jun 2014', 10738, 0, 0.0,
'Jul 2014', 10349, 0, 0.0,
'Aug 2014', 8877, 0, 0.0,
'Sep 2014', 9226, 0, 0.0,
'Oct 2014', 8052, 0, 0.0, # Google analytics number moved over to libmesh.github.io in Oct 2014
'Nov 2014', 9243, 0, 0.0,
'Dec 2014', 10714, 0, 0.0,
'Jan 2015', 11508, 0, 0.0,
'Feb 2015', 11278, 0, 0.0,
'Mar 2015', 13305, 0, 0.0,
'Apr 2015', 12347, 0, 0.0,
'May 2015', 11368, 0, 0.0,
'Jun 2015', 11203, 0, 0.0,
'Jul 2015', 10419, 0, 0.0,
'Aug 2015', 11282, 0, 0.0,
'Sep 2015', 13535, 0, 0.0,
'Oct 2015', 12912, 0, 0.0,
'Nov 2015', 13894, 0, 0.0,
'Dec 2015', 11694, 0, 0.0,
'Jan 2016', 11837, 0, 0.0,
'Feb 2016', 14102, 0, 0.0,
'Mar 2016', 13212, 0, 0.0,
'Apr 2016', 13355, 0, 0.0,
'May 2016', 12486, 0, 0.0,
'Jun 2016', 13973, 0, 0.0,
'Jul 2016', 10688, 0, 0.0,
'Aug 2016', 10048, 0, 0.0,
'Sep 2016', 10847, 0, 0.0,
'Oct 2016', 10984, 0, 0.0,
'Nov 2016', 12233, 0, 0.0,
'Dec 2016', 11430, 0, 0.0,
'Jan 2017', 10327, 0, 0.0,
'Feb 2017', 11039, 0, 0.0,
'Mar 2017', 12986, 0, 0.0,
'Apr 2017', 9773, 0, 0.0,
'May 2017', 10880, 0, 0.0,
'Jun 2017', 9179, 0, 0.0,
'Jul 2017', 8344, 0, 0.0,
'Aug 2017', 8617, 0, 0.0,
'Sep 2017', 8576, 0, 0.0,
'Oct 2017', 11255, 0, 0.0,
'Nov 2017', 10362, 0, 0.0,
'Dec 2017', 7948, 0, 0.0,
'Jan 2018', 9376, 0, 0.0,
'Feb 2018', 8864, 0, 0.0,
'Mar 2018', 10339, 0, 0.0,
'Apr 2018', 10958, 0, 0.0,
'May 2018', 10151, 0, 0.0,
'Jun 2018', 8981, 0, 0.0,
'Jul 2018', 8619, 0, 0.0,
'Aug 2018', 9226, 0, 0.0,
'Sep 2018', 8507, 0, 0.0,
'Oct 2018', 9150, 0, 0.0,
'Nov 2018', 8135, 0, 0.0,
'Dec 2018', 7522, 0, 0.0,
'Jan 2019', 8643, 0, 0.0,
'Feb 2019', 8729, 0, 0.0,
'Mar 2019', 7916, 0, 0.0,
]
# Extract number of hits/month
n_hits_month = data[1::4]
# Divide by 1000 for plotting...
n_hits_month = np.divide(n_hits_month, 1000.)
# Extract list of date strings
date_strings = data[0::4]
# Convert date strings into numbers
date_nums = []
for d in date_strings:
date_nums.append(date2num(datetime.strptime(d, '%b %Y')))
# Get a reference to the figure
fig = plt.figure()
# 111 is equivalent to Matlab's subplot(1,1,1) command
ax = fig.add_subplot(111)
# Plot hits/month as a line with markers (one data point per month).
# The color used comes from sns.color_palette("muted").as_hex() They
# are the "same basic order of hues as the default matplotlib color
# cycle but more attractive colors."
ax.plot(date_nums, n_hits_month, marker='o', linewidth=2, color=u'#4878cf')
# Create title
fig.suptitle('libmesh.github.io Hits/Month (in Thousands)')
# Set up x-tick locations -- January of each year
ticks_names = ['2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']
# Get numerical values for the names
tick_nums = []
for x in ticks_names:
tick_nums.append(date2num(datetime.strptime('Jan ' + x, '%b %Y')))
# Set tick labels and positions
ax.set_xticks(tick_nums)
ax.set_xticklabels(ticks_names)
# Set x limits for the plot
plt.xlim(date_nums[0], date_nums[-1]+30);
# Make x-axis ticks point outward
ax.get_xaxis().set_tick_params(direction='out')
# Save as PDF
plt.savefig('libmesh_pagehits.pdf')
# Local Variables:
# python-indent: 2
# End:
| lgpl-2.1 |
mfjb/scikit-learn | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling integers without replacement.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
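# Note: delta.days is ignored by compute_time above, which is fine for
# benchmark runs that last less than a day.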
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
| bsd-3-clause |
crichardson17/starburst_atlas | Low_resolution_sims/Dusty_LowRes/Padova_inst/padova_inst_2/fullgrid/Rest.py | 30 | 9192 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith("1.grd"):
gridfile1 = file
for file in os.listdir('.'):
if file.endswith("2.grd"):
gridfile2 = file
for file in os.listdir('.'):
if file.endswith("3.grd"):
gridfile3 = file
# ------------------------
for file in os.listdir('.'):
if file.endswith("1.txt"):
Elines1 = file
for file in os.listdir('.'):
if file.endswith("2.txt"):
Elines2 = file
for file in os.listdir('.'):
if file.endswith("3.txt"):
Elines3 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for other people's data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers2 = csvReader.next()
for row in csvReader:
dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers3 = csvReader.next()
for row in csvReader:
dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# ---------------------------------------------------
#for concatenating Emission lines data
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
#for lines
headers = headers[1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
concatenated_data[i,j] == 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
#change desired lines here!
line = [3,4,15,22,37,53,54,55,57,62,77,88,89,90,92,93]
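#these are column indices into concatenated_data (and headers), i.e. which emission lines get a contour panel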
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Dusty Rest of the Lines", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Dusty_Rest.pdf')
plt.clf()
print "figure saved"
| gpl-2.0 |
andrewgiessel/folium | folium/utilities.py | 1 | 19979 | # -*- coding: utf-8 -*-
"""
Utilities
-------
Utility module for Folium helper functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import time
import math
import zlib
import struct
import json
import base64
from jinja2 import Environment, PackageLoader
try:
import pandas as pd
except ImportError:
pd = None
try:
import numpy as np
except ImportError:
np = None
from folium.six import iteritems, text_type, binary_type
def get_templates():
"""Get Jinja templates."""
return Environment(loader=PackageLoader('folium', 'templates'))
def legend_scaler(legend_values, max_labels=10.0):
"""
Downsamples the number of legend values so that there isn't a collision
of text on the legend colorbar (within reason). The colorbar seems to
support ~10 entries as a maximum.
"""
if len(legend_values) < max_labels:
legend_ticks = legend_values
else:
spacer = int(math.ceil(len(legend_values)/max_labels))
legend_ticks = []
for i in legend_values[::spacer]:
legend_ticks += [i]
legend_ticks += ['']*(spacer-1)
return legend_ticks
def linear_gradient(hexList, nColors):
"""
Given a list of hexcode values, will return a list of length
nColors where the colors are linearly interpolated between the
(r, g, b) tuples that are given.
Example:
    linear_gradient(['#000000', '#FF0000', '#FFFF00'], 100)
"""
def _scale(start, finish, length, i):
"""
        Return the correct value of a number that is in between start
and finish, for use in a loop of length *length*.
"""
base = 16
fraction = float(i) / (length - 1)
raynge = int(finish, base) - int(start, base)
thex = hex(int(int(start, base) + fraction * raynge)).split('x')[-1]
if len(thex) != 2:
thex = '0' + thex
return thex
allColors = []
# Separate (R, G, B) pairs.
for start, end in zip(hexList[:-1], hexList[1:]):
        # Linearly interpolate between each pair of hex '#RRGGBB' values and
# add to list.
nInterpolate = 765
for index in range(nInterpolate):
r = _scale(start[1:3], end[1:3], nInterpolate, index)
g = _scale(start[3:5], end[3:5], nInterpolate, index)
b = _scale(start[5:7], end[5:7], nInterpolate, index)
allColors.append(''.join(['#', r, g, b]))
# Pick only nColors colors from the total list.
result = []
for counter in range(nColors):
fraction = float(counter) / (nColors - 1)
index = int(fraction * (len(allColors) - 1))
result.append(allColors[index])
return result
def color_brewer(color_code, n=6):
"""
    Generate a ColorBrewer color scheme named 'color_code' with 'n' colors.
Live examples can be seen at http://colorbrewer2.org/
"""
maximum_n = 253
scheme_info = {'BuGn': 'Sequential',
'BuPu': 'Sequential',
'GnBu': 'Sequential',
'OrRd': 'Sequential',
'PuBu': 'Sequential',
'PuBuGn': 'Sequential',
'PuRd': 'Sequential',
'RdPu': 'Sequential',
'YlGn': 'Sequential',
'YlGnBu': 'Sequential',
'YlOrBr': 'Sequential',
'YlOrRd': 'Sequential',
'BrBg': 'Diverging',
'PiYG': 'Diverging',
'PRGn': 'Diverging',
'PuOr': 'Diverging',
'RdBu': 'Diverging',
'RdGy': 'Diverging',
'RdYlBu': 'Diverging',
'RdYlGn': 'Diverging',
'Spectral': 'Diverging',
'Accent': 'Qualitative',
'Dark2': 'Qualitative',
'Paired': 'Qualitative',
'Pastel1': 'Qualitative',
'Pastel2': 'Qualitative',
'Set1': 'Qualitative',
'Set2': 'Qualitative',
'Set3': 'Qualitative',
}
schemes = {'BuGn': ['#EDF8FB', '#CCECE6', '#CCECE6',
'#66C2A4', '#41AE76', '#238B45', '#005824'],
'BuPu': ['#EDF8FB', '#BFD3E6', '#9EBCDA',
'#8C96C6', '#8C6BB1', '#88419D', '#6E016B'],
'GnBu': ['#F0F9E8', '#CCEBC5', '#A8DDB5',
'#7BCCC4', '#4EB3D3', '#2B8CBE', '#08589E'],
'OrRd': ['#FEF0D9', '#FDD49E', '#FDBB84',
'#FC8D59', '#EF6548', '#D7301F', '#990000'],
'PuBu': ['#F1EEF6', '#D0D1E6', '#A6BDDB',
'#74A9CF', '#3690C0', '#0570B0', '#034E7B'],
'PuBuGn': ['#F6EFF7', '#D0D1E6', '#A6BDDB',
'#67A9CF', '#3690C0', '#02818A', '#016450'],
'PuRd': ['#F1EEF6', '#D4B9DA', '#C994C7',
'#DF65B0', '#E7298A', '#CE1256', '#91003F'],
'RdPu': ['#FEEBE2', '#FCC5C0', '#FA9FB5',
'#F768A1', '#DD3497', '#AE017E', '#7A0177'],
'YlGn': ['#FFFFCC', '#D9F0A3', '#ADDD8E',
'#78C679', '#41AB5D', '#238443', '#005A32'],
'YlGnBu': ['#FFFFCC', '#C7E9B4', '#7FCDBB',
'#41B6C4', '#1D91C0', '#225EA8', '#0C2C84'],
'YlOrBr': ['#FFFFD4', '#FEE391', '#FEC44F',
'#FE9929', '#EC7014', '#CC4C02', '#8C2D04'],
'YlOrRd': ['#FFFFB2', '#FED976', '#FEB24C',
'#FD8D3C', '#FC4E2A', '#E31A1C', '#B10026'],
'BrBg': ['#8c510a', '#d8b365', '#f6e8c3',
'#c7eae5', '#5ab4ac', '#01665e'],
'PiYG': ['#c51b7d', '#e9a3c9', '#fde0ef',
'#e6f5d0', '#a1d76a', '#4d9221'],
'PRGn': ['#762a83', '#af8dc3', '#e7d4e8',
'#d9f0d3', '#7fbf7b', '#1b7837'],
'PuOr': ['#b35806', '#f1a340', '#fee0b6',
'#d8daeb', '#998ec3', '#542788'],
'RdBu': ['#b2182b', '#ef8a62', '#fddbc7',
'#d1e5f0', '#67a9cf', '#2166ac'],
'RdGy': ['#b2182b', '#ef8a62', '#fddbc7',
'#e0e0e0', '#999999', '#4d4d4d'],
'RdYlBu': ['#d73027', '#fc8d59', '#fee090',
'#e0f3f8', '#91bfdb', '#4575b4'],
'RdYlGn': ['#d73027', '#fc8d59', '#fee08b',
'#d9ef8b', '#91cf60', '#1a9850'],
'Spectral': ['#d53e4f', '#fc8d59', '#fee08b',
'#e6f598', '#99d594', '#3288bd'],
'Accent': ['#7fc97f', '#beaed4', '#fdc086',
'#ffff99', '#386cb0', '#f0027f'],
'Dark2': ['#1b9e77', '#d95f02', '#7570b3',
'#e7298a', '#66a61e', '#e6ab02'],
'Paired': ['#a6cee3', '#1f78b4', '#b2df8a',
'#33a02c', '#fb9a99', '#e31a1c'],
'Pastel1': ['#fbb4ae', '#b3cde3', '#ccebc5',
'#decbe4', '#fed9a6', '#ffffcc'],
'Pastel2': ['#b3e2cd', '#fdcdac', '#cbd5e8',
'#f4cae4', '#e6f5c9', '#fff2ae'],
'Set1': ['#e41a1c', '#377eb8', '#4daf4a',
'#984ea3', '#ff7f00', '#ffff33'],
'Set2': ['#66c2a5', '#fc8d62', '#8da0cb',
'#e78ac3', '#a6d854', '#ffd92f'],
'Set3': ['#8dd3c7', '#ffffb3', '#bebada',
'#fb8072', '#80b1d3', '#fdb462'],
}
# Raise an error if the n requested is greater than the maximum.
if n > maximum_n:
raise ValueError("The maximum number of colors in a"
" ColorBrewer sequential color series is 253")
# Only if n is greater than six do we interpolate values.
if n > 6:
if color_code not in schemes:
color_scheme = None
else:
# Check to make sure that it is not a qualitative scheme.
if scheme_info[color_code] == 'Qualitative':
raise ValueError("Expanded color support is not available"
" for Qualitative schemes, restrict"
" number of colors to 6")
else:
color_scheme = linear_gradient(schemes.get(color_code), n)
else:
color_scheme = schemes.get(color_code, None)
return color_scheme
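# Illustrative usage note (not part of the original docstring): asking for more
# than six colors triggers the linear interpolation above, e.g.
# color_brewer('YlGnBu', n=9) returns a list of nine '#RRGGBB' strings.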
def transform_data(data):
"""
Transform Pandas DataFrame into JSON format.
Parameters
----------
data: DataFrame or Series
Pandas DataFrame or Series
Returns
-------
JSON compatible dict
Example
-------
>>> transform_data(df)
"""
if pd is None:
raise ImportError("The Pandas package is required"
" for this functionality")
if np is None:
raise ImportError("The NumPy package is required"
" for this functionality")
def type_check(value):
"""
Type check values for JSON serialization. Native Python JSON
serialization will not recognize some Numpy data types properly,
so they must be explicitly converted.
"""
if pd.isnull(value):
return None
elif (isinstance(value, pd.tslib.Timestamp) or
isinstance(value, pd.Period)):
return time.mktime(value.timetuple())
elif isinstance(value, (int, np.integer)):
return int(value)
elif isinstance(value, (float, np.float_)):
return float(value)
elif isinstance(value, str):
return str(value)
else:
return value
if isinstance(data, pd.Series):
json_data = [{type_check(x): type_check(y) for
x, y in iteritems(data)}]
elif isinstance(data, pd.DataFrame):
json_data = [{type_check(y): type_check(z) for
x, y, z in data.itertuples()}]
return json_data
def split_six(series=None):
"""
Given a Pandas Series, get a domain of values from zero to the 90% quantile
rounded to the nearest order-of-magnitude integer. For example, 2100 is
rounded to 2000, 2790 to 3000.
Parameters
----------
series: Pandas series, default None
Returns
-------
list
"""
if pd is None:
raise ImportError("The Pandas package is required"
" for this functionality")
if np is None:
raise ImportError("The NumPy package is required"
" for this functionality")
def base(x):
if x > 0:
base = pow(10, math.floor(math.log10(x)))
return round(x/base)*base
else:
return 0
quants = [0, 50, 75, 85, 90]
# Some weirdness in series quantiles a la 0.13.
arr = series.values
return [base(np.percentile(arr, x)) for x in quants]
def mercator_transform(data, lat_bounds, origin='upper', height_out=None):
"""Transforms an image computed in (longitude,latitude) coordinates into
    a Mercator projection image.
Parameters
----------
data: numpy array or equivalent list-like object.
Must be NxM (mono), NxMx3 (RGB) or NxMx4 (RGBA)
lat_bounds : length 2 tuple
Minimal and maximal value of the latitude of the image.
origin : ['upper' | 'lower'], optional, default 'upper'
Place the [0,0] index of the array in the upper left or lower left
corner of the axes.
height_out : int, default None
The expected height of the output.
If None, the height of the input is used.
"""
if np is None:
raise ImportError("The NumPy package is required"
" for this functionality")
mercator = lambda x: np.arcsinh(np.tan(x*np.pi/180.))*180./np.pi
array = np.atleast_3d(data).copy()
height, width, nblayers = array.shape
lat_min, lat_max = lat_bounds
if height_out is None:
height_out = height
# Eventually flip the image
if origin == 'upper':
array = array[::-1, :, :]
lats = (lat_min + np.linspace(0.5/height, 1.-0.5/height, height) *
(lat_max-lat_min))
latslats = (mercator(lat_min) +
np.linspace(0.5/height_out, 1.-0.5/height_out, height_out) *
(mercator(lat_max)-mercator(lat_min)))
out = np.zeros((height_out, width, nblayers))
for i in range(width):
for j in range(4):
out[:, i, j] = np.interp(latslats, mercator(lats), array[:, i, j])
    # Flip the image back if needed to match the requested origin.
if origin == 'upper':
out = out[::-1, :, :]
return out
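# Illustrative usage sketch with hypothetical data: warp a random RGBA image whose
# rows span latitudes -60..60 degrees; with height_out=None the shape is preserved.
# >>> img = np.random.rand(10, 20, 4)
# >>> mercator_transform(img, (-60, 60), origin='upper').shape
# (10, 20, 4)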
def image_to_url(image, mercator_project=False, colormap=None,
origin='upper', bounds=((-90, -180), (90, 180))):
"""Infers the type of an image argument and transforms it into a URL.
Parameters
----------
image: string, file or array-like object
* If string, it will be written directly in the output file.
* If file, it's content will be converted as embedded in the
output file.
* If array-like, it will be converted to PNG base64 string and
embedded in the output.
origin : ['upper' | 'lower'], optional, default 'upper'
Place the [0, 0] index of the array in the upper left or
lower left corner of the axes.
colormap : callable, used only for `mono` image.
Function of the form [x -> (r,g,b)] or [x -> (r,g,b,a)]
for transforming a mono image into RGB.
It must output iterables of length 3 or 4, with values between
0. and 1. Hint : you can use colormaps from `matplotlib.cm`.
mercator_project : bool, default False, used for array-like image.
Transforms the data to project (longitude,latitude)
coordinates to the Mercator projection.
bounds: list-like, default ((-90, -180), (90, 180))
Image bounds on the map in the form
[[lat_min, lon_min], [lat_max, lon_max]].
Only used if mercator_project is True.
"""
if hasattr(image, 'read'):
# We got an image file.
if hasattr(image, 'name'):
# We try to get the image format from the file name.
fileformat = image.name.lower().split('.')[-1]
else:
fileformat = 'png'
url = "data:image/{};base64,{}".format(
fileformat, base64.b64encode(image.read()).decode('utf-8'))
elif (not (isinstance(image, text_type) or
isinstance(image, binary_type))) and hasattr(image, '__iter__'):
# We got an array-like object.
if mercator_project:
data = mercator_transform(image,
[bounds[0][0], bounds[1][0]],
origin=origin)
else:
data = image
png = write_png(data, origin=origin, colormap=colormap)
url = "data:image/png;base64," + base64.b64encode(png).decode('utf-8')
else:
# We got an URL.
url = json.loads(json.dumps(image))
return url.replace('\n', ' ')
def write_png(data, origin='upper', colormap=None):
"""
Transform an array of data into a PNG string.
This can be written to disk using binary I/O, or encoded using base64
for an inline PNG like this:
>>> png_str = write_png(array)
>>> "data:image/png;base64,"+png_str.encode('base64')
Inspired from
http://stackoverflow.com/questions/902761/saving-a-numpy-array-as-an-image
Parameters
----------
data: numpy array or equivalent list-like object.
Must be NxM (mono), NxMx3 (RGB) or NxMx4 (RGBA)
origin : ['upper' | 'lower'], optional, default 'upper'
Place the [0,0] index of the array in the upper left or lower left
corner of the axes.
colormap : callable, used only for `mono` image.
Function of the form [x -> (r,g,b)] or [x -> (r,g,b,a)]
for transforming a mono image into RGB.
It must output iterables of length 3 or 4, with values between
0. and 1. Hint: you can use colormaps from `matplotlib.cm`.
Returns
-------
PNG formatted byte string
"""
if np is None:
raise ImportError("The NumPy package is required"
" for this functionality")
if colormap is None:
colormap = lambda x: (x, x, x, 1)
array = np.atleast_3d(data)
height, width, nblayers = array.shape
if nblayers not in [1, 3, 4]:
raise ValueError("Data must be NxM (mono), "
"NxMx3 (RGB), or NxMx4 (RGBA)")
assert array.shape == (height, width, nblayers)
if nblayers == 1:
array = np.array(list(map(colormap, array.ravel())))
nblayers = array.shape[1]
if nblayers not in [3, 4]:
raise ValueError("colormap must provide colors of"
"length 3 (RGB) or 4 (RGBA)")
array = array.reshape((height, width, nblayers))
assert array.shape == (height, width, nblayers)
if nblayers == 3:
array = np.concatenate((array, np.ones((height, width, 1))), axis=2)
nblayers = 4
assert array.shape == (height, width, nblayers)
assert nblayers == 4
# Normalize to uint8 if it isn't already.
if array.dtype != 'uint8':
array = array * 255./array.max(axis=(0, 1)).reshape((1, 1, 4))
array = array.astype('uint8')
    # Flip the image if origin is 'lower'.
if origin == 'lower':
array = array[::-1, :, :]
# Transform the array to bytes.
raw_data = b''.join([b'\x00' + array[i, :, :].tobytes()
for i in range(height)])
def png_pack(png_tag, data):
chunk_head = png_tag + data
return (struct.pack("!I", len(data)) +
chunk_head +
struct.pack("!I", 0xFFFFFFFF & zlib.crc32(chunk_head)))
return b''.join([
b'\x89PNG\r\n\x1a\n',
png_pack(b'IHDR', struct.pack("!2I5B", width, height, 8, 6, 0, 0, 0)),
png_pack(b'IDAT', zlib.compress(raw_data, 9)),
png_pack(b'IEND', b'')])
def _camelify(out):
return (''.join(["_" + x.lower() if i < len(out)-1 and x.isupper() and out[i+1].islower() # noqa
else x.lower() + "_" if i < len(out)-1 and x.islower() and out[i+1].isupper() # noqa
else x.lower() for i, x in enumerate(list(out))])).lstrip('_').replace('__', '_') # noqa
def _parse_size(value):
try:
if isinstance(value, int) or isinstance(value, float):
value_type = 'px'
value = float(value)
assert value > 0
else:
value_type = '%'
value = float(value.strip('%'))
assert 0 <= value <= 100
except:
msg = "Cannot parse value {!r} as {!r}".format
raise ValueError(msg(value, value_type))
return value, value_type
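# Hedged examples of the two accepted forms (plain numbers are pixel sizes,
# strings ending in '%' are percentages):
# >>> _parse_size(400)
# (400.0, 'px')
# >>> _parse_size('80%')
# (80.0, '%')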
def _locations_mirror(x):
"""Mirrors the points in a list-of-list-of-...-of-list-of-points.
For example:
>>> _locations_mirror([[[1, 2], [3, 4]], [5, 6], [7, 8]])
[[[2, 1], [4, 3]], [6, 5], [8, 7]]
"""
if hasattr(x, '__iter__'):
if hasattr(x[0], '__iter__'):
return list(map(_locations_mirror, x))
else:
return list(x[::-1])
else:
return x
def _locations_tolist(x):
"""Transforms recursively a list of iterables into a list of list.
"""
if hasattr(x, '__iter__'):
return list(map(_locations_tolist, x))
else:
return x
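# Hedged example: nested tuples become nested lists, scalars are left as-is.
# >>> _locations_tolist(((1, 2), (3, 4)))
# [[1, 2], [3, 4]]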
| mit |
xiaoxiamii/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we vary the model complexity through the choice of
relevant model parameters, and we measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
     'complexity_label': 'non-zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
costypetrisor/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 43 | 1791 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision tree.
The :ref:`decision trees <tree>`
are used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, they learn local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
clf_1 = DecisionTreeRegressor(max_depth=2)
clf_2 = DecisionTreeRegressor(max_depth=5)
clf_3 = DecisionTreeRegressor(max_depth=8)
clf_1.fit(X, y)
clf_2.fit(X, y)
clf_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = clf_1.predict(X_test)
y_2 = clf_2.predict(X_test)
y_3 = clf_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
jkitchin/jasp | jasp/jasp_bandstructure.py | 3 | 3749 | '''Calculate bandstructure diagrams in jasp'''
from jasp import *
import os
import matplotlib.pyplot as plt
from ase.dft import DOS
def get_bandstructure(self,
kpts_path=None,
kpts_nintersections=10):
"""Calculate band structure along :param kpts_path:
:param list kpts_path: list of tuples of (label, k-point) to
calculate path on.
:param int kpts_nintersections: is the number of points between
points in band structures. More makes the bands smoother. See
:func:`jasp_kpts.write_kpoints`.
>>> from jasp import *
>>> from jasp.jasp_bandstructure import *
>>> with jasp('bulk/tio2/step3') as calc:
... n, bands, p = calc.get_bandstructure(kpts_path=[('$\Gamma$',[0.0, 0.0, 0.0]),
('X',[0.5, 0.5, 0.0]),
('X',[0.5, 0.5, 0.0]),
('M',[0.0, 0.5, 0.5]),
('M',[0.0, 0.5, 0.5]),
('$\Gamma$',[0.0, 0.0, 0.0])])
>>> p.savefig('images/tio2-bandstructure-dos.png')
returns (npoints, band_energies, fighandle)
"""
kpts = [k[1] for k in kpts_path]
labels = [k[0] for k in kpts_path]
dos = DOS(self, width=0.2)
d = dos.get_dos()
e = dos.get_energies()
ef = self.get_fermi_level()
# run in non-selfconsistent directory
cwd = os.getcwd()
base, end = os.path.split(cwd)
wd = cwd + '/bandstructure'
self.clone(wd)
with jasp(wd,
kpts=kpts,
kpts_nintersections=kpts_nintersections,
reciprocal=True,
nsw=0, # no ionic updates required
isif=None,
ibrion=None,
debug=logging.DEBUG,
icharg=11) as calc:
calc.calculate()
fig = plt.figure()
with open('EIGENVAL') as f:
line1 = f.readline()
line2 = f.readline()
line3 = f.readline()
line4 = f.readline()
comment = f.readline()
unknown, npoints, nbands = [int(x) for x in f.readline().split()]
blankline = f.readline()
band_energies = [[] for i in range(nbands)]
for i in range(npoints):
x, y, z, weight = [float(x) for x in f.readline().split()]
for j in range(nbands):
fields = f.readline().split()
id, energy = int(fields[0]), float(fields[1])
band_energies[id-1].append(energy)
blankline = f.readline()
f.close()
ax1 = plt.subplot(121)
for i in range(nbands):
plt.plot(range(npoints), np.array(band_energies[i]) - ef)
ax = plt.gca()
ax.set_xticks([]) # no tick marks
plt.xlabel('k-vector')
plt.ylabel('Energy (eV)')
    nticks = len(labels) // 2 + 1
ax.set_xticks(np.linspace(0, npoints, nticks))
L = []
L.append(labels[0])
for i in range(2, len(labels)):
if i % 2 == 0:
L.append(labels[i])
else:
pass
L.append(labels[-1])
ax.set_xticklabels(L)
plt.axhline(0, c='r')
plt.subplot(122, sharey=ax1)
plt.plot(d, e)
plt.axhline(0, c='r')
plt.ylabel('energy (eV)')
plt.xlabel('DOS')
plt.subplots_adjust(wspace=0.26)
plt.show()
return (npoints, band_energies, fig)
Vasp.get_bandstructure = get_bandstructure
| gpl-2.0 |
CondensedOtters/PHYSIX_Utils | Projects/Moog_2016-2019/CO2/CO2_NN/analysis.py | 1 | 9175 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 14 05:54:11 2020
@author: mathieumoog
"""
import cpmd
import filexyz
import numpy as np
import matplotlib.pyplot as plt
# MSMbuilder ( lacks CK validation )
from msmbuilder.msm import MarkovStateModel
from msmbuilder.msm import BayesianMarkovStateModel
from msmbuilder.utils import dump
# PyEMMMA ( has CK validation )
import pyemma as pe
from pyemma.datasets import double_well_discrete
def getDistance1Dsq( position1, position2, length):
dist = position1-position2
half_length = length*0.5
if dist > half_length :
dist -= length
elif dist < -half_length:
dist += length
return dist*dist
def getDistanceOrtho( positions, index1, index2, cell_lengths ):
dist=0
for i in range(3):
dist += getDistance1Dsq( positions[index1,i], positions[index2,i], cell_lengths[i] )
return np.sqrt(dist)
def computeContactMatrix( positions, cell_lengths, cut_off ):
nb_atoms = len(positions[:,0])
matrix = np.zeros(( nb_atoms, nb_atoms ))
for atom in range(nb_atoms):
for atom2 in range(atom+1,nb_atoms):
if getDistanceOrtho( positions, atom, atom2, cell_lengths ) < cut_off :
matrix[atom,atom2] = 1
matrix[atom2,atom] = 1
return matrix
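# Minimal sketch of the contact-matrix construction (toy coordinates, not from
# the simulation): atoms closer than the cut-off are marked as bonded, with
# distances taken under the orthorhombic minimum-image convention above.
# >>> pos = np.array([[0.0, 0.0, 0.0],
# ...                 [1.2, 0.0, 0.0],
# ...                 [4.0, 0.0, 0.0]])
# >>> computeContactMatrix(pos, np.ones(3)*8.82, 1.75)
# array([[0., 1., 0.],
#        [1., 0., 0.],
#        [0., 0., 0.]])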
def computeTransitionMatrix( states, nb_states, tau, step_max ):
nb_step = len(states)
matrix = np.zeros((nb_states,nb_states))
for step in range( nb_step-step_max ):
matrix[ states[step], states[step+tau] ] += 1
return matrix
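# Small worked example (toy state sequence, not from the trajectory): the
# matrix holds raw counts of transitions from s(t) to s(t+tau); rows are only
# normalised into probabilities later in the script.
# >>> computeTransitionMatrix(np.array([0, 1, 1, 0]), 2, 1, 1)
# array([[0., 1.],
#        [1., 1.]])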
def computeChapmanKolmogorov( matrix, nb_states ):
matrix_ck = np.zeros((nb_states,nb_states),dtype=float)
for state_i in range( nb_states ):
for state_j in range( nb_states ):
for i in range(nb_states):
matrix_ck[ state_i, state_j ] += matrix[state_i,i]*matrix[i,state_j]
return matrix_ck
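# The Chapman-Kolmogorov test used below compares P(2*tau), estimated directly
# from the trajectory, with the square of P(tau); the two should agree for a
# Markovian process. This helper returns exactly that square, e.g. for a toy
# 2-state matrix (values shown rounded):
# >>> P = np.array([[0.9, 0.1], [0.2, 0.8]])
# >>> computeChapmanKolmogorov(P, 2)   # same as np.dot(P, P)
# array([[0.83, 0.17],
#        [0.34, 0.66]])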
volume=8.82
temperature=3000
# run_nb=1
path_sim = str( "/Users/mathieumoog/Documents/CO2/" +
str(volume) + "/" +
str(temperature) + "K/"
# + str(run_nb) + "-run/"
)
cell_lengths = np.ones(3)*volume
traj_path = str( path_sim + "TRAJEC_fdb_wrapped.xyz" )
traj = filexyz.readAsArray( traj_path )
nbC=32
nbO=64
nb_atoms=nbC+nbO
max_neigh=5
nb_step=len(traj[:,0,0])
cut_off = 1.75
min_stat=1000
# Build States
coordC = np.zeros( (nb_step,nbC), dtype=int )
coordO = np.zeros( (nb_step,nbO), dtype=int )
for step in range(nb_step):
matrix = computeContactMatrix( traj[step,:,:], cell_lengths, cut_off)
for carbon in range(0,nbC):
coordC[ step, carbon ] = int( sum(matrix[carbon,:]) )
for oxygen in range(nbC,nb_atoms):
coordO[ step, oxygen-nbC ] = int( sum(matrix[oxygen,:]) )
c_min = coordC.min()
o_min = coordO.min()
# Adapting the labels to make sure they are in the 0-nb_states range
coordC -= c_min
coordO -= o_min
msm = MarkovStateModel( lag_time=1, n_timescales=6)
msm.fit( coordC[:,0] )
msm.timescales_
# Computing nb of states (max)
nb_states_C = coordC.max()+1
nb_states_O = coordO.max()+1
# Computing Equilibrium States Probabilities
coordC_hist = np.zeros( nb_states_C )
ones_ = np.ones((nb_step,nbC), dtype=int )
for i in range( nb_states_C ):
coordC_hist[i] = sum( ones_[ coordC == i ] )
# Clean marginal states
# for state in range( nb_states_C ):
# if coordC_hist[state] < min_stat:
# mask_to_clean = coordC[ :, : ]
coordC_hist /= sum(coordC_hist[:])
# Computing Equilibrium States Probabilities, cleaning marginals
ones_ = np.ones((nb_step,nbO), dtype=int )
coordO_hist = np.zeros( nb_states_O )
for i in range( nb_states_O ):
coordO_hist[i] = sum( ones_[ coordO == i ] )
coordO_hist /= sum(coordO_hist[:])
# Plotting Oxygens
plt.figure()
plt.plot(coordC_hist,"b.-")
plt.plot(coordO_hist,"r.-")
plt.legend(["C states","O states"])
plt.show()
dt=5*0.001
frac = 0.75
max_step=int(nb_step*frac)
nb_tau_min=int(250)
nb_tau_max=int(2*nb_tau_min)
# Computing Transition Matrix for a given tau
matrix_tot=np.zeros((nb_states_C,nb_states_C,nb_tau_max), dtype=float )
matrix_tot_ck=np.zeros((nb_states_C,nb_states_C,nb_tau_min), dtype=float )
for tau in range(nb_tau_max):
matrix = np.zeros((nb_states_C,nb_states_C),dtype=float)
for carbon in range(nbC):
matrix += computeTransitionMatrix( coordC[:,carbon], nb_states_C, tau+1, max_step )
for state in range(nb_states_C):
matrix[state,:] /= sum( matrix[state,:] )
matrix_tot[:,:,tau] = matrix[:,:]
if tau < nb_tau_min:
matrix_tot_ck[:,:,tau] = computeChapmanKolmogorov( matrix_tot[:,:,tau], nb_states_C )
carbon_target=3
matrix_markov = np.zeros( (4,4,nb_tau_min), dtype=float )
matrix_markov_ck = np.zeros( (4,4,nb_tau_min), dtype=float )
for tau in range(1,nb_tau_min+1):
msm_matrix = MarkovStateModel( lag_time=tau, reversible_type="mle" ,n_timescales=nb_states_C, ergodic_cutoff="on", sliding_window=True, verbose=True)
msm_matrix.fit( coordC[:,carbon_target] )
matrix_markov[:,:,tau-1] = msm_matrix.transmat_
for state_i in range( len(matrix_markov) ):
for state_j in range( len(matrix_markov) ):
for i in range( len(matrix_markov) ):
matrix_markov_ck[ state_i, state_j, tau-1 ] += matrix_markov[state_i,i,tau-1]*matrix_markov[i,state_j,tau-1]
# PyEMMA
lags = [1,5,10,15,20,50,100,200]
implied_timescales = pe.msm.its(dtrajs=coordC[:,carbon_target].tolist(),lags=lags)
pe.plots.plot_implied_timescales(implied_timescales,units='time-steps', ylog=False)
M = pe.msm.estimate_markov_model(dtrajs=coordC[:,carbon_target].tolist(), lag = 10 )
cktest = M.cktest(nsets=3)
cktplt = pe.plots.plot_cktest(cktest)
plt.figure()
plt.xlabel("Time lag (ps)")
plt.ylabel("P_ij, P_ij^CK")
# plt.plot( np.arange(0,dt*nb_tau_max,dt*1), matrix_tot[0,0,:], "k-" )
# plt.plot( np.arange(0,dt*nb_tau_max,dt*2), matrix_tot_ck[0,0,:], "k--" )
plt.plot( np.arange(0,dt*nb_tau_min,dt*1), matrix_markov[0,0,:], "k-" )
plt.plot( np.arange(0,2*dt*nb_tau_min,dt*2), matrix_markov_ck[0,0,:], "k--" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*1), matrix_tot[1,1,:], "r-" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*2), matrix_tot_ck[1,1,:], "r--" )
plt.plot( np.arange(0,dt*nb_tau_min,dt*1), matrix_markov[0,1,:], "k-" )
plt.plot( np.arange(0,2*dt*nb_tau_min,dt*2), matrix_markov_ck[0,1,:], "k--" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*1), matrix_tot[1,2,:], "b-" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*2), matrix_tot_ck[1,2,:], "b--" )
plt.plot( np.arange(0,dt*nb_tau_min,dt*1), matrix_markov[0,2,:], "k-" )
plt.plot( np.arange(0,2*dt*nb_tau_min,dt*2), matrix_markov_ck[0,2,:], "k--" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*1), matrix_tot[1,3,:], "g-" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*2), matrix_tot_ck[1,3,:], "g--" )
plt.plot( np.arange(0,dt*nb_tau_min,dt*1), matrix_markov[0,3,:], "k-" )
plt.plot( np.arange(0,2*dt*nb_tau_min,dt*2), matrix_markov_ck[0,3,:], "k--" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*1), matrix_tot[1,4,:], "m-" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*2), matrix_tot_ck[1,4,:], "m--" )
plt.show()
rmseC = np.zeros(nb_tau_min, dtype=float)
for tau in range(nb_tau_min):
mat = matrix_tot[:,:,2*tau]-matrix_tot_ck[:,:,tau]
rmseC[tau] = sum(sum( mat*mat ))/(nb_states_C*nb_states_C)
plt.figure()
plt.xlabel("Time lag (ps)")
plt.ylabel("RMSE C (%)")
plt.plot( np.arange(0,dt*nb_tau_max,dt*2), rmseC*100 )
plt.show()
matrix_tot=np.zeros((nb_states_O,nb_states_O,nb_tau_max), dtype=float )
matrix_tot_ck=np.zeros((nb_states_O,nb_states_O,nb_tau_min), dtype=float )
for tau in range(nb_tau_max):
matrix = np.zeros((nb_states_O,nb_states_O),dtype=float)
    for oxygen in range(nbO):
        matrix += computeTransitionMatrix( coordO[:,oxygen], nb_states_O, tau, max_step )
for state in range(nb_states_O):
matrix[state,:] /= sum( matrix[state,:] )
matrix_tot[:,:,tau] = matrix[:,:]
if tau < nb_tau_min:
matrix_tot_ck[:,:,tau] = computeChapmanKolmogorov( matrix_tot[:,:,tau], nb_states_O )
plt.figure()
plt.xlabel("Time lag (ps)")
plt.ylabel("P_ij, P_ij^CK")
plt.plot( np.arange(0,dt*nb_tau_max,dt*1), matrix_tot[0,0,:], "k-" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*2), matrix_tot_ck[0,0,:], "k--" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*1), matrix_tot[1,1,:], "r-" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*2), matrix_tot_ck[1,1,:], "r--" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*1), matrix_tot[2,2,:], "b-" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*2), matrix_tot_ck[2,2,:], "b--" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*1), matrix_tot[3,3,:], "g-" )
plt.plot( np.arange(0,dt*nb_tau_max,dt*2), matrix_tot_ck[3,3,:], "g--" )
plt.show()
rmseO = np.zeros(nb_tau_min, dtype=float)
for tau in range(nb_tau_min):
mat = matrix_tot[:,:,2*tau]-matrix_tot_ck[:,:,tau]
rmseO[tau] = sum(sum( mat*mat ))/(nb_states_O*nb_states_O)
plt.figure()
plt.xlabel("Time lag (ps)")
plt.ylabel("RMSE O (%)")
plt.plot( np.arange(0,dt*nb_tau_max,dt*2), rmseO*100 )
plt.show()
plt.figure()
plt.xlabel("Time lag (ps)")
plt.ylabel("RMSE all (%)")
plt.plot( np.arange(0,dt*nb_tau_max,dt*2), (rmseO+rmseC)*100*0.5 )
plt.show()
| gpl-3.0 |
timestocome/Test-stock-prediction-algorithms | StockMarketLinearRegression/PredictGold.py | 2 | 3318 | # http://github.com/timestocome
# Attempt to predict gold prices and find outliers
# http://web.ipac.caltech.edu/staff/fmasci/home/astro_refs/TestForRandomness_RunsTest.pdf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
######################################################################
# load data
########################################################################
# read in gold file
data = pd.read_csv('data/Gold_all.csv', parse_dates=True, index_col=0)
data = data[['Open']]
# convert to log values
#data['Open'] = np.log(data['Open'])
data['Open'] = pd.to_numeric(data['Open'], errors='coerce')
data['Volatility'] = data['Open'] - data['Open'].shift(1)
data = data.dropna()
gold_standard = data.loc[data.index < '01-01-1971']
gold = data.loc[data.index > '01-01-1971']
print(len(gold_standard), len(gold))
########################################################################
# try to fit linear regression model
from sklearn import linear_model
x1 = np.arange(1, len(gold)+ 1)
x2 = x1 **2
x3 = x1 **3
x4 = x1 **4 # best so far
x = [x1, x2, x3, x4]
x = np.reshape(x, (4, len(gold))).T
print(x.shape)
regression = linear_model.LinearRegression()
regression.fit(x, gold['Open'])
coeffs = regression.coef_
intercept = regression.intercept_
print(coeffs[0], coeffs[1])
gold['Regression'] = intercept + coeffs[0] * x1 + coeffs[1] * x2 + coeffs[2] * x3 + coeffs[3] * x4
gold['Residuals'] = gold['Open'] - gold['Regression']
std_regression = gold['Regression'].std()
std_open = gold['Open'].std()
##################################################################
# Run's Test, part 3 of paper
gold_mean = gold['Open'].mean()
runs = gold['Open'] > gold['Regression']
# convert runs data to number of runs
R = 0
r_prev = runs.iloc[0]
for r in runs:
if r != r_prev: R += 1
r_prev = r
T = len(runs)
Ta = runs.sum()
Tb = T - Ta
E = (T + 2 * Ta * Tb) / T # expected runs
V = (2 * Ta * Tb * (2*Ta*Tb - T)) / (T **2 * (T - 1)) # variance of runs
Z1 = (R - E) / std_open
Z2 = (R -E) / std_regression
print("Run's Test Results")
print("R %lf, E %lf, V %lf" % (R, E, V))
print("Z (not random if Z > +/- 2.5)", Z1, Z2)
print("Regression:")
print("Start date", gold.ix[-1])
print("Start step", len(x))
print("intercept", intercept)
print("coeff", coeffs)
#######################################################################
# predict next 12 months ~253 trading days
dates = pd.bdate_range('1971-01-01', '2018-12-31')
x1 = np.arange(1, len(dates) + 1)
x2 = x1 **2
x3 = x1 **3
x4 = x1 **4
gold_futures = intercept + coeffs[0] * x1 + coeffs[1] * x2 + coeffs[2] * x3 + coeffs[3] * x4
std_regression = gold['Regression'].std()
predicted = pd.DataFrame(data=gold_futures, index=dates)
predicted.index.name = 'Date'
predicted.columns = ['Open']
actual = pd.read_csv('data/Gold_all.csv', parse_dates=True, index_col=0)
actual = actual.loc[actual.index > '01-01-1971']
actual = actual['Open']
plt.figure(figsize=(18, 16))
plt.plot(actual, label="Actual")
plt.plot(predicted, label="Predicted")
plt.plot(predicted - std_regression, label='Predicted - std')
plt.plot(predicted + std_regression, label='Predicted + std')
plt.legend(loc='best')
plt.title("Gold 1971 - predicted 2019")
plt.savefig("Gold_Predictions_2018.png")
plt.show()
| mit |
aerrami/mtools | mtools/test/test_all_import.py | 7 | 1549 | from nose.tools import nottest, make_decorator
from functools import wraps
# tools without any external dependencies
from mtools.mlogfilter.mlogfilter import MLogFilterTool
from mtools.mlogvis.mlogvis import MLogVisTool
from mtools.mloginfo.mloginfo import MLogInfoTool
tools = [MLogFilterTool, MLogVisTool, MLogInfoTool]
# mlaunch depends on pymongo
try:
from mtools.mlaunch.mlaunch import MLaunchTool
tools.append(MLaunchTool)
except ImportError:
pass
# mplotqueries depends on matplotlib
try:
from mtools.mplotqueries.mplotqueries import MPlotQueriesTool
tools.append(MPlotQueriesTool)
except ImportError:
pass
def all_tools(fn):
""" This is a decorator for test functions, that runs a loop over all command line tool
classes imported above and passes each class to the test function.
To use this decorator, the test function must accept a single parameter. Example:
@all_tools
def test_something(tool_cls):
tool = tool_cls()
# test tool here ...
"""
@wraps(fn) # copies __name__ of the original function, nose requires the name to start with "test_"
def new_func():
for tool in tools:
fn(tool)
return new_func
def test_import_all():
""" Import all tools from mtools module.
The tools that have external dependencies will only be imported if the dependencies are fulfilled.
This test just passes by default because the imports are tested implicitly by loading this file.
"""
pass
| apache-2.0 |
nikitasingh981/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 70 | 7486 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/sites/default/files/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely, if
alpha is selected too large, the Lasso is equivalent to stepwise regression,
and thus brings no advantage over a univariate F-test.
In a second step, we set alpha and compare the performance of the different
feature selection methods, using the area under the precision-recall curve
(AUC).
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.exceptions import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
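# Reading aid (a paraphrase, not a definitive statement of [Wainwright2006]):
# the irrelevant columns are regressed onto the relevant ones through the
# pseudo-inverse, and the score is the largest L1 norm of the resulting rows;
# values well below 1 correspond to designs where L1-recovery is expected to
# succeed.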
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log, and enables to
# see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
# Stop the user warnings outputs- they are not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
# Run the RandomizedLasso: we use a paths going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
| bsd-3-clause |
gskielian/SimpleCV | scripts/install/win/OpenKinect/freenect-examples/demo_mp_async.py | 15 | 1082 | #!/usr/bin/env python
import freenect
import matplotlib.pyplot as mp
import signal
import frame_convert
mp.ion()
image_rgb = None
image_depth = None
keep_running = True
def display_depth(dev, data, timestamp):
global image_depth
data = frame_convert.pretty_depth(data)
mp.gray()
mp.figure(1)
if image_depth:
image_depth.set_data(data)
else:
image_depth = mp.imshow(data, interpolation='nearest', animated=True)
mp.draw()
def display_rgb(dev, data, timestamp):
global image_rgb
mp.figure(2)
if image_rgb:
image_rgb.set_data(data)
else:
image_rgb = mp.imshow(data, interpolation='nearest', animated=True)
mp.draw()
def body(*args):
if not keep_running:
raise freenect.Kill
def handler(signum, frame):
global keep_running
keep_running = False
print('Press Ctrl-C in terminal to stop')
signal.signal(signal.SIGINT, handler)
freenect.runloop(depth=display_depth,
video=display_rgb,
body=body)
| bsd-3-clause |
h2educ/scikit-learn | examples/covariance/plot_outlier_detection.py | 235 | 3891 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
    X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
    X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
| bsd-3-clause |
rubikloud/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
molpopgen/pyseq | docs/conf.py | 2 | 9974 | # -*- coding: utf-8 -*-
#
# pylibseq documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 19 19:11:29 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import subprocess
import shlex
#os.environ['LD_LIBRARY_PATH']=sys.prefix+'/lib'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
if (os.environ.get('READTHEDOCS')=="True") is False:
sys.path.insert(0, os.path.abspath('..'))
else:
import site
p=site.getsitepackages()[0]
sys.path.insert(0,p)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinxcontrib.bibtex',
'matplotlib.sphinxext.plot_directive',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pylibseq'
copyright = u'2015, Kevin Thornton'
author = u'Kevin Thornton'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2.3'
# The full version, including alpha/beta/rc tags.
release = '0.2.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
if (os.environ.get('READTHEDOCS')=="True") is True:
html_theme_options = {
'github_user':'molpopgen',
'github_repo':'pylibseq',
# 'github_button':True,
# 'github_banner':True,
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pylibseqdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pylibseq.tex', u'pylibseq Documentation',
u'Kevin Thornton', 'manual'),
]
autoclass_content = 'both'
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pylibseq', u'pylibseq Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pylibseq', u'pylibseq Documentation',
author, 'pylibseq', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gpl-2.0 |
sriki18/scipy | scipy/signal/_max_len_seq.py | 41 | 4942 | # Author: Eric Larson
# 2014
"""Tools for MLS generation"""
import numpy as np
from ._max_len_seq_inner import _max_len_seq_inner
__all__ = ['max_len_seq']
# These are definitions of linear shift register taps for use in max_len_seq()
_mls_taps = {2: [1], 3: [2], 4: [3], 5: [3], 6: [5], 7: [6], 8: [7, 6, 1],
9: [5], 10: [7], 11: [9], 12: [11, 10, 4], 13: [12, 11, 8],
14: [13, 12, 2], 15: [14], 16: [15, 13, 4], 17: [14],
18: [11], 19: [18, 17, 14], 20: [17], 21: [19], 22: [21],
23: [18], 24: [23, 22, 17], 25: [22], 26: [25, 24, 20],
27: [26, 25, 22], 28: [25], 29: [27], 30: [29, 28, 7],
31: [28], 32: [31, 30, 10]}
def max_len_seq(nbits, state=None, length=None, taps=None):
"""
Maximum length sequence (MLS) generator.
Parameters
----------
nbits : int
Number of bits to use. Length of the resulting sequence will
be ``(2**nbits) - 1``. Note that generating long sequences
(e.g., greater than ``nbits == 16``) can take a long time.
state : array_like, optional
If array, must be of length ``nbits``, and will be cast to binary
(bool) representation. If None, a seed of ones will be used,
producing a repeatable representation. If ``state`` is all
zeros, an error is raised as this is invalid. Default: None.
length : int, optional
Number of samples to compute. If None, the entire length
``(2**nbits) - 1`` is computed.
taps : array_like, optional
Polynomial taps to use (e.g., ``[7, 6, 1]`` for an 8-bit sequence).
If None, taps will be automatically selected (for up to
``nbits == 32``).
Returns
-------
seq : array
Resulting MLS sequence of 0's and 1's.
state : array
The final state of the shift register.
Notes
-----
The algorithm for MLS generation is generically described in:
https://en.wikipedia.org/wiki/Maximum_length_sequence
The default values for taps are specifically taken from the first
option listed for each value of ``nbits`` in:
http://www.newwaveinstruments.com/resources/articles/
m_sequence_linear_feedback_shift_register_lfsr.htm
.. versionadded:: 0.15.0
Examples
--------
MLS uses binary convention:
>>> from scipy.signal import max_len_seq
>>> max_len_seq(4)[0]
array([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0], dtype=int8)
MLS has a white spectrum (except for DC):
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
>>> from numpy.fft import fft, ifft, fftshift, fftfreq
>>> seq = max_len_seq(6)[0]*2-1 # +1 and -1
>>> spec = fft(seq)
>>> N = len(seq)
>>> plt.plot(fftshift(fftfreq(N)), fftshift(np.abs(spec)), '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
Circular autocorrelation of MLS is an impulse:
>>> acorrcirc = ifft(spec * np.conj(spec)).real
>>> plt.figure()
>>> plt.plot(np.arange(-N/2+1, N/2+1), fftshift(acorrcirc), '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
Linear autocorrelation of MLS is approximately an impulse:
>>> acorr = np.correlate(seq, seq, 'full')
>>> plt.figure()
>>> plt.plot(np.arange(-N+1, N), acorr, '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
"""
if taps is None:
if nbits not in _mls_taps:
known_taps = np.array(list(_mls_taps.keys()))
raise ValueError('nbits must be between %s and %s if taps is None'
% (known_taps.min(), known_taps.max()))
taps = np.array(_mls_taps[nbits], np.intp)
else:
taps = np.unique(np.array(taps, np.intp))[::-1]
if np.any(taps < 0) or np.any(taps > nbits) or taps.size < 1:
raise ValueError('taps must be non-empty with values between '
'zero and nbits (inclusive)')
taps = np.ascontiguousarray(taps) # needed for Cython
n_max = (2**nbits) - 1
if length is None:
length = n_max
else:
length = int(length)
if length < 0:
raise ValueError('length must be greater than or equal to 0')
# We use int8 instead of bool here because numpy arrays of bools
# don't seem to work nicely with Cython
if state is None:
state = np.ones(nbits, dtype=np.int8, order='c')
else:
# makes a copy if need be, ensuring it's 0's and 1's
state = np.array(state, dtype=bool, order='c').astype(np.int8)
if state.ndim != 1 or state.size != nbits:
raise ValueError('state must be a 1-dimensional array of size nbits')
if np.all(state == 0):
raise ValueError('state must not be all zeros')
seq = np.empty(length, dtype=np.int8, order='c')
state = _max_len_seq_inner(taps, state, nbits, length, seq)
return seq, state
| bsd-3-clause |
nomadcube/scikit-learn | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause |
mikelseverson/Udacity-Deep_Learning-Nanodegree | weight-initialization/helper.py | 153 | 3649 | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def hist_dist(title, distribution_tensor, hist_range=(-4, 4)):
"""
Display histogram of a TF distribution
"""
with tf.Session() as sess:
values = sess.run(distribution_tensor)
plt.title(title)
    plt.hist(values, np.linspace(*hist_range, num=len(values) // 2))
plt.show()
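# Example call (a sketch, assuming the TF1-style graph mode used throughout
# this helper and a 1-D distribution tensor as expected by plt.hist):
# hist_dist('Random uniform [-3, 3)',
#           tf.random_uniform([1000], minval=-3, maxval=3))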
def _get_loss_acc(dataset, weights):
"""
Get losses and validation accuracy of example neural network
"""
batch_size = 128
epochs = 2
learning_rate = 0.001
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
learn_rate = tf.placeholder(tf.float32)
biases = [
tf.Variable(tf.zeros([256])),
tf.Variable(tf.zeros([128])),
tf.Variable(tf.zeros([dataset.train.labels.shape[1]]))
]
# Layers
layer_1 = tf.nn.relu(tf.matmul(features, weights[0]) + biases[0])
layer_2 = tf.nn.relu(tf.matmul(layer_1, weights[1]) + biases[1])
logits = tf.matmul(layer_2, weights[2]) + biases[2]
# Training loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# Optimizer
optimizer = tf.train.AdamOptimizer(learn_rate).minimize(loss)
# Accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Measurements use for graphing loss
loss_batch = []
with tf.Session() as session:
session.run(tf.global_variables_initializer())
batch_count = int((dataset.train.num_examples / batch_size))
# The training cycle
for epoch_i in range(epochs):
for batch_i in range(batch_count):
batch_features, batch_labels = dataset.train.next_batch(batch_size)
# Run optimizer and get loss
session.run(
optimizer,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
l = session.run(
loss,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
loss_batch.append(l)
valid_acc = session.run(
accuracy,
feed_dict={features: dataset.validation.images, labels: dataset.validation.labels, learn_rate: 1.0})
# Hack to Reset batches
dataset.train._index_in_epoch = 0
dataset.train._epochs_completed = 0
return loss_batch, valid_acc
def compare_init_weights(
dataset,
title,
weight_init_list,
plot_n_batches=100):
"""
Plot loss and print stats of weights using an example neural network
"""
colors = ['r', 'b', 'g', 'c', 'y', 'k']
label_accs = []
label_loss = []
    assert len(weight_init_list) <= len(colors), 'Too many initial weights to plot'
for i, (weights, label) in enumerate(weight_init_list):
loss, val_acc = _get_loss_acc(dataset, weights)
plt.plot(loss[:plot_n_batches], colors[i], label=label)
label_accs.append((label, val_acc))
label_loss.append((label, loss[-1]))
plt.title(title)
plt.xlabel('Batches')
plt.ylabel('Loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
print('After 858 Batches (2 Epochs):')
print('Validation Accuracy')
for label, val_acc in label_accs:
print(' {:7.3f}% -- {}'.format(val_acc*100, label))
print('Loss')
for label, loss in label_loss:
print(' {:7.3f} -- {}'.format(loss, label))
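# Example usage (a sketch, not part of the original exercise): `mnist` would be
# a DataSets object, e.g. from tensorflow.examples.tutorials.mnist, and every
# weight list must match the 784 -> 256 -> 128 -> n_classes layout assumed by
# _get_loss_acc above.
# from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# uniform = [tf.Variable(tf.random_uniform([784, 256], -1, 1)),
#            tf.Variable(tf.random_uniform([256, 128], -1, 1)),
#            tf.Variable(tf.random_uniform([128, 10], -1, 1))]
# normal = [tf.Variable(tf.random_normal([784, 256], stddev=0.1)),
#           tf.Variable(tf.random_normal([256, 128], stddev=0.1)),
#           tf.Variable(tf.random_normal([128, 10], stddev=0.1))]
# compare_init_weights(mnist, 'Uniform vs. normal initialization',
#                      [(uniform, 'Uniform [-1, 1)'),
#                       (normal, 'Normal stddev=0.1')])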
| mit |
JPalmerio/GRB_population_code | catalogs/GBM_cat/GBM_Ep_constraint_testing.py | 1 | 1178 | import sys
import platform
if platform.system() == 'Linux':
sys.path.insert(0,'/nethome/palmerio/Dropbox/Plotting_GUI/Src')
elif platform.system() == 'Darwin':
sys.path.insert(0,'/Users/palmerio/Dropbox/Plotting_GUI/Src')
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import plotting_functions as pf
from matplotlib.transforms import blended_transform_factory
plt.style.use('ggplot')
fig = plt.figure()
ax = fig.add_subplot(111)
root_dir = '/nethome/palmerio/1ere_annee/Frederic/GRB_population_code/Model_outputs/'
filename = root_dir +'run_LIA/EpGBM_constraint.dat'
Ep_bins = pf.read_data(filename, 0)
Ep_hist_mod = pf.read_data(filename, 1)
Ep_hist_obs = pf.read_data(filename, 2)
x=np.linspace(1.,4., 500)
y = max(Ep_hist_obs) * pf.gaussian(x, 2.25, 0.35)
y2 = max(Ep_hist_obs) * pf.gaussian(x, 2.25, 0.375)
ep = np.linspace(1,4, 100)
ep_gauss = pf.gaussian(ep, 2.2, 0.4)*max(Ep_hist_obs)
ax.plot(Ep_bins, Ep_hist_obs, label = 'Observations')
#ax.plot(Ep_bins, Ep_hist_mod, label = 'MC simulation')
#ax.plot(ep, ep_gauss, ls=':', lw=2)
ax.plot(x,y, label='gaussian')
ax.plot(x,y2, label='gaussian2')
ax.legend(loc='best')
plt.show()
| gpl-3.0 |
almarklein/scikit-image | doc/examples/plot_regionprops.py | 2 | 1300 | """
=========================
Measure region properties
=========================
This example shows how to measure properties of labelled image regions.
"""
import math
import matplotlib.pyplot as plt
import numpy as np
from skimage.draw import ellipse
from skimage.morphology import label
from skimage.measure import regionprops
from skimage.transform import rotate
image = np.zeros((600, 600))
rr, cc = ellipse(300, 350, 100, 220)
image[rr,cc] = 1
image = rotate(image, angle=15, order=0)
label_img = label(image)
regions = regionprops(label_img)
plt.imshow(image)
for props in regions:
y0, x0 = props.centroid
orientation = props.orientation
x1 = x0 + math.cos(orientation) * 0.5 * props.major_axis_length
y1 = y0 - math.sin(orientation) * 0.5 * props.major_axis_length
x2 = x0 - math.sin(orientation) * 0.5 * props.minor_axis_length
y2 = y0 - math.cos(orientation) * 0.5 * props.minor_axis_length
plt.plot((x0, x1), (y0, y1), '-r', linewidth=2.5)
plt.plot((x0, x2), (y0, y2), '-r', linewidth=2.5)
plt.plot(x0, y0, '.g', markersize=15)
minr, minc, maxr, maxc = props.bbox
bx = (minc, maxc, maxc, minc, minc)
by = (minr, minr, maxr, maxr, minr)
plt.plot(bx, by, '-b', linewidth=2.5)
plt.gray()
plt.axis((0, 600, 600, 0))
plt.show()
| bsd-3-clause |
huzq/scikit-learn | examples/applications/plot_outlier_detection_wine.py | 17 | 5819 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the wine data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM does not assume any parametric form of the data distribution
and can therefore model the complex shape of the data much better.
First example
-------------
The first example illustrates how the Minimum Covariance Determinant
robust estimator can help concentrate on a relevant cluster when outlying
points exist. Here the empirical covariance estimation is skewed by points
outside of the main cluster. Of course, some screening tools would have pointed
out the presence of two clusters (Support Vector Machines, Gaussian Mixture
Models, univariate outlier detection, ...). But had it been a high-dimensional
example, none of these could be applied that easily.
"""
print(__doc__)
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_wine
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.25),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.25),
"OCSVM": OneClassSVM(nu=0.25, gamma=0.35)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Get data
X1 = load_wine()['data'][:, [1, 2]] # two clusters
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(0, 6, 500), np.linspace(1, 4.5, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list(legend1.values())
legend1_keys_list = list(legend1.keys())
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (wine recognition)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("outlying points", xy=(4, 2),
xycoords="data", textcoords="data",
xytext=(3, 1.25), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.ylabel("ash")
plt.xlabel("malic_acid")
plt.show()
# %%
# Second example
# --------------
# The second example shows the ability of the Minimum Covariance Determinant
# robust estimator of covariance to concentrate on the main mode of the data
# distribution: the location seems to be well estimated, although the
# covariance is hard to estimate due to the banana-shaped distribution. Anyway,
# we can get rid of some outlying observations. The One-Class SVM is able to
# capture the real data structure, but the difficulty is to adjust its kernel
# bandwidth parameter so as to obtain a good compromise between the shape of
# the data scatter matrix and the risk of over-fitting the data.
# Get data
X2 = load_wine()['data'][:, [6, 9]] # "banana"-shaped
# Learn a frontier for outlier detection with several classifiers
xx2, yy2 = np.meshgrid(np.linspace(-1, 5.5, 500), np.linspace(-2.5, 19, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend2_values_list = list(legend2.values())
legend2_keys_list = list(legend2.keys())
# Plot the results (= shape of the data points cloud)
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (wine recognition)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
(legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.ylabel("color_intensity")
plt.xlabel("flavanoids")
plt.show()
| bsd-3-clause |
lamastex/scalable-data-science | dbcArchives/2021/000_6-sds-3-x-dl/055_DLbyABr_04-ConvolutionalNetworks.py | 1 | 22551 | # Databricks notebook source
# MAGIC %md
# MAGIC ScaDaMaLe Course [site](https://lamastex.github.io/scalable-data-science/sds/3/x/) and [book](https://lamastex.github.io/ScaDaMaLe/index.html)
# MAGIC
# MAGIC This is a 2019-2021 augmentation and update of [Adam Breindel](https://www.linkedin.com/in/adbreind)'s initial notebooks.
# MAGIC
# MAGIC _Thanks to [Christian von Koch](https://www.linkedin.com/in/christianvonkoch/) and [William Anzén](https://www.linkedin.com/in/william-anz%C3%A9n-b52003199/) for their contributions towards making these materials Spark 3.0.1 and Python 3+ compliant._
# COMMAND ----------
# MAGIC %md
# MAGIC # Convolutional Neural Networks
# MAGIC ## aka CNN, ConvNet
# COMMAND ----------
# MAGIC %md
# MAGIC As a baseline, let's start a lab running with what we already know.
# MAGIC
# MAGIC We'll take our deep feed-forward multilayer perceptron network, with ReLU activations and reasonable initializations, and apply it to learning the MNIST digits.
# MAGIC
# MAGIC The main part of the code looks like the following (full code you can run is in the next cell):
# MAGIC
# MAGIC ```
# MAGIC # imports, setup, load data sets
# MAGIC
# MAGIC model = Sequential()
# MAGIC model.add(Dense(20, input_dim=784, kernel_initializer='normal', activation='relu'))
# MAGIC model.add(Dense(15, kernel_initializer='normal', activation='relu'))
# MAGIC model.add(Dense(10, kernel_initializer='normal', activation='softmax'))
# MAGIC model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy'])
# MAGIC
# MAGIC categorical_labels = to_categorical(y_train, num_classes=10)
# MAGIC
# MAGIC history = model.fit(X_train, categorical_labels, epochs=100, batch_size=100)
# MAGIC
# MAGIC # print metrics, plot errors
# MAGIC ```
# MAGIC
# MAGIC Note the changes, which are largely about building a classifier instead of a regression model:
# MAGIC * Output layer has one neuron per category, with softmax activation
# MAGIC * __Loss function is cross-entropy loss__
# MAGIC * Accuracy metric is categorical accuracy
# COMMAND ----------
# MAGIC %md
# MAGIC Let's hold pointers into wikipedia for these new concepts.
# COMMAND ----------
# MAGIC %scala
# MAGIC //This allows easy embedding of publicly available information into any other notebook
# MAGIC //Example usage:
# MAGIC // displayHTML(frameIt("https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation#Topics_in_LDA",250))
# MAGIC def frameIt( u:String, h:Int ) : String = {
# MAGIC """<iframe
# MAGIC src=""""+ u+""""
# MAGIC width="95%" height="""" + h + """"
# MAGIC sandbox>
# MAGIC <p>
# MAGIC <a href="http://spark.apache.org/docs/latest/index.html">
# MAGIC Fallback link for browsers that, unlikely, don't support frames
# MAGIC </a>
# MAGIC </p>
# MAGIC </iframe>"""
# MAGIC }
# MAGIC displayHTML(frameIt("https://en.wikipedia.org/wiki/Cross_entropy#Cross-entropy_error_function_and_logistic_regression",500))
# COMMAND ----------
# MAGIC %scala
# MAGIC displayHTML(frameIt("https://en.wikipedia.org/wiki/Softmax_function",380))
# COMMAND ----------
# MAGIC %md
# MAGIC The following is from: [https://www.quora.com/How-does-Keras-calculate-accuracy](https://www.quora.com/How-does-Keras-calculate-accuracy).
# MAGIC
# MAGIC **Categorical accuracy:**
# MAGIC
# MAGIC ```%python
# MAGIC def categorical_accuracy(y_true, y_pred):
# MAGIC return K.cast(K.equal(K.argmax(y_true, axis=-1),
# MAGIC K.argmax(y_pred, axis=-1)),
# MAGIC K.floatx())
# MAGIC ```
# MAGIC
# MAGIC > `K.argmax(y_true)` takes the highest value to be the prediction and matches against the comparative set.
# COMMAND ----------
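# MAGIC %md
# MAGIC As a quick aside (not part of the original lab), the next cell sketches with plain NumPy how softmax, categorical cross-entropy and categorical accuracy fit together for one mini-batch. All of the numbers below are made up purely for illustration.
# COMMAND ----------
import numpy as np
# made-up "logits" (raw output-layer scores) for a mini-batch of 3 examples and 4 classes
logits = np.array([[ 2.0,  0.5, -1.0, 0.1],
                   [ 0.2,  1.7,  0.3, 0.0],
                   [-0.5,  0.1,  0.4, 2.2]])
# one-hot targets: the true classes are 0, 1 and 2
targets = np.eye(4)[[0, 1, 2]]
# softmax: exponentiate and normalize each row into a probability distribution
exps = np.exp(logits - logits.max(axis=1, keepdims=True))   # subtract row max for numerical stability
probs = exps / exps.sum(axis=1, keepdims=True)
# categorical cross-entropy: -log(probability assigned to the true class), averaged over the batch
xent = -np.sum(targets * np.log(probs), axis=1).mean()
# categorical accuracy: fraction of rows where argmax(prediction) == argmax(target)
acc = np.mean(np.argmax(probs, axis=1) == np.argmax(targets, axis=1))
print("cross-entropy: %.4f   categorical accuracy: %.2f" % (xent, acc))
# COMMAND ----------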
# MAGIC %md
# MAGIC Watch (1:39)
# MAGIC * [![Udacity: Deep Learning by Vincent Vanhoucke - Cross-entropy](http://img.youtube.com/vi/tRsSi_sqXjI/0.jpg)](https://www.youtube.com/watch?v=tRsSi_sqXjI)
# MAGIC
# MAGIC Watch (1:54)
# MAGIC * [![Udacity: Deep Learning by Vincent Vanhoucke - Minimizing Cross-entropy](http://img.youtube.com/vi/x449QQDhMDE/0.jpg)](https://www.youtube.com/watch?v=x449QQDhMDE)
# COMMAND ----------
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
import sklearn.datasets
import datetime
import matplotlib.pyplot as plt
import numpy as np
train_libsvm = "/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-train.txt"
test_libsvm = "/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-test.txt"
X_train, y_train = sklearn.datasets.load_svmlight_file(train_libsvm, n_features=784)
X_train = X_train.toarray()
X_test, y_test = sklearn.datasets.load_svmlight_file(test_libsvm, n_features=784)
X_test = X_test.toarray()
model = Sequential()
model.add(Dense(20, input_dim=784, kernel_initializer='normal', activation='relu'))
model.add(Dense(15, kernel_initializer='normal', activation='relu'))
model.add(Dense(10, kernel_initializer='normal', activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy'])
categorical_labels = to_categorical(y_train, num_classes=10)
start = datetime.datetime.today()
history = model.fit(X_train, categorical_labels, epochs=40, batch_size=100, validation_split=0.1, verbose=2)
scores = model.evaluate(X_test, to_categorical(y_test, num_classes=10))
print()
for i in range(len(model.metrics_names)):
print("%s: %f" % (model.metrics_names[i], scores[i]))
print ("Start: " + str(start))
end = datetime.datetime.today()
print ("End: " + str(end))
print ("Elapse: " + str(end-start))
# COMMAND ----------
# MAGIC %md
# MAGIC after about a minute we have:
# MAGIC
# MAGIC ```
# MAGIC ...
# MAGIC
# MAGIC Epoch 40/40
# MAGIC 1s - loss: 0.0610 - categorical_accuracy: 0.9809 - val_loss: 0.1918 - val_categorical_accuracy: 0.9583
# MAGIC
# MAGIC ...
# MAGIC
# MAGIC loss: 0.216120
# MAGIC
# MAGIC categorical_accuracy: 0.955000
# MAGIC
# MAGIC Start: 2017-12-06 07:35:33.948102
# MAGIC
# MAGIC End: 2017-12-06 07:36:27.046130
# MAGIC
# MAGIC Elapse: 0:00:53.098028
# MAGIC ```
# COMMAND ----------
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
fig.set_size_inches((5,5))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
display(fig)
# COMMAND ----------
# MAGIC %md
# MAGIC What are the big takeaways from this experiment?
# MAGIC
# MAGIC 1. We get pretty impressive "apparent error" accuracy right from the start! A small network gets us to training accuracy 97% by epoch 20
# MAGIC 2. The model *appears* to continue to learn if we let it run, although it does slow down and oscillate a bit.
# MAGIC 3. Our test accuracy is about 95% after 5 epochs and never gets better ... it gets worse!
# MAGIC 4. Therefore, we are overfitting very quickly... most of the "training" turns out to be a waste.
# MAGIC 5. For what it's worth, we get 95% accuracy without much work.
# MAGIC
# MAGIC This is not terrible compared to other, non-neural-network approaches to the problem. After all, we could probably tweak this a bit and do even better.
# MAGIC
# MAGIC But we talked about using deep learning to solve "95%" problems or "98%" problems ... where one error in 20, or 50 simply won't work. If we can get to "multiple nines" of accuracy, then we can do things like automate mail sorting and translation, create cars that react properly (all the time) to street signs, and control systems for robots or drones that function autonomously.
# MAGIC
# MAGIC Try two more experiments (try them separately):
# MAGIC 1. Add a third, hidden layer.
# MAGIC 2. Increase the size of the hidden layers.
# MAGIC
# MAGIC Adding another layer slows things down a little (why?) but doesn't seem to make a difference in accuracy.
# MAGIC
# MAGIC Adding a lot more neurons into the first topology slows things down significantly -- 10x as many neurons, and only a marginal increase in accuracy. Notice also (in the plot) that the learning clearly degrades after epoch 50 or so.
# MAGIC
# MAGIC ... We need a new approach!
# MAGIC
# MAGIC ---
# MAGIC
# MAGIC ... let's think about this:
# MAGIC
# MAGIC ### What is layer 2 learning from layer 1? Combinations of pixels
# MAGIC
# MAGIC #### Combinations of pixels contain information but...
# MAGIC
# MAGIC There are a lot of them (combinations) and they are "fragile"
# MAGIC
# MAGIC In fact, in our last experiment, we basically built a model that memorizes a bunch of "magic" pixel combinations.
# MAGIC
# MAGIC What might be a better way to build features?
# MAGIC
# MAGIC * When humans perform this task, we look not at arbitrary pixel combinations, but certain geometric patterns -- lines, curves, loops.
# MAGIC * These features are made up of combinations of pixels, but they are far from arbitrary
# MAGIC * We identify these features regardless of translation, rotation, etc.
# MAGIC
# MAGIC Is there a way to get the network to do the same thing?
# MAGIC
# MAGIC I.e., in layer one, identify pixels. Then in layer 2+, identify abstractions over pixels that are translation-invariant 2-D shapes?
# MAGIC
# MAGIC We could look at where a "filter" that represents one of these features (e.g., an edge) matches the image.
# MAGIC
# MAGIC How would this work?
# MAGIC
# MAGIC ### Convolution
# MAGIC
# MAGIC Convolution in the general mathematical sense is define as follows:
# MAGIC
# MAGIC <img src="https://i.imgur.com/lurC2Cx.png" width=300>
# MAGIC
# MAGIC The convolution we deal with in deep learning is a simplified case. We want to compare two signals. Here are two visualizations, courtesy of Wikipedia, that help communicate how convolution emphasizes features:
# MAGIC
# MAGIC <img src="http://i.imgur.com/EDCaMl2.png" width=500>
# MAGIC
# MAGIC ---
# MAGIC
# MAGIC #### Here's an animation (where we change \\({\tau}\\))
# MAGIC <img src="http://i.imgur.com/0BFcnaw.gif">
# MAGIC
# MAGIC __In one sense, the convolution captures and quantifies the pattern matching over space__
# MAGIC
# MAGIC If we perform this in two dimensions, we can achieve effects like highlighting edges:
# MAGIC
# MAGIC <img src="http://i.imgur.com/DKEXIII.png">
# MAGIC
# MAGIC The matrix here, also called a convolution kernel, is one of the functions we are convolving. Other convolution kernels can blur, "sharpen," etc.
# MAGIC
# MAGIC ### So we'll drop in a number of convolution kernels, and the network will learn where to use them? Nope. Better than that.
# MAGIC
# MAGIC ## We'll program in the *idea* of discrete convolution, and the network will learn what kernels extract meaningful features!
# MAGIC
# MAGIC The values in a (fixed-size) convolution kernel matrix will be variables in our deep learning model. Although intuitively it seems like it would be hard to learn useful params, in fact, since those variables are used repeatedly across the image data, it "focuses" the error on a smallish number of parameters with a lot of influence -- so it should be vastly *less* expensive to train than just a huge fully connected layer like we discussed above.
# MAGIC
# MAGIC This idea was developed in the late 1980s, and by 1989, Yann LeCun (at AT&T/Bell Labs) had built a practical high-accuracy system (used in the 1990s for processing handwritten checks and mail).
# MAGIC
# MAGIC __How do we hook this into our neural networks?__
# MAGIC
# MAGIC * First, we can preserve the geometric properties of our data by "shaping" the vectors as 2D instead of 1D.
# MAGIC
# MAGIC * Then we'll create a layer whose value is not just activation applied to weighted sum of inputs, but instead it's the result of a dot-product (element-wise multiply and sum) between the kernel and a patch of the input vector (image).
# MAGIC * This value will be our "pre-activation" and optionally feed into an activation function (or "detector")
# MAGIC
# MAGIC <img src="http://i.imgur.com/ECyi9lL.png">
# MAGIC
# MAGIC
# MAGIC If we perform this operation at lots of positions over the image, we'll get lots of outputs, as many as one for every input pixel.
# MAGIC
# MAGIC
# MAGIC <img src="http://i.imgur.com/WhOrJ0Y.jpg">
# MAGIC
# MAGIC * So we'll add another layer that "picks" the highest convolution pattern match from nearby pixels, which
# MAGIC * makes our pattern match a little bit translation invariant (a fuzzy location match)
# MAGIC * reduces the number of outputs significantly
# MAGIC * This layer is commonly called a pooling layer, and if we pick the "maximum match" then it's a "max pooling" layer.
# MAGIC
# MAGIC <img src="http://i.imgur.com/9iPpfpb.png">
# MAGIC
# MAGIC __The end result is that the kernel or filter together with max pooling creates a value in a subsequent layer which represents the appearance of a pattern in a local area in a prior layer.__
# MAGIC
# MAGIC __Again, the network will be given a number of "slots" for these filters and will learn (by minimizing error) what filter values produce meaningful features. This is the key insight into how modern image-recognition networks are able to generalize -- i.e., learn to tell 6s from 7s or cats from dogs.__
# MAGIC
# MAGIC <img src="http://i.imgur.com/F8eH3vj.png">
# MAGIC
# MAGIC ## Ok, let's build our first ConvNet:
# MAGIC
# MAGIC First, we want to explicitly shape our data into a 2-D configuration. We'll end up with a 4-D tensor where the first dimension is the training examples, then each example is 28x28 pixels, and we'll explicitly say it's 1-layer deep. (Why? with color images, we typically process over 3 or 4 channels in this last dimension)
# MAGIC
# MAGIC A step by step animation follows:
# MAGIC * http://cs231n.github.io/assets/conv-demo/index.html
# COMMAND ----------
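# MAGIC %md
# MAGIC Before wiring this into Keras, the next cell is a tiny NumPy sketch (not part of the original lab) of the two operations described above: a "valid" 2-D convolution of a small image with a 3x3 edge-responding kernel, followed by 2x2 max pooling. The image values and kernel are made up for illustration only.
# COMMAND ----------
import numpy as np
# a made-up 6x6 single-channel "image": a bright square on a dark background
img = np.zeros((6, 6))
img[1:5, 1:5] = 1.0
# a 3x3 kernel that responds to vertical edges
kernel = np.array([[ 1., 0., -1.],
                   [ 1., 0., -1.],
                   [ 1., 0., -1.]])
def conv2d_valid(x, k):
    """Plain 'valid' 2-D convolution (really cross-correlation, as in most DL libraries)."""
    kh, kw = k.shape
    oh, ow = x.shape[0] - kh + 1, x.shape[1] - kw + 1
    out = np.zeros((oh, ow))
    for i in range(oh):
        for j in range(ow):
            out[i, j] = np.sum(x[i:i+kh, j:j+kw] * k)   # dot product of kernel and image patch
    return out
def maxpool2x2(x):
    """Non-overlapping 2x2 max pooling: keep the strongest local response."""
    h, w = x.shape[0] // 2, x.shape[1] // 2
    return x[:2*h, :2*w].reshape(h, 2, w, 2).max(axis=(1, 3))
feat = conv2d_valid(img, kernel)   # 4x4 feature map: strong +/- responses at the square's vertical edges
print(feat)
print(maxpool2x2(feat))            # 2x2 summary, roughly translation-invariant
# COMMAND ----------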
train_libsvm = "/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-train.txt"
test_libsvm = "/dbfs/databricks-datasets/mnist-digits/data-001/mnist-digits-test.txt"
X_train, y_train = sklearn.datasets.load_svmlight_file(train_libsvm, n_features=784)
X_train = X_train.toarray()
X_test, y_test = sklearn.datasets.load_svmlight_file(test_libsvm, n_features=784)
X_test = X_test.toarray()
X_train = X_train.reshape( (X_train.shape[0], 28, 28, 1) )
X_train = X_train.astype('float32')
X_train /= 255
y_train = to_categorical(y_train, num_classes=10)
X_test = X_test.reshape( (X_test.shape[0], 28, 28, 1) )
X_test = X_test.astype('float32')
X_test /= 255
y_test = to_categorical(y_test, num_classes=10)
# COMMAND ----------
# MAGIC %md
# MAGIC Now the model:
# COMMAND ----------
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
model = Sequential()
model.add(Conv2D(8, # number of kernels
(4, 4), # kernel size
padding='valid', # no padding; output will be smaller than input
input_shape=(28, 28, 1)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu')) # alternative syntax for applying activation
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# COMMAND ----------
# MAGIC %md
# MAGIC ... and the training loop and output:
# COMMAND ----------
start = datetime.datetime.today()
history = model.fit(X_train, y_train, batch_size=128, epochs=8, verbose=2, validation_split=0.1)
scores = model.evaluate(X_test, y_test, verbose=1)
print()
for i in range(len(model.metrics_names)):
print("%s: %f" % (model.metrics_names[i], scores[i]))
# COMMAND ----------
fig, ax = plt.subplots()
fig.set_size_inches((5,5))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
display(fig)
# COMMAND ----------
# MAGIC %md
# MAGIC ### Our MNIST ConvNet
# MAGIC
# MAGIC In our first convolutional MNIST experiment, we get to almost 99% validation accuracy in just a few epochs (a minute or so on CPU)!
# MAGIC
# MAGIC The training accuracy is effectively 100%, though, so we've almost completely overfit (i.e., memorized the training data) by this point and need to do a little work if we want to keep learning.
# MAGIC
# MAGIC Let's add another convolutional layer:
# COMMAND ----------
model = Sequential()
model.add(Conv2D(8, # number of kernels
(4, 4), # kernel size
padding='valid',
input_shape=(28, 28, 1)))
model.add(Activation('relu'))
model.add(Conv2D(8, (4, 4)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X_train, y_train, batch_size=128, epochs=15, verbose=2, validation_split=0.1)
scores = model.evaluate(X_test, y_test, verbose=1)
print()
for i in range(len(model.metrics_names)):
print("%s: %f" % (model.metrics_names[i], scores[i]))
# COMMAND ----------
# MAGIC %md
# MAGIC While that's running, let's look at a number of "famous" convolutional networks!
# MAGIC
# MAGIC ### LeNet (Yann LeCun, 1998)
# MAGIC
# MAGIC <img src="http://i.imgur.com/k5hMtMK.png">
# MAGIC
# MAGIC <img src="http://i.imgur.com/ERV9pHW.gif">
# COMMAND ----------
# MAGIC %md <img src="http://i.imgur.com/TCN9C4P.png">
# COMMAND ----------
# MAGIC %md
# MAGIC ### AlexNet (2012)
# MAGIC
# MAGIC <img src="http://i.imgur.com/CpokDKV.jpg">
# MAGIC
# MAGIC <img src="http://i.imgur.com/Ld2QhXr.jpg">
# COMMAND ----------
# MAGIC %md
# MAGIC ### Back to our labs: Still Overfitting
# MAGIC
# MAGIC We're making progress on our test error -- about 99% -- but just a bit for all the additional time, due to the network overfitting the data.
# MAGIC
# MAGIC There are a variety of techniques we can take to counter this -- forms of regularization.
# MAGIC
# MAGIC Let's try a relatively simple solution that works surprisingly well: add a pair of `Dropout` layers, each of which randomly omits a fraction of neurons from each training batch (thus exposing each neuron to only part of the training data).
# MAGIC
# MAGIC We'll add more convolution kernels but shrink them to 3x3 as well.
# COMMAND ----------
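# MAGIC %md
# MAGIC As a quick illustration (not part of the original lab), the next cell mimics with NumPy what a dropout layer does to one batch of activations during training: each unit is zeroed out independently with probability `rate`, and the surviving activations are scaled by `1/(1-rate)` so the expected sum is unchanged (this "inverted dropout" is what Keras applies at training time). The activation values are random numbers used only for illustration.
# COMMAND ----------
import numpy as np
rng = np.random.RandomState(0)
activations = rng.rand(4, 8)           # made-up activations: batch of 4 examples, 8 units each
rate = 0.25                            # fraction of units to drop
keep_mask = rng.rand(*activations.shape) >= rate
dropped = activations * keep_mask / (1.0 - rate)   # scale survivors to preserve the expected value
print("fraction zeroed: %.2f" % (1 - keep_mask.mean()))
print(dropped.round(2))
# COMMAND ----------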
model = Sequential()
model.add(Conv2D(32, # number of kernels
(3, 3), # kernel size
padding='valid',
input_shape=(28, 28, 1)))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25)) # <- regularize: rate is the fraction of units randomly dropped each update
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5)) # <- regularize: rate is the fraction of units randomly dropped each update
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X_train, y_train, batch_size=128, epochs=15, verbose=2)
scores = model.evaluate(X_test, y_test, verbose=2)
print()
for i in range(len(model.metrics_names)):
print("%s: %f" % (model.metrics_names[i], scores[i]))
# COMMAND ----------
# MAGIC %md
# MAGIC While that's running, let's look at some more recent ConvNet architectures:
# MAGIC
# MAGIC ### VGG16 (2014)
# MAGIC
# MAGIC <img src="http://i.imgur.com/gl4kZDf.png">
# COMMAND ----------
# MAGIC %md
# MAGIC ### GoogLeNet (2014)
# MAGIC
# MAGIC <img src="http://i.imgur.com/hvmtDqN.png">
# MAGIC
# MAGIC *"Inception" layer: parallel convolutions at different resolutions*
# MAGIC
# MAGIC ### Residual Networks (2015-)
# MAGIC
# MAGIC Skip layers to improve training (error propagation). Residual layers learn from details at multiple previous layers.
# MAGIC
# MAGIC <img src="http://i.imgur.com/32g8Ykl.png">
# COMMAND ----------
# MAGIC %md
# MAGIC ---
# MAGIC
# MAGIC > __ASIDE: Atrous / Dilated Convolutions__
# MAGIC
# MAGIC > An atrous or dilated convolution is a convolution filter with "holes" in it. Effectively, it is a way to enlarge the filter spatially while not adding as many parameters or attending to every element in the input.
# MAGIC
# MAGIC > Why? Covering a larger input volume allows recognizing coarser-grained patterns; restricting the number of parameters is a way of regularizing or constraining the capacity of the model, making training easier.
# MAGIC
# MAGIC ---
# COMMAND ----------
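# MAGIC %md
# MAGIC To make the aside concrete, the next cell is a minimal sketch (not part of the original lab) of how a dilated convolution is declared in Keras: the `dilation_rate` argument spreads a 3x3 kernel over a 5x5 receptive field without adding parameters. It only builds and summarizes the layer; nothing is trained.
# COMMAND ----------
from keras.models import Sequential
from keras.layers import Conv2D
dilated = Sequential()
dilated.add(Conv2D(8, (3, 3),            # still only 3*3*1*8 weights (+ 8 biases)
                   dilation_rate=(2, 2), # "holes": kernel taps are 2 pixels apart, so the
                                         # effective receptive field grows to 5x5
                   padding='valid',
                   input_shape=(28, 28, 1)))
dilated.summary()
# COMMAND ----------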
# MAGIC %md
# MAGIC ## *Lab Wrapup*
# MAGIC
# MAGIC From the last lab, you should have a test accuracy of over 99.1%
# MAGIC
# MAGIC For one more activity, try changing the optimizer to old-school "sgd" -- just to see how far we've come with these modern gradient descent techniques in the last few years.
# MAGIC
# MAGIC Accuracy will end up noticeably worse ... about 96-97% test accuracy. Two key takeaways:
# MAGIC
# MAGIC * Without a good optimizer, even a very powerful network design may not achieve results
# MAGIC * In fact, we could replace the word "optimizer" there with
# MAGIC * initialization
# MAGIC * activation
# MAGIC * regularization
# MAGIC * (etc.)
# MAGIC * All of these elements we've been working with operate together in a complex way to determine final performance
# COMMAND ----------
# MAGIC %md
# MAGIC Of course this world evolves fast - see the new kid in the CNN block -- **capsule networks**
# MAGIC
# MAGIC > Hinton: “The pooling operation used in convolutional neural networks is a big mistake and the fact that it works so well is a disaster.”
# MAGIC
# MAGIC Well worth the 8 minute read:
# MAGIC * [https://medium.com/ai%C2%B3-theory-practice-business/understanding-hintons-capsule-networks-part-i-intuition-b4b559d1159b](https://medium.com/ai%C2%B3-theory-practice-business/understanding-hintons-capsule-networks-part-i-intuition-b4b559d1159b)
# MAGIC
# MAGIC To understand deeper:
# MAGIC * original paper: [https://arxiv.org/abs/1710.09829](https://arxiv.org/abs/1710.09829)
# MAGIC
# MAGIC [Keras capsule network example](https://keras.io/examples/cifar10_cnn_capsule/)
# COMMAND ----------
# MAGIC %md
# MAGIC # More resources
# MAGIC
# MAGIC - http://www.wildml.com/2015/12/implementing-a-cnn-for-text-classification-in-tensorflow/
# MAGIC - https://openai.com/
# COMMAND ----------
| unlicense |
3DGenomes/tadbit | _pytadbit/mapping/analyze.py | 1 | 67679 | """
18 Nov 2014
"""
from warnings import warn
from collections import OrderedDict
from pysam import AlignmentFile
from scipy.stats import norm as sc_norm, skew, kurtosis
from scipy.stats import pearsonr, spearmanr, linregress
from scipy.sparse.linalg import eigsh
from numpy.linalg import eigh
import numpy as np
try:
from matplotlib import rcParams
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.colors import LinearSegmentedColormap
except ImportError:
warn('matplotlib not found\n')
from pytadbit import HiC_data
from pytadbit.utils.extraviews import tadbit_savefig, setup_plot
from pytadbit.utils.tadmaths import nozero_log_matrix as nozero_log
from pytadbit.utils.tadmaths import right_double_mad as mad
from pytadbit.parsers.hic_parser import load_hic_data_from_reads
from pytadbit.utils.extraviews import nicer
from pytadbit.utils.file_handling import mkdir
def hic_map(data, resolution=None, normalized=False, masked=None,
by_chrom=False, savefig=None, show=False, savedata=None,
focus=None, clim=None, perc_clim=None, cmap='jet', pdf=False, decay=True,
perc=20, name=None, decay_resolution=None, **kwargs):
"""
function to retrieve data from HiC-data object. Data can be stored as
a square matrix, or drawn using matplotlib
:param data: can be either a path to a file with pre-processed reads
(filtered or not), or a Hi-C-data object
:param None resolution: at which to bin the data (try having a dense matrix
with < 10% of cells with zero interaction counts). Note: not necessary
if a hic_data object is passed as 'data'.
    :param False normalized: use normalized data, based on precalculated biases
    :param masked: a list of columns to be removed, usually because of too few
       interactions
:param False by_chrom: data can be stored in a partitioned way. This
parameter can take the values of:
* 'intra': one output per each chromosome will be created
* 'inter': one output per each possible pair of chromosome will be
created
* 'all' : both of the above outputs
:param None savefig: path where to store the output images. Note that, if
the by_chrom option is used, then savefig will be the name of the
directory containing the output files.
:param None savedata: path where to store the output matrices. Note that, if
the by_chrom option is used, then savefig will be the name of the
directory containing the output files.
:param None focus: can be either two number (i.e.: (1, 100)) specifying the
start and end position of the sub-matrix to display (start and end, along
the diagonal of the original matrix); or directly a chromosome name; or
two chromosome names (i.e.: focus=('chr2, chrX')), in order to store the
data corresponding to inter chromosomal interactions between these two
chromosomes
:param True decay: plot the correlation between genomic distance and
interactions (usually a decay).
:param False force_image: force to generate an image even if resolution is
crazy...
:param None clim: cutoff for the upper and lower bound in the coloring scale
of the heatmap. (perc_clim should be set to None)
:param None perc_clim: cutoff for the upper and lower bound in the coloring scale
of the heatmap; in percentile. (clim should be set to None)
    :param False pdf: when using the by_chrom option, to specify the format of
the stored images
:param jet cmap: color map to be used for the heatmap; "tadbit" color map is
also implemented and will use percentiles of the distribution of
       interactions to define intensities of red.
:param None decay_resolution: chromatin fragment size to consider when
calculating decay of the number of interactions with genomic distance.
Default is equal to resolution of the matrix.
"""
if isinstance(data, str):
data = load_hic_data_from_reads(data, resolution=resolution, **kwargs)
if not kwargs.get('get_sections', True) and decay:
        warn('WARNING: decay is not available when get_sections is off.')
decay = False
if clim and perc_clim:
raise Exception('ERROR: only one of clim or perc_clim should be set\n')
hic_data = data
resolution = data.resolution
if not decay_resolution:
decay_resolution = resolution
if hic_data.bads and not masked:
masked = hic_data.bads
# save and draw the data
if by_chrom:
if focus:
raise Exception('Incompatible options focus and by_chrom\n')
if savedata:
mkdir(savedata)
if savefig:
mkdir(savefig)
for i, crm1 in enumerate(hic_data.chromosomes):
for crm2 in hic_data.chromosomes.keys()[i:]:
if by_chrom == 'intra' and crm1 != crm2:
continue
if by_chrom == 'inter' and crm1 == crm2:
continue
try:
subdata = hic_data.get_matrix(focus=(crm1, crm2), normalized=normalized)
start1, _ = hic_data.section_pos[crm1]
start2, _ = hic_data.section_pos[crm2]
masked1 = {}
masked2 = {}
if focus and hic_data.bads:
# rescale masked
masked1 = dict([(m - start1, hic_data.bads[m])
for m in hic_data.bads])
masked2 = dict([(m - start2, hic_data.bads[m])
for m in hic_data.bads])
if masked1 or masked2:
for i in xrange(len(subdata)):
if i in masked1:
subdata[i] = [float('nan')
for j in xrange(len(subdata))]
for j in xrange(len(subdata)):
if j in masked2:
subdata[i][j] = float('nan')
if savedata:
hic_data.write_matrix('%s/%s.mat' % (
savedata, '_'.join(set((crm1, crm2)))),
focus=(crm1, crm2),
normalized=normalized)
if show or savefig:
if (len(subdata) > 10000
and not kwargs.get('force_image', False)):
warn('WARNING: Matrix image not created, more than '
'10000 rows, use a lower resolution to create images')
continue
draw_map(subdata,
OrderedDict([(k, hic_data.chromosomes[k])
for k in hic_data.chromosomes.keys()
if k in [crm1, crm2]]),
hic_data.section_pos,
'%s/%s.%s' % (savefig,
'_'.join(set((crm1, crm2))),
'pdf' if pdf else 'png'),
show, one=True, clim=clim, perc_clim=perc_clim,
cmap=cmap, decay_resolution=decay_resolution,
perc=perc, name=name, cistrans=float('NaN'))
except ValueError, e:
print 'Value ERROR: problem with chromosome %s' % crm1
print str(e)
except IndexError, e:
print 'Index ERROR: problem with chromosome %s' % crm1
print str(e)
else:
if savedata:
hic_data.write_matrix(savedata, focus=focus,
normalized=normalized)
if show or savefig:
subdata = hic_data.get_matrix(focus=focus, normalized=normalized)
if (len(subdata) > 10000 and not kwargs.get('force_image', False)):
warn('WARNING: Matrix image not created, more than '
'10000 rows, use a lower resolution to create images')
return
start1 = hic_data._focus_coords(focus)[0]
if focus and masked:
# rescale masked
masked = dict([(m - start1, masked[m]) for m in masked])
if masked:
for i in xrange(len(subdata)):
if i in masked:
subdata[i] = [float('nan')
for j in xrange(len(subdata))]
for j in xrange(len(subdata)):
if j in masked:
subdata[i][j] = float('nan')
draw_map(subdata,
{} if focus else hic_data.chromosomes,
hic_data.section_pos, savefig, show,
one = True if focus else False, decay=decay,
clim=clim, perc_clim=perc_clim, cmap=cmap,
decay_resolution=decay_resolution,
perc=perc, normalized=normalized,
max_diff=kwargs.get('max_diff', None),
name=name, cistrans=float('NaN') if focus else
hic_data.cis_trans_ratio(normalized,
kwargs.get('exclude', None),
kwargs.get('diagonal', True),
kwargs.get('equals', None)))
def draw_map(data, genome_seq, cumcs, savefig, show, one=False, clim=None,
perc_clim=None, cmap='jet', decay=False, perc=20, name=None,
cistrans=None, decay_resolution=10000, normalized=False,
max_diff=None):
_ = plt.figure(figsize=(15.,12.5))
if not max_diff:
max_diff = len(data)
ax1 = plt.axes([0.34, 0.08, 0.6, 0.7205])
ax2 = plt.axes([0.07, 0.65, 0.21, 0.15])
if decay:
ax3 = plt.axes([0.07, 0.42, 0.21, 0.15])
plot_distance_vs_interactions(data, genome_seq=genome_seq, axe=ax3,
resolution=decay_resolution,
max_diff=max_diff, normalized=normalized)
ax4 = plt.axes([0.34, 0.805, 0.6, 0.04], sharex=ax1)
ax5 = plt.axes([0.34, 0.845, 0.6, 0.04], sharex=ax1)
ax6 = plt.axes([0.34, 0.885, 0.6, 0.04], sharex=ax1)
try:
minoridata = np.nanmin(data)
maxoridata = np.nanmax(data)
except AttributeError:
vals = [i for d in data for i in d if not np.isnan(i)]
minoridata = np.min(vals)
maxoridata = np.max(vals)
totaloridata = np.nansum([data[i][j] for i in xrange(len(data))
for j in xrange(i, len(data[i]))]) # may not be square
data = nozero_log(data, np.log2)
vals = np.array([i for d in data for i in d])
vals = vals[np.isfinite(vals)]
if perc_clim:
try:
clim = np.percentile(vals, perc_clim[0]), np.percentile(vals, perc_clim[1])
except ValueError:
clim = None
mindata = np.nanmin(vals)
maxdata = np.nanmax(vals)
diff = maxdata - mindata
norm = lambda x: (x - mindata) / diff
posI = 0.01 if not clim else norm(clim[0]) if clim[0] != None else 0.01
posF = 1.0 if not clim else norm(clim[1]) if clim[1] != None else 1.0
if cmap == 'tadbit':
cuts = perc
cdict = {'red' : [(0.0, 1.0, 1.0)],
'green': [(0.0, 1.0, 1.0)],
'blue' : [(0.0, 1.0, 1.0)]}
for i in np.linspace(posI, posF, cuts, endpoint=False):
prc = (i / (posF - posI)) / 1.75
pos = norm(np.percentile(vals, i * 100.))
# print '%7.4f %7.4f %7.4f %7.4f' % (prc, pos, np.percentile(vals, i * 100.), i)
cdict['red' ].append([pos, 1 , 1 ])
cdict['green'].append([pos, 1 - prc, 1 - prc])
cdict['blue' ].append([pos, 1 - prc, 1 - prc])
cdict['red' ].append([1.0, 1, 1])
cdict['green'].append([1.0, 0, 0])
cdict['blue' ].append([1.0, 0, 0])
cmap = LinearSegmentedColormap(cmap, cdict)
clim = None
else:
cmap = plt.get_cmap(cmap)
cmap.set_bad('darkgrey', 1)
ax1.imshow(data, interpolation='none',
cmap=cmap, vmin=clim[0] if clim else None, vmax=clim[1] if clim else None)
size1 = len(data)
size2 = len(data[0])
if size1 == size2:
for i in xrange(size1):
for j in xrange(i, size2):
if np.isnan(data[i][j]):
data[i][j] = 0
data[j][i] = 0
else:
for i in xrange(size1):
for j in xrange(size2):
if np.isnan(data[i][j]):
data[i][j] = 0
#data[j][i] = data[i][j]
try:
evals, evect = eigh(data)
sort_perm = evals.argsort()
evect = evect[sort_perm]
except:
evals, evect = None, None
data = [i for d in data for i in d if np.isfinite(i)]
gradient = np.linspace(np.nanmin(data),
np.nanmax(data), max(size1, size2))
gradient = np.vstack((gradient, gradient))
try:
h = ax2.hist(data, color='darkgrey', linewidth=2,
bins=20, histtype='step', density=True)
except AttributeError:
h = ax2.hist(data, color='darkgrey', linewidth=2,
bins=20, histtype='step', normed=True)
_ = ax2.imshow(gradient, aspect='auto', cmap=cmap,
vmin=clim[0] if clim else None, vmax=clim[1] if clim else None,
extent=(np.nanmin(data), np.nanmax(data) , 0, max(h[0])))
if genome_seq:
for crm in genome_seq:
ax1.vlines([cumcs[crm][0]-.5, cumcs[crm][1]-.5], cumcs[crm][0]-.5, cumcs[crm][1]-.5,
color='w', linestyle='-', linewidth=1, alpha=1)
ax1.hlines([cumcs[crm][1]-.5, cumcs[crm][0]-.5], cumcs[crm][0]-.5, cumcs[crm][1]-.5,
color='w', linestyle='-', linewidth=1, alpha=1)
ax1.vlines([cumcs[crm][0]-.5, cumcs[crm][1]-.5], cumcs[crm][0]-.5, cumcs[crm][1]-.5,
color='k', linestyle='--')
ax1.hlines([cumcs[crm][1]-.5, cumcs[crm][0]-.5], cumcs[crm][0]-.5, cumcs[crm][1]-.5,
color='k', linestyle='--')
if not one:
vals = [0]
keys = ['']
for crm in genome_seq:
vals.append(cumcs[crm][0])
keys.append(crm)
vals.append(cumcs[crm][1])
ax1.set_yticks(vals)
ax1.set_yticklabels('')
ax1.set_yticks([float(vals[i]+vals[i+1])/2
for i in xrange(len(vals) - 1)], minor=True)
ax1.set_yticklabels(keys, minor=True)
for t in ax1.yaxis.get_minor_ticks():
t.tick1On = False
t.tick2On = False
# totaloridata = ''.join([j + ('' if (i+1)%3 else ',') for i, j in enumerate(str(totaloridata)[::-1])])[::-1].strip(',')
# minoridata = ''.join([j + ('' if (i+1)%3 else ',') for i, j in enumerate(str(minoridata)[::-1])])[::-1].strip(',')
# maxoridata = ''.join([j + ('' if (i+1)%3 else ',') for i, j in enumerate(str(maxoridata)[::-1])])[::-1].strip(',')
plt.figtext(0.05,0.25, ''.join([
(name + '\n') if name else '',
'Number of interactions: %s\n' % str(totaloridata),
('' if np.isnan(cistrans) else
('Percentage of cis interactions: %.0f%%\n' % (cistrans*100))),
'Min interactions: %s\n' % (minoridata),
'Max interactions: %s\n' % (maxoridata)]))
ax2.set_xlim((np.nanmin(data), np.nanmax(data)))
ax2.set_ylim((0, max(h[0])))
ax1.set_xlim ((-0.5, size1 - .5))
ax1.set_ylim ((-0.5, size2 - .5))
ax2.set_xlabel('log interaction count')
# we reduce the number of dots displayed.... we just want to see the shape
subdata = np.array(list(set([float(int(d*100))/100 for d in data])))
try:
normfit = sc_norm.pdf(subdata, np.nanmean(data), np.nanstd(data))
except AttributeError:
normfit = sc_norm.pdf(subdata, np.mean(data), np.std(data))
ax2.plot(subdata, normfit, 'w.', markersize=2.5, alpha=.4)
ax2.plot(subdata, normfit, 'k.', markersize=1.5, alpha=1)
ax2.set_title('skew: %.3f, kurtosis: %.3f' % (skew(data),
kurtosis(data)))
try:
ax4.vlines(range(size1), 0, evect[:,-1], color='k')
except (TypeError, IndexError):
pass
ax4.hlines(0, 0, size2, color='red')
ax4.set_ylabel('E1')
ax4.set_yticklabels([])
try:
ax5.vlines(range(size1), 0, evect[:,-2], color='k')
except (TypeError, IndexError):
pass
ax5.hlines(0, 0, size2, color='red')
ax5.set_ylabel('E2')
ax5.set_yticklabels([])
try:
ax6.vlines(range(size1), 0, evect[:,-3], color='k')
except (TypeError, IndexError):
pass
ax6.hlines(0, 0, size2, color='red')
ax6.set_ylabel('E3')
ax6.set_yticklabels([])
xticklabels = ax4.get_xticklabels() + ax5.get_xticklabels() + ax6.get_xticklabels()
plt.setp(xticklabels, visible=False)
if savefig:
tadbit_savefig(savefig)
elif show:
plt.show()
plt.close('all')
def plot_distance_vs_interactions(data, min_diff=1, max_diff=1000, show=False,
genome_seq=None, resolution=None, axe=None,
savefig=None, normalized=False,
plot_each_cell=False):
"""
Plot the number of interactions observed versus the genomic distance between
the mapped ends of the read. The slope is expected to be around -1, in
logarithmic scale and between 700 kb and 10 Mb (according to the prediction
of the fractal globule model).
:param data: input file name (either tsv or TADbit generated BAM), or
HiC_data object or list of lists
    :param 1 min_diff: lower limit (in number of bins)
    :param 1000 max_diff: upper limit (in number of bins) to look for
    :param None resolution: group reads that are closer than this resolution
       parameter
    :param False plot_each_cell: if False, only the mean distances by bin
       will be represented, otherwise each pair of interactions will be plotted.
:param None axe: a matplotlib.axes.Axes object to define the plot
appearance
:param None savefig: path to a file where to save the image generated;
if None, the image will be shown using matplotlib GUI (the extension
of the file name will determine the desired format).
:returns: slope, intercept and R square of each of the 3 correlations
"""
if isinstance(data, basestring):
resolution = resolution or 1
dist_intr = dict([(i, {})
for i in xrange(min_diff, max_diff)])
fhandler = open(data)
line = fhandler.next()
while line.startswith('#'):
line = fhandler.next()
try:
while True:
_, cr1, ps1, _, _, _, _, cr2, ps2, _ = line.split('\t', 9)
if cr1 != cr2:
line = fhandler.next()
continue
diff = abs(int(ps1) / resolution - int(ps2) / resolution)
if max_diff > diff >= min_diff:
try:
dist_intr[diff][int(ps1) / resolution] += 1.
except KeyError:
dist_intr[diff][int(ps1) / resolution] = 1.
line = fhandler.next()
except StopIteration:
pass
fhandler.close()
for diff in dist_intr:
dist_intr[diff] = [dist_intr[diff].get(k, 0)
for k in xrange(max(dist_intr[diff]) - diff)]
elif isinstance(data, HiC_data):
resolution = resolution or data.resolution
dist_intr = dict([(i, []) for i in xrange(min_diff, max_diff)])
if normalized:
get_data = lambda x, y: data[x, y] / data.bias[x] / data.bias[y]
else:
get_data = lambda x, y: data[x, y]
max_diff = min(len(data), max_diff)
if data.section_pos:
for crm in data.section_pos:
for diff in xrange(min_diff, min(
(max_diff, 1 + data.chromosomes[crm]))):
for i in xrange(data.section_pos[crm][0],
data.section_pos[crm][1] - diff):
dist_intr[diff].append(get_data(i, i + diff))
else:
for diff in xrange(min_diff, max_diff):
for i in xrange(len(data) - diff):
if not np.isnan(data[i, i + diff]):
                        dist_intr[diff].append(get_data(i, i + diff))
elif isinstance(data, dict): # if we pass decay/expected dictionary, computes weighted mean
dist_intr = {}
for i in range(min_diff, max_diff):
val = [data[c][i] for c in data
if i in data[c] and data[c][i] != data[c].get(i-1, 0)]
if val:
dist_intr[i] = [sum(val) / float(len(val))]
else:
dist_intr[i] = [0]
else:
dist_intr = dict([(i, []) for i in xrange(min_diff, max_diff)])
if genome_seq:
max_diff = min(max(genome_seq.values()), max_diff)
cnt = 0
for crm in genome_seq:
for diff in xrange(min_diff, min(
(max_diff, genome_seq[crm]))):
for i in xrange(cnt, cnt + genome_seq[crm] - diff):
if not np.isnan(data[i][i + diff]):
dist_intr[diff].append(data[i][i + diff])
cnt += genome_seq[crm]
else:
max_diff = min(len(data), max_diff)
for diff in xrange(min_diff, max_diff):
for i in xrange(len(data) - diff):
if not np.isnan(data[i][i + diff]):
dist_intr[diff].append(data[i][i + diff])
resolution = resolution or 1
if not axe:
fig=plt.figure()
axe = fig.add_subplot(111)
    # remove last part of the plot in case no interaction is counted... reduce max_diff
for diff in xrange(max_diff - 1, min_diff, -1):
try:
if not dist_intr[diff]:
del(dist_intr[diff])
max_diff -=1
continue
except KeyError:
max_diff -=1
continue
break
    # get the mean values per bin
mean_intr = dict([(i, float(sum(dist_intr[i])) / len(dist_intr[i]))
for i in dist_intr if len(dist_intr[i])])
if plot_each_cell:
xp, yp = [], []
for x, y in sorted(dist_intr.items(), key=lambda x:x[0]):
xp.extend([x] * len(y))
yp.extend(y)
x = []
y = []
for k in xrange(len(xp)):
if yp[k]:
x.append(xp[k])
y.append(yp[k])
axe.plot(x, y, color='grey', marker='.', alpha=0.1, ms=1,
linestyle='None')
xp, yp = zip(*sorted(mean_intr.items(), key=lambda x:x[0]))
x = []
y = []
for k in xrange(len(xp)):
if yp[k]:
x.append(xp[k])
y.append(yp[k])
axe.plot(x, y, 'k.', alpha=0.4)
best = (float('-inf'), 0, 0, 0, 0, 0, 0, 0, 0, 0)
logx = np.log(x)
logy = np.log(y)
ntries = 100
# set k for better fit
# for k in xrange(1, ntries/5, ntries/5/5):
if resolution == 1:
k = 1
for i in xrange(3, ntries-2-k):
v1 = i * len(x) / ntries
try:
a1, b1, r21, _, _ = linregress(logx[ :v1], logy[ :v1])
except ValueError:
a1 = b1 = r21 = 0
r21 *= r21
for j in xrange(i + 1 + k, ntries - 2 - k):
v2 = j * len(x) / ntries
try:
a2, b2, r22, _, _ = linregress(logx[v1+k:v2], logy[v1+k:v2])
a3, b3, r23, _, _ = linregress(logx[v2+k: ], logy[v2+k: ])
except ValueError:
a2 = b2 = r22 = 0
a3 = b3 = r23 = 0
r2 = r21 + r22**2 + r23**2
if r2 > best[0]:
best = (r2, v1, v2, a1, a2, a3,
b1, b2, b3, k)
# plot line of best fit
(v1, v2,
a1, a2, a3,
b1, b2, b3, k) = best[1:]
yfit1 = lambda xx: np.exp(b1 + a1*np.array (np.log(xx)))
yfit2 = lambda xx: np.exp(b2 + a2*np.array (np.log(xx)))
yfit3 = lambda xx: np.exp(b3 + a3*np.array (np.log(xx)))
axe.plot(x[ :v1], yfit1(x[ :v1] ), color= 'yellow', lw=2,
label = r'$\alpha_{%s}=%.2f$' % (
'0-0.7 \mathrm{ Mb}' if resolution != 1 else '1', a1))
#label = r'$\alpha_1=%.2f$ (0-%d)' % (a1, x[v1]))
axe.plot(x[v1+k:v2], yfit2(x[v1+k:v2]), color= 'orange', lw=2,
label = r'$\alpha_{%s}=%.2f$' % (
'0.7-10 \mathrm{ Mb}' if resolution != 1 else '2', a2))
# label = r'$\alpha_2=%.2f$ (%d-%d)' % (a2, x[v1], x[v2]))
axe.plot(x[v2+k: ], yfit3(x[v2+k: ] ), color= 'red' , lw=2,
label = r'$\alpha_{%s}=%.2f$' % (
'10 \mathrm{ Mb}-\infty' if resolution != 1 else '3', a3))
# label = r'$\alpha_3=%.2f$ (%d-$\infty$)' % (a3, x[v2+k]))
else:
# from 0.7 Mb
v1 = 700000 / resolution
# to 10 Mb
v2 = 10000000 / resolution
try:
a1, b1, r21, _, _ = linregress(logx[ :v1], logy[ :v1])
except ValueError:
a1, b1, r21 = 0, 0, 0
try:
a2, b2, r22, _, _ = linregress(logx[v1:v2], logy[v1:v2])
except ValueError:
a2, b2, r22 = 0, 0, 0
try:
a3, b3, r23, _, _ = linregress(logx[v2: ], logy[v2: ])
except ValueError:
a3, b3, r23 = 0, 0, 0
yfit1 = lambda xx: np.exp(b1 + a1*np.array (np.log(xx)))
yfit2 = lambda xx: np.exp(b2 + a2*np.array (np.log(xx)))
yfit3 = lambda xx: np.exp(b3 + a3*np.array (np.log(xx)))
axe.plot(x[ :v1], yfit1(x[ :v1] ), color= 'yellow', lw=2,
label = r'$\alpha_{%s}=%.2f$' % (
'0-0.7 \mathrm{ Mb}' if resolution != 1 else '1', a1))
#label = r'$\alpha_1=%.2f$ (0-%d)' % (a1, x[v1]))
axe.plot(x[v1:v2], yfit2(x[v1:v2]), color= 'orange', lw=2,
label = r'$\alpha_{%s}=%.2f$' % (
'0.7-10 \mathrm{ Mb}' if resolution != 1 else '2', a2))
# label = r'$\alpha_2=%.2f$ (%d-%d)' % (a2, x[v1], x[v2]))
axe.plot(x[v2: ], yfit3(x[v2: ] ), color= 'red' , lw=2,
label = r'$\alpha_{%s}=%.2f$' % (
'10 \mathrm{ Mb}-\infty' if resolution != 1 else '3', a3))
# label = r'$\alpha_3=%.2f$ (%d-$\infty$)' % (a3, x[v2+k]))
axe.set_ylabel('Log interaction count')
axe.set_xlabel('Log genomic distance (resolution: %s)' % nicer(resolution))
axe.legend(loc='lower left', frameon=False)
axe.set_xscale('log')
axe.set_yscale('log')
axe.set_xlim((min_diff, max_diff))
try:
axe.set_ylim((0, max(y)))
except ValueError:
pass
if savefig:
tadbit_savefig(savefig)
plt.close('all')
elif show:
plt.show()
plt.close('all')
return (a1, b1, r21), (a2, b2, r22), (a3, b3, r23)
def plot_iterative_mapping(fnam1, fnam2, total_reads=None, axe=None, savefig=None):
"""
Plots the number of reads mapped at each step of the mapping process (in the
    case of the iterative mapping, each step is a mapping process with a given
size of fragments).
    :param fnam1: input file name for the first read end (fnam2 for the second)
:param total_reads: total number of reads in the initial FASTQ file
:param None axe: a matplotlib.axes.Axes object to define the plot
appearance
:param None savefig: path to a file where to save the image generated;
if None, the image will be shown using matplotlib GUI (the extension
of the file name will determine the desired format).
:returns: a dictionary with the number of reads per mapped length
"""
count_by_len = {}
total_reads = total_reads or 1
if not axe:
fig=plt.figure()
_ = fig.add_subplot(111)
colors = ['olive', 'darkcyan']
iteration = False
for i, fnam in enumerate([fnam1, fnam2]):
fhandler = open(fnam)
line = fhandler.next()
count_by_len[i] = {}
while line.startswith('#'):
if line.startswith('# MAPPED '):
itr, num = line.split()[2:]
count_by_len[i][int(itr)] = int(num)
line = fhandler.next()
if not count_by_len[i]:
iteration = True
try:
while True:
_, length, _, _ = line.rsplit('\t', 3)
try:
count_by_len[i][int(length)] += 1
except KeyError:
count_by_len[i][int(length)] = 1
line = fhandler.next()
except StopIteration:
pass
fhandler.close()
lengths = sorted(count_by_len[i].keys())
for k in lengths[::-1]:
count_by_len[i][k] += sum([count_by_len[i][j]
for j in lengths if j < k])
plt.plot(lengths, [float(count_by_len[i][l]) / total_reads
for l in lengths],
label='read' + str(i + 1), linewidth=2, color=colors[i])
if iteration:
plt.xlabel('read length (bp)')
else:
plt.xlabel('Iteration number')
if total_reads != 1:
plt.ylabel('Proportion of mapped reads')
else:
plt.ylabel('Number of mapped reads')
plt.legend(loc=4)
if savefig:
tadbit_savefig(savefig)
elif not axe:
plt.show()
plt.close('all')
return count_by_len
def fragment_size(fnam, savefig=None, nreads=None, max_size=99.9, axe=None,
show=False, xlog=False, stats=('median', 'perc_max'),
too_large=10000):
"""
Plots the distribution of dangling-ends lengths
:param fnam: input file name
:param None savefig: path where to store the output images.
    :param 99.9 max_size: top percentage of distances to consider; the longest
       outliers are usually found above this percentile.
:param False xlog: represent x axis in logarithmic scale
:param ('median', 'perc_max') stats: returns this set of values calculated from the
distribution of insert/fragment sizes. Possible values are:
- 'median' median of the distribution
- 'mean' mean of the distribution
- 'perc_max' percentil defined by the other parameter 'max_size'
        - 'first_decay' starting from the median of the distribution, the
           first window where 10 consecutive insert sizes are each counted
           less than a given cutoff (the cutoff is equal to the number of
           dangling-ends divided by 100 000)
- 'MAD' Double Median Adjusted Deviation
:param 10000 too_large: upper bound limit for fragment size to consider
:param None nreads: number of reads to process (default: all reads)
:returns: the median value and the percentile inputed as max_size.
"""
distr = {}
genome_seq = OrderedDict()
pos = 0
fhandler = open(fnam)
for line in fhandler:
if line.startswith('#'):
if line.startswith('# CRM '):
crm, clen = line[6:].split('\t')
genome_seq[crm] = int(clen)
else:
break
pos += len(line)
fhandler.seek(pos)
des = []
for line in fhandler:
(crm1, pos1, dir1, _, re1, _,
crm2, pos2, dir2, _, re2) = line.strip().split('\t')[1:12]
if re1 == re2 and crm1 == crm2 and dir1 == '1' and dir2 == '0':
pos1, pos2 = int(pos1), int(pos2)
des.append(pos2 - pos1)
if len(des) == nreads:
break
des = [i for i in des if i <= too_large]
fhandler.close()
if not des:
raise Exception('ERROR: no dangling-ends found in %s' % (fnam))
max_perc = np.percentile(des, max_size)
perc99 = np.percentile(des, 99)
perc01 = np.percentile(des, 1)
perc50 = np.percentile(des, 50)
meanfr = np.mean(des)
perc95 = np.percentile(des, 95)
perc05 = np.percentile(des, 5)
to_return = {'median': perc50}
cutoff = len(des) / 100000.
count = 0
for v in xrange(int(perc50), int(max(des))):
if des.count(v) < cutoff:
count += 1
else:
count = 0
if count >= 10:
to_return['first_decay'] = v - 10
break
else:
raise Exception('ERROR: not found')
to_return['perc_max'] = max_perc
to_return['MAD'] = mad(des)
to_return['mean'] = meanfr
if not savefig and not axe and not show:
return [to_return[k] for k in stats]
ax = setup_plot(axe, figsize=(10, 5.5))
desapan = ax.axvspan(perc95, perc99, facecolor='black', alpha=.2,
label='1-99%% DEs\n(%.0f-%.0f nts)' % (perc01, perc99))
ax.axvspan(perc01, perc05, facecolor='black', alpha=.2)
desapan = ax.axvspan(perc05, perc95, facecolor='black', alpha=.4,
label='5-95%% DEs\n(%.0f-%.0f nts)' % (perc05, perc95))
deshist = ax.hist(des, bins=100, range=(0, max_perc), lw=2,
alpha=.5, edgecolor='darkred', facecolor='darkred', label='Dangling-ends')
ylims = ax.get_ylim()
plots = []
ax.set_xlabel('Genomic distance between reads')
ax.set_ylabel('Count')
ax.set_title('Distribution of dangling-ends ' +
                 'lengths\nmedian: %s (mean: %s), top %.1f%%: %0.f nts' % (
int(perc50), int(meanfr), max_size, int(max_perc)))
if xlog:
ax.set_xscale('log')
ax.set_xlim((50, max_perc))
plt.subplots_adjust(left=0.1, right=0.75)
ax.legend(bbox_to_anchor=(1.4, 1), frameon=False)
if savefig:
tadbit_savefig(savefig)
elif show and not axe:
plt.show()
plt.close('all')
return [to_return[k] for k in stats]
def plot_genomic_distribution(fnam, first_read=None, resolution=10000,
ylim=None, yscale=None, savefig=None, show=False,
savedata=None, chr_names=None, nreads=None):
"""
Plot the number of reads in bins along the genome (or along a given
chromosome).
:param fnam: input file name
    :param None first_read: deprecated parameter, should no longer be used
    :param 10000 resolution: group reads that are closer than this resolution
       parameter
:param None ylim: a tuple of lower and upper bound for the y axis
    :param None yscale: if set to "log", values will be represented in log2
       scale
:param None savefig: path to a file where to save the image generated;
if None, the image will be shown using matplotlib GUI (the extension
of the file name will determine the desired format).
:param None savedata: path where to store the output read counts per bin.
    :param None chr_names: a list of chromosome names in case only some of
       them need to be plotted (this option may take longer than the default)
:param None nreads: number of reads to process (default: all reads)
"""
if first_read:
        warn('WARNING: first_read parameter should no longer be used.')
distr = {}
genome_seq = OrderedDict()
if chr_names:
chr_names = set(chr_names)
cond1 = lambda x: x not in chr_names
else:
cond1 = lambda x: False
if nreads:
cond2 = lambda x: x >= nreads
else:
cond2 = lambda x: False
cond = lambda x, y: cond1(x) or cond2(y)
count = 0
pos = 0
fhandler = open(fnam)
for line in fhandler:
if line.startswith('#'):
if line.startswith('# CRM '):
crm, clen = line[6:].split('\t')
genome_seq[crm] = int(clen)
else:
break
pos += len(line)
fhandler.seek(pos)
for line in fhandler:
line = line.strip().split('\t')
count += 1
for idx1, idx2 in ((1, 3), (7, 9)):
crm, pos = line[idx1:idx2]
if cond(crm, count):
if cond2(count):
break
continue
pos = int(pos) / resolution
try:
distr[crm][pos] += 1
except KeyError:
try:
distr[crm][pos] = 1
except KeyError:
distr[crm] = {pos: 1}
else:
continue
break
fhandler.close()
if savefig or show:
_ = plt.figure(figsize=(15, 1 + 3 * len(
chr_names if chr_names else distr.keys())))
max_y = max([max(distr[c].values()) for c in distr])
max_x = max([len(distr[c].values()) for c in distr])
ncrms = len(chr_names if chr_names else genome_seq if genome_seq else distr)
data = {}
for i, crm in enumerate(chr_names if chr_names else genome_seq
if genome_seq else distr):
try:
# data[crm] = [distr[crm].get(j, 0) for j in xrange(max(distr[crm]))] # genome_seq[crm]
data[crm] = [distr[crm].get(j, 0)
for j in xrange(genome_seq[crm] / resolution + 1)]
if savefig or show:
plt.subplot(ncrms, 1, i + 1)
plt.plot(range(genome_seq[crm] / resolution + 1), data[crm],
color='red', lw=1.5, alpha=0.7)
if yscale:
plt.yscale(yscale)
except KeyError:
pass
if savefig or show:
if ylim:
plt.vlines(genome_seq[crm] / resolution, ylim[0], ylim[1])
else:
plt.vlines(genome_seq[crm] / resolution, 0, max_y)
plt.xlim((0, max_x))
plt.ylim(ylim or (0, max_y))
plt.title(crm)
if savefig:
tadbit_savefig(savefig)
if not show:
plt.close('all')
elif show:
plt.show()
if savedata:
out = open(savedata, 'w')
out.write('# CRM\tstart-end\tcount\n')
out.write('\n'.join('%s\t%d-%d\t%d' % (c, (i * resolution) + 1,
((i + 1) * resolution), v)
for c in data for i, v in enumerate(data[c])))
out.write('\n')
out.close()
def _unitize(vals):
return np.argsort(vals) / float(len(vals))
def correlate_matrices(hic_data1, hic_data2, max_dist=10, intra=False, axe=None,
savefig=None, show=False, savedata=None, min_dist=1,
normalized=False, remove_bad_columns=True, **kwargs):
"""
Compare the interactions of two Hi-C matrices at a given distance,
with Spearman rank correlation.
Also computes the SCC reproducibility score as in HiCrep (see
    https://doi.org/10.1101/gr.220640.117). Its implementation is inspired
by the version implemented in dryhic by Enrique Vidal
(https://github.com/qenvio/dryhic).
:param hic_data1: Hi-C-data object
:param hic_data2: Hi-C-data object
:param 1 resolution: to be used for scaling the plot
    :param 10 max_dist: maximum distance from the diagonal (e.g. 10 means we
       will not look further than 10 times the resolution)
:param 1 min_dist: minimum distance from diagonal (set to 0 to reproduce
result from HicRep)
:param None savefig: path to save the plot
:param False intra: only takes into account intra-chromosomal contacts
:param False show: displays the plot
:param False normalized: use normalized data
    :param True remove_bad_columns: computes the union of bad columns between
       samples and excludes them from the comparison
:returns: list of correlations, list of genomic distances, SCC and standard
deviation of SCC
"""
spearmans = []
pearsons = []
dists = []
weigs = []
if normalized:
get_the_guy1 = lambda i, j: (hic_data1[j, i] / hic_data1.bias[i] /
hic_data1.bias[j])
get_the_guy2 = lambda i, j: (hic_data2[j, i] / hic_data2.bias[i] /
hic_data2.bias[j])
else:
get_the_guy1 = lambda i, j: hic_data1[j, i]
get_the_guy2 = lambda i, j: hic_data2[j, i]
if remove_bad_columns:
# union of bad columns
bads = hic_data1.bads.copy()
bads.update(hic_data2.bads)
if (intra and hic_data1.sections and hic_data2.sections and
hic_data1.sections == hic_data2.sections):
for dist in xrange(1, max_dist + 1):
diag1 = []
diag2 = []
for crm in hic_data1.section_pos:
for j in xrange(hic_data1.section_pos[crm][0],
hic_data1.section_pos[crm][1] - dist):
i = j + dist
if j in bads or i in bads:
continue
diag1.append(get_the_guy1(i, j))
diag2.append(get_the_guy2(i, j))
spearmans.append(spearmanr(diag1, diag2)[0])
            pearsons.append(pearsonr(diag1, diag2)[0])
r1 = _unitize(diag1)
r2 = _unitize(diag2)
weigs.append((np.var(r1, ddof=1) *
np.var(r2, ddof=1))**0.5 * len(diag1))
dists.append(dist)
else:
if intra:
            warn('WARNING: hic_data does not contain chromosome coordinates, ' +
'intra set to False')
for dist in xrange(min_dist, max_dist + min_dist):
diag1 = []
diag2 = []
for j in xrange(len(hic_data1) - dist):
i = j + dist
if j in bads or i in bads:
continue
diag1.append(get_the_guy1(i, j))
diag2.append(get_the_guy2(i, j))
spearmans.append(spearmanr(diag1, diag2)[0])
pearsons.append(pearsonr(diag1, diag2)[0])
r1 = _unitize(diag1)
r2 = _unitize(diag2)
weigs.append((np.var(r1, ddof=1) *
np.var(r2, ddof=1))**0.5 * len(diag1))
dists.append(dist)
# compute scc
# print pearsons
# print weigs
tot_weigth = sum(weigs)
scc = sum(pearsons[i] * weigs[i] / tot_weigth
for i in xrange(len(pearsons)))
var_corr = np.var(pearsons, ddof=1)
std = (sum(weigs[i]**2 for i in xrange(len(pearsons))) * var_corr /
sum(weigs)**2)**0.5
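    # Note (added comment): the weighted mean above follows the HiCrep SCC
    # definition, weighting each diagonal's Pearson correlation by
    # N_d * sqrt(var(r1) * var(r2)), where r1 and r2 are the rank-unitized
    # vectors of that diagonal (see _unitize above).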
# plot
if show or savefig or axe:
if not axe:
fig = plt.figure()
axe = fig.add_subplot(111)
given_axe = False
else:
given_axe = True
axe.plot(dists, spearmans, color='orange', linewidth=3, alpha=.8)
axe.set_xlabel('Genomic distance in bins')
axe.set_ylabel('Spearman rank correlation')
axe.set_xlim((0, dists[-1]))
if savefig:
tadbit_savefig(savefig)
if show:
plt.show()
if not given_axe:
plt.close('all')
if savedata:
out = open(savedata, 'w')
out.write('# genomic distance\tSpearman rank correlation\n')
for i in xrange(len(spearmans)):
out.write('%s\t%s\n' % (dists[i], spearmans[i]))
out.close()
if kwargs.get('get_bads', False):
return spearmans, dists, scc, std, bads
return spearmans, dists, scc, std
def _evec_dist(v1,v2):
d1=np.dot(v1-v2,v1-v2)
d2=np.dot(v1+v2,v1+v2)
if d1<d2:
d=d1
else:
d=d2
return np.sqrt(d)
def _get_Laplacian(M):
S=M.sum(1)
i_nz=np.where(S>0)[0]
S=S[i_nz]
M=(M[i_nz].T)[i_nz].T
S=1/np.sqrt(S)
M=S*M
M=(S*M.T).T
n=np.size(S)
M=np.identity(n)-M
M=(M+M.T)/2
return M
def get_ipr(evec):
ipr=1.0/(evec*evec*evec*evec).sum()
return ipr
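# Note (added comment): get_ipr returns 1 / sum(evec_i**4), which is small for
# localized eigenvectors and large for delocalized ones; it is used below to
# keep only eigenvectors whose value exceeds ipr_cut.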
def get_reproducibility(hic_data1, hic_data2, num_evec, verbose=True,
normalized=False, remove_bad_columns=True):
"""
Compute reproducibility score similarly to HiC-spector
(https://doi.org/10.1093/bioinformatics/btx152)
:param hic_data1: Hi-C-data object
:param hic_data2: Hi-C-data object
:param 20 num_evec: number of eigenvectors to compare
    :returns: reproducibility score (below 0.5 ~ different cell types)
"""
M1 = hic_data1.get_matrix(normalized=normalized)
M2 = hic_data2.get_matrix(normalized=normalized)
if remove_bad_columns:
# union of bad columns
bads = hic_data1.bads.copy()
bads.update(hic_data2.bads)
        # remove them from both matrices
for bad in sorted(bads, reverse=True):
del(M1[bad])
del(M2[bad])
for i in xrange(len(M1)):
_ = M1[i].pop(bad)
_ = M2[i].pop(bad)
M1 = np.matrix(M1)
M2 = np.matrix(M2)
k1=np.sign(M1.A).sum(1)
d1=np.diag(M1.A)
kd1=~((k1==1)*(d1>0))
k2=np.sign(M2.A).sum(1)
d2=np.diag(M2.A)
kd2=~((k2==1)*(d2>0))
iz=np.nonzero((k1+k2>0)*(kd1>0)*(kd2>0))[0]
M1b=(M1[iz].A.T)[iz].T
M2b=(M2[iz].A.T)[iz].T
i_nz1=np.where(M1b.sum(1)>0)[0]
i_nz2=np.where(M2b.sum(1)>0)[0]
i_z1=np.where(M1b.sum(1)==0)[0]
i_z2=np.where(M2b.sum(1)==0)[0]
M1b_L=_get_Laplacian(M1b)
M2b_L=_get_Laplacian(M2b)
a1, b1=eigsh(M1b_L,k=num_evec,which="SM")
a2, b2=eigsh(M2b_L,k=num_evec,which="SM")
b1_extend=np.zeros((np.size(M1b,0),num_evec))
b2_extend=np.zeros((np.size(M2b,0),num_evec))
for i in range(num_evec):
b1_extend[i_nz1,i]=b1[:,i]
b2_extend[i_nz2,i]=b2[:,i]
ipr_cut=5
ipr1=np.zeros(num_evec)
ipr2=np.zeros(num_evec)
for i in range(num_evec):
ipr1[i]=get_ipr(b1_extend[:,i])
ipr2[i]=get_ipr(b2_extend[:,i])
b1_extend_eff=b1_extend[:,ipr1>ipr_cut]
b2_extend_eff=b2_extend[:,ipr2>ipr_cut]
num_evec_eff=min(np.size(b1_extend_eff,1),np.size(b2_extend_eff,1))
evd=np.zeros(num_evec_eff)
for i in range(num_evec_eff):
evd[i]=_evec_dist(b1_extend_eff[:,i],b2_extend_eff[:,i])
Sd=evd.sum()
l=np.sqrt(2)
evs=abs(l-Sd/num_evec_eff)/l
N = float(M1.shape[1])
if verbose:
if (np.sum(ipr1>N/100)<=1)|(np.sum(ipr2>N/100)<=1):
print("at least one of the maps does not look like typical Hi-C maps")
else:
print("size of maps: %d" %(np.size(M1,0)))
print("reproducibility score: %6.3f " %(evs))
print("num_evec_eff: %d" %(num_evec_eff))
return evs
def eig_correlate_matrices(hic_data1, hic_data2, nvect=6, normalized=False,
savefig=None, show=False, savedata=None,
remove_bad_columns=True, **kwargs):
"""
Compare the interactions of two Hi-C matrices using their 6 first
eigenvectors, with Pearson correlation
:param hic_data1: Hi-C-data object
:param hic_data2: Hi-C-data object
:param 6 nvect: number of eigenvectors to compare
:param None savefig: path to save the plot
:param False show: displays the plot
:param False normalized: use normalized data
    :param True remove_bad_columns: computes the union of bad columns between
       samples and excludes them from the comparison
:param kwargs: any argument to pass to matplotlib imshow function
:returns: matrix of correlations
"""
data1 = hic_data1.get_matrix(normalized=normalized)
data2 = hic_data2.get_matrix(normalized=normalized)
## reduce matrices to remove bad columns
if remove_bad_columns:
# union of bad columns
bads = hic_data1.bads.copy()
bads.update(hic_data2.bads)
        # remove them from both matrices
for bad in sorted(bads, reverse=True):
del(data1[bad])
del(data2[bad])
for i in xrange(len(data1)):
_ = data1[i].pop(bad)
_ = data2[i].pop(bad)
# get the log
data1 = nozero_log(data1, np.log2)
data2 = nozero_log(data2, np.log2)
# get the eigenvectors
ev1, evect1 = eigh(data1)
ev2, evect2 = eigh(data2)
corr = [[0 for _ in xrange(nvect)] for _ in xrange(nvect)]
# sort eigenvectors according to their eigenvalues => first is last!!
sort_perm = ev1.argsort()
ev1.sort()
    evect1 = evect1[:, sort_perm]
sort_perm = ev2.argsort()
ev2.sort()
    evect2 = evect2[:, sort_perm]
# calculate Pearson correlation
for i in xrange(nvect):
for j in xrange(nvect):
corr[i][j] = abs(pearsonr(evect1[:,-i-1],
evect2[:,-j-1])[0])
# plot
axe = plt.axes([0.1, 0.1, 0.6, 0.8])
cbaxes = plt.axes([0.85, 0.1, 0.03, 0.8])
if show or savefig:
im = axe.imshow(corr, interpolation="nearest",origin='lower', **kwargs)
axe.set_xlabel('Eigen Vectors exp. 1')
axe.set_ylabel('Eigen Vectors exp. 2')
axe.set_xticks(range(nvect))
axe.set_yticks(range(nvect))
        axe.set_xticklabels(range(1, nvect + 1))
        axe.set_yticklabels(range(1, nvect + 1))
axe.xaxis.set_tick_params(length=0, width=0)
axe.yaxis.set_tick_params(length=0, width=0)
cbar = plt.colorbar(im, cax = cbaxes )
cbar.ax.set_ylabel('Pearson correlation', rotation=90*3,
verticalalignment='bottom')
axe2 = axe.twinx()
axe2.set_yticks(range(nvect))
axe2.set_yticklabels(['%.1f' % (e) for e in ev2[-nvect:][::-1]])
axe2.set_ylabel('corresponding Eigen Values exp. 2', rotation=90*3,
verticalalignment='bottom')
axe2.set_ylim((-0.5, nvect - 0.5))
axe2.yaxis.set_tick_params(length=0, width=0)
axe3 = axe.twiny()
axe3.set_xticks(range(nvect))
axe3.set_xticklabels(['%.1f' % (e) for e in ev1[-nvect:][::-1]])
axe3.set_xlabel('corresponding Eigen Values exp. 1')
axe3.set_xlim((-0.5, nvect - 0.5))
axe3.xaxis.set_tick_params(length=0, width=0)
axe.set_ylim((-0.5, nvect - 0.5))
axe.set_xlim((-0.5, nvect - 0.5))
if savefig:
tadbit_savefig(savefig)
if show:
plt.show()
plt.close('all')
if savedata:
out = open(savedata, 'w')
out.write('# ' + '\t'.join(['Eigen Vector %s'% i
for i in xrange(nvect)]) + '\n')
for i in xrange(nvect):
out.write('\t'.join([str(corr[i][j])
for j in xrange(nvect)]) + '\n')
out.close()
if kwargs.get('get_bads', False):
return corr, bads
else:
return corr
def plot_rsite_reads_distribution(reads_file, outprefix, window=20,
maxdist=1000):
de_right={}
de_left={}
print "process reads"
fl=open(reads_file)
while True:
line=fl.next()
if not line.startswith('#'):
break
nreads=0
try:
while True:
nreads += 1
if nreads % 1000000 == 0:
print nreads
try:
_, n1, sb1, sd1, l1, ru1, rd1, n2, sb2, sd2, l2, ru2, rd2\
= line.split()
sb1, sd1, l1, ru1, rd1, sb2, sd2, l2, ru2, rd2 = \
map(int, [sb1, sd1, l1, ru1, rd1, sb2, sd2, l2,
ru2, rd2])
except ValueError:
print line
raise ValueError("line is not the right format!")
if n1 != n2:
line=fl.next()
continue
#read1 ahead of read2
if sb1 > sb2:
sb1, sd1, l1, ru1, rd1, sb2, sd2, l2, ru2, rd2 = \
sb2, sd2, l2, ru2, rd2, sb1, sd1, l1, ru1, rd1
#direction always -> <-
if not (sd1 == 1 and sd2 == 0):
line=fl.next()
continue
#close to the diagonal
if sb2-sb1 > maxdist:
line=fl.next()
continue
#close to RE 1
if abs(sb1-ru1) < abs(sb1-rd1):
rc1=ru1
else:
rc1=rd1
pos=sb1-rc1
if abs(pos)<=window:
if not pos in de_right:
de_right[pos]=0
de_right[pos]+=1
#close to RE 2
if abs(sb2-ru2) < abs(sb2-rd2):
rc2=ru2
else:
rc2=rd2
pos=sb2-rc2
if abs(pos)<=window:
if not pos in de_left:
de_left[pos]=0
de_left[pos]+=1
line=fl.next()
except StopIteration:
pass
print " finished processing {} reads".format(nreads)
#transform to arrays
ind = range(-window,window+1)
de_r = map(lambda x:de_right.get(x,0), ind)
de_l = map(lambda x:de_left.get(x,0), ind)
#write to files
print "write to files"
fl=open(outprefix+'_count.dat','w')
fl.write('#dist\tX~~\t~~X\n')
for i,j,k in zip(ind,de_r, de_l):
fl.write('{}\t{}\t{}\n'.format(i, j, k))
#write plot
rcParams.update({'font.size': 10})
pp = PdfPages(outprefix+'_plot.pdf')
ind = np.array(ind)
width = 1
pr = plt.bar(ind-0.5, de_r, width, color='r')
pl = plt.bar(ind-0.5, de_l, width, bottom=de_r, color='b')
plt.ylabel("Count")
plt.title("Histogram of counts around cut site")
plt.xticks(ind[::2], rotation="vertical")
plt.legend((pl[0], pr[0]), ("~~X", "X~~"))
plt.gca().set_xlim([-window-1,window+1])
pp.savefig()
pp.close()
def moving_average(a, n=3):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
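# Quick illustration (added comment) of the moving average with n=3:
#   moving_average(np.array([1, 2, 3, 4, 5]), n=3) -> array([2., 3., 4.])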
def plot_diagonal_distributions(reads_file, outprefix, ma_window=20,
maxdist=800, de_left=[-2,3], de_right=[0,5]):
rbreaks={}
rejoined={}
des={}
print "process reads"
fl=open(reads_file)
while True:
line=fl.next()
if not line.startswith('#'):
break
nreads=0
try:
while True:
nreads += 1
if nreads % 1000000 == 0:
print nreads
try:
_, n1, sb1, sd1, _, ru1, rd1, n2, sb2, sd2, _, ru2, rd2\
= line.split()
sb1, sd1, ru1, rd1, sb2, sd2, ru2, rd2 = \
map(int, [sb1, sd1, ru1, rd1, sb2, sd2, ru2, rd2])
except ValueError:
print line
raise ValueError("line is not the right format!")
if n1 != n2:
line=fl.next()
continue
#read1 ahead of read2
if sb1 > sb2:
sb1, sd1, ru1, rd1, sb2, sd2, ru2, rd2 = \
sb2, sd2, ru2, rd2, sb1, sd1, ru1, rd1
#direction always -> <-
if not (sd1 == 1 and sd2 == 0):
line=fl.next()
continue
mollen = sb2-sb1
if mollen > maxdist:
line=fl.next()
continue
#DE1
if abs(sb1-ru1) < abs(sb1-rd1):
rc1=ru1
else:
rc1=rd1
pos=sb1-rc1
if pos in de_right:
if not mollen in des:
des[mollen]=0
des[mollen]+=1
line=fl.next()
continue
#DE2
if abs(sb2-ru2) < abs(sb2-rd2):
rc2=ru2
else:
rc2=rd2
pos=sb2-rc2
if pos in de_left:
if not mollen in des:
des[mollen]=0
des[mollen]+=1
line=fl.next()
continue
#random: map on same fragment
if rd1 == rd2:
if not mollen in rbreaks:
rbreaks[mollen]=0
rbreaks[mollen]+=1
line=fl.next()
continue
#rejoined ends
if not mollen in rejoined:
rejoined[mollen]=0
rejoined[mollen]+=1
line=fl.next()
except StopIteration:
pass
print " finished processing {} reads".format(nreads)
#transform to arrays
maxlen = max(max(rejoined),max(des),max(rbreaks))
ind = range(1,maxlen+1)
des = map(lambda x:des.get(x,0), ind)
rbreaks = map(lambda x:rbreaks.get(x,0), ind)
rejoined = map(lambda x:rejoined.get(x,0), ind)
#reweight corner for rejoined
rejoined = map(lambda x: x**.5 * rejoined[x-1]/x, ind)
#write to files
print "write to files"
fl=open(outprefix+'_count.dat','w')
fl.write('#dist\trbreaks\tdes\trejoined\n')
for i,j,k,l in zip(ind,rbreaks,des,rejoined):
fl.write('{}\t{}\t{}\t{}\n'.format(i, j, k, l))
#transform data a bit more
ind, des, rbreaks, rejoined = \
map(lambda x: moving_average(np.array(x), ma_window),
[ind, des, rbreaks, rejoined])
des, rbreaks, rejoined = map(lambda x:x/float(x.sum()),
[des, rbreaks, rejoined])
    ind = np.insert(ind, 0, 0)
    des = np.insert(des, 0, 0)
    rbreaks = np.insert(rbreaks, 0, 0)
    rejoined = np.insert(rejoined, 0, 0)
#write plot
pp = PdfPages(outprefix+'_plot.pdf')
rcParams.update({'font.size': 10})
pde = plt.fill_between(ind, des, 0, color='r', alpha=0.5)
prb = plt.fill_between(ind, rbreaks, 0, color='b', alpha=0.5)
prj = plt.fill_between(ind, rejoined, 0, color='y', alpha=0.5)
plt.ylabel("Normalized count")
plt.ylabel("Putative DNA molecule length")
plt.title("Histogram of counts close to the diagonal")
#plt.xticks(ind[::10], rotation="vertical")
plt.legend((prb, pde, prj), ("Random breaks", "Dangling ends",
"Rejoined"))
plt.gca().set_xlim([0,maxlen])
pp.savefig()
pp.close()
def plot_strand_bias_by_distance(fnam, nreads=1000000, valid_pairs=True,
half_step=20, half_len=2000,
full_step=500, full_len=50000, savefig=None):
"""
Classify reads into four categories depending on the strand on which each
of its end is mapped, and plots the proportion of each of these categories
in function of the genomic distance between them.
    Only fully mapped reads mapped on two different restriction fragments (still
    on the same chromosome) are considered.
The four categories are:
- Both read-ends mapped on the same strand (forward)
- Both read-ends mapped on the same strand (reverse)
       - Both read-ends mapped on different strands (facing), like extra-dangling-ends
       - Both read-ends mapped on different strands (opposed), like extra-self-circles
    :param fnam: path to tsv file with intersection of mapped ends
    :param True valid_pairs: consider only read-ends mapped
       on different restriction fragments. If False, considers only read-ends
       mapped on the same restriction fragment.
    :param 1000000 nreads: number of reads used to plot (if None, all will be used)
    :param 20 half_step: binning for the first part of the plot
    :param 2000 half_len: maximum distance for the first part of the plot
    :param 500 full_step: binning for the second part of the plot
    :param 50000 full_len: maximum distance for the second part of the plot
    :param None savefig: path to save figure
"""
max_len = 100000
genome_seq = OrderedDict()
pos = 0
fhandler = open(fnam)
for line in fhandler:
if line.startswith('#'):
if line.startswith('# CRM '):
crm, clen = line[6:].split('\t')
genome_seq[crm] = int(clen)
else:
break
pos += len(line)
fhandler.seek(pos)
names = ['<== <== both reverse',
'<== ==> opposed (Extra-self-circles)',
'==> <== facing (Extra-dangling-ends)',
'==> ==> both forward']
dirs = [[0 for i in range(max_len)],
[0 for i in range(max_len)],
[0 for i in range(max_len)],
[0 for i in range(max_len)]]
iterator = (fhandler.next() for _ in xrange(nreads)) if nreads else fhandler
if valid_pairs:
comp_re = lambda x, y: x != y
else:
comp_re = lambda x, y: x == y
for line in iterator:
(crm1, pos1, dir1, len1, re1, _,
crm2, pos2, dir2, len2, re2) = line.strip().split('\t')[1:12]
pos1, pos2 = int(pos1), int(pos2)
if pos2 < pos1:
pos2, pos1 = pos1, pos2
dir2, dir1 = dir1, dir2
len2, len1 = len1, len2
dir1, dir2 = int(dir1), int(dir2)
len1, len2 = int(len1), int(len2)
if dir1 == 0:
pos1 -= len1
if dir2 == 1:
pos2 += len2
diff = pos2 - pos1
        # only ligated; same chromosome; below max_dist; not multi-contact
if comp_re(re1, re2) and crm1 == crm2 and diff < max_len and len1 == len2:
dir1, dir2 = dir1 * 2, dir2
dirs[dir1 + dir2][diff] += 1
sum_dirs = [0 for i in range(max_len)]
for i in range(max_len):
sum_dir = float(sum(dirs[d][i] for d in range(4)))
for d in range(4):
try:
dirs[d][i] = dirs[d][i] / sum_dir
except ZeroDivisionError:
dirs[d][i] = 0.
sum_dirs[i] = sum_dir
plt.figure(figsize=(14, 9))
if full_step:
axLp = plt.subplot2grid((3, 2), (0, 0), rowspan=2)
axLb = plt.subplot2grid((3, 2), (2, 0), sharex=axLp)
axRp = plt.subplot2grid((3, 2), (0, 1), rowspan=2, sharey=axLp)
axRb = plt.subplot2grid((3, 2), (2, 1), sharex=axRp, sharey=axLb)
else:
axLp = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
axLb = plt.subplot2grid((3, 1), (2, 0), sharex=axLp)
for d in range(4):
axLp.plot([sum(dirs[d][i:i + half_step]) / half_step
for i in range(0, half_len - half_step, half_step)],
alpha=0.7, label=names[d])
axLp.set_ylim(0, 1)
axLp.set_yticks([0, 0.25, 0.5, 0.75, 1])
axLp.set_xlim(0, half_len / half_step)
axLp.set_xticks(axLp.get_xticks()[:-1])
axLp.set_xticklabels([str(int(i)) for i in axLp.get_xticks() * half_step])
axLp.grid()
if full_step:
axLp.spines['right'].set_visible(False)
plt.setp(axLp.get_xticklabels(), visible=False)
axLb.spines['right'].set_visible(False)
axLp.set_ylabel('Proportion of reads in each category')
axLb.bar(range(0, half_len / half_step - 1),
[sum(sum_dirs[i:i + half_step]) / half_step
for i in range(0, half_len - half_step, half_step)],
alpha=0.5, color='k')
axLb.set_ylabel("Log number of reads\nper genomic position")
axLb.set_yscale('log')
axLb.grid()
axLb.set_xlabel('Distance between mapping position of the two ends\n'
'(averaged in windows of 20 nucleotides)')
if full_step:
for d in range(4):
axRp.plot([sum(dirs[d][i:i + full_step]) / full_step
for i in range(half_len, full_len + full_step, full_step)],
alpha=0.7, label=names[d])
axRp.spines['left'].set_visible(False)
axRp.set_xlim(0, full_len / full_step - 2000 / full_step)
axRp.set_xticks(range((10000 - half_step) / full_step, (full_len + full_step) / full_step, 20))
axRp.set_xticklabels([int(i) for i in range(10000, full_len + full_step, full_step * 20)])
plt.setp(axRp.get_xticklabels(), visible=False)
axRp.legend(title='Strand on which each read-end is mapped\n(first read-end is always smaller than second)')
axRp.yaxis.tick_right()
axRp.tick_params(labelleft=False)
axRp.tick_params(labelright=False)
axRp.grid()
axRb.bar(range(0, full_len / full_step - half_len / full_step + 1),
[sum(sum_dirs[i:i + full_step]) / full_step
for i in range(half_len, full_len + full_step, full_step)],
alpha=0.5, color='k')
axRb.set_ylim(0, max(sum_dirs) * 1.1)
axRb.spines['left'].set_visible(False)
axRb.yaxis.tick_right()
axRb.tick_params(labelleft=False)
axRb.tick_params(labelright=False)
axRb.set_xlabel('Distance between mapping position of the two ends\n'
                            '(averaged in windows of 500 nucleotides)')
axRb.set_yscale('log')
axRb.grid()
# decorate...
d = .015 # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=axLp.transAxes, color='k', clip_on=False)
axLp.plot((1 - d, 1 + d), (1-d, 1+d), **kwargs) # top-left diagonal
axLp.plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal
kwargs.update(transform=axRp.transAxes) # switch to the bottom axes
axRp.plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal
axRp.plot((-d, +d), (-d, +d), **kwargs) # bottom-right diagonal
w = .015
h = .030
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=axLb.transAxes, color='k', clip_on=False)
axLb.plot((1 - w, 1 + w), (1 - h, 1 + h), **kwargs) # top-left diagonal
axLb.plot((1 - w, 1 + w), ( - h, + h), **kwargs) # top-right diagonal
kwargs.update(transform=axRb.transAxes) # switch to the bottom axes
axRb.plot((- w, + w), (1 - h, 1 + h), **kwargs) # bottom-left diagonal
axRb.plot((- w, + w), ( - h, + h), **kwargs) # bottom-right diagonal
plt.subplots_adjust(wspace=0.05)
plt.subplots_adjust(hspace=0.1)
else:
axLp.legend(title='Strand on which each read-end is mapped\n(first read-end is always smaller than second)')
if savefig:
tadbit_savefig(savefig)
else:
plt.show()
# For back compatibility
def insert_sizes(fnam, savefig=None, nreads=None, max_size=99.9, axe=None,
show=False, xlog=False, stats=('median', 'perc_max'),
too_large=10000):
"""
Deprecated function, use fragment_size
"""
warn("WARNING: function has been replaced by fragment_size", category=DeprecationWarning,)
return fragment_size(fnam, savefig=savefig, nreads=nreads, max_size=max_size, axe=axe,
show=show, xlog=xlog, stats=stats,
too_large=too_large)
| gpl-3.0 |
timqian/sms-tools | lectures/5-Sinusoidal-model/plots-code/sineModelAnal-flute.py | 24 | 1179 | import numpy as np
import matplotlib.pyplot as plt
import sys, os, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
import sineModel as SM
import utilFunctions as UF
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/flute-A4.wav'))
w = np.blackman(601)
N = 1024
H = 150
t = -80
minSineDur = .1
maxnSines = 150
mX, pX = STFT.stftAnal(x, fs, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur)
plt.figure(1, figsize=(9.5, 5))
maxplotfreq = 5000.0
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('mX + sinusoidal tracks (flute-A4.wav)')
plt.tight_layout()
plt.savefig('sineModelAnal-flute.png')
plt.show() | agpl-3.0 |
nealbob/nealbob.github.io | _site/code/multicore_storage_sim.py | 2 | 2177 | import numpy as np
from matplotlib import pyplot as plt
import time
import errno  # needed by retry_on_eintr below
from multiprocessing import Process
from multiprocessing.queues import Queue
def retry_on_eintr(function, *args, **kw):
while True:
try:
return function(*args, **kw)
except IOError, e:
if e.errno == errno.EINTR:
continue
else:
raise
class RetryQueue(Queue):
"""Queue which will retry if interrupted with EINTR."""
def get(self, block=True, timeout=None):
return retry_on_eintr(Queue.get, self, block, timeout)
def simulate(K, mu, sig, Sbar, T, multi=False, que=0, jobno=0):
np.random.seed(jobno)
S = np.zeros(T+1)
W = np.zeros(T+1)
I = np.zeros(T+1)
S[0] = K
for t in range(T):
W[t] = min(S[t], Sbar)
I[t+1] = max(np.random.normal(mu, sig), 0)
S[t+1] = min(S[t] - W[t] + I[t+1], K)
if multi:
que.put(S)
else:
return S
def multi_sim(CORES=2, T=100):
results = []
ques = [Queue() for i in range(CORES)]
args = [(100, 70, 70, 70, int(T/CORES), True, ques[i], i) for i in range(CORES)]
jobs = [Process(target=simulate, args=(a)) for a in args]
for j in jobs: j.start()
for q in ques: results.append(q.get())
for j in jobs: j.join()
S = np.hstack(results)
return S
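# Hedged usage sketch (added comment, not part of the original script): run the
# multi-core simulation and inspect the combined storage series, e.g.
#   S = multi_sim(CORES=2, T=100000)
#   print S.shape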
"""
### Sample size
T = 1000000
# Single core run ==================================
tic = time.time()
S = simulate(100, 70, 70, 70, T)
toc = time.time()
print 'Single core run time: ' + str(round(toc - tic,3))
plt.plot(S[0:100])
plt.show()
# Multi core run ==================================
tic = time.time()
CORES = 2
results = []
ques = [Queue() for i in range(CORES)]
args = [(100, 70, 70, 70, int(T/CORES), True, ques[i], i) for i in range(CORES)]
jobs = [Process(target=simulate, args=(a)) for a in args]
for j in jobs: j.start()
for q in ques: results.append(q.get())
for j in jobs: j.join()
S = np.hstack(results)
toc = time.time()
print 'Multi-core run time: ' + str(toc - tic)
plt.plot(S[0:100])
plt.show()
print S.shape
plt.scatter(results[0], results[1])
plt.show()
"""
| mit |
echohenry2006/tvb-library | contrib/from_articles/region_deterministic_bnm_wc.py | 5 | 3642 | # -*- coding: utf-8 -*-
"""
What:
Reproduces Figures 23 and 24 of Sanz-Leon P., Knock, S. A., Spiegler, A. and Jirsa V.
Mathematical framework for large-scale brain network modelling in The Virtual Brain.
Neuroimage, 2014, (in review)
Needs:
A working installation of tvb
Run:
python region_deterministic_bnm_wc.py -s True -f True
#Subsequent calls can be made with:
python region_deterministic_bnm_wc.py -f True
.. author:: Paula Sanz-Leon
"""
import numpy
import argparse
from tvb.simulator.lab import *
import matplotlib.pylab as pylab
pylab.rcParams['figure.figsize'] = 19.42, 12 # that's default image size for this interactive session
pylab.rcParams.update({'font.size': 22})
parser = argparse.ArgumentParser(description='Reproduce results of Figure XX presented in Sanz-Leon et al 2014')
parser.add_argument('-s','--sim', help='Run the simulations', default=False)
parser.add_argument('-f','--fig', help='Plot the figures', default=False)
args = vars(parser.parse_args())
speed = 4.0
simulation_length = 512
oscilator = models.WilsonCowan(c_1 = 16., c_2=12., c_3=15., c_4=3, tau_e=8., tau_i=8., a_e=1.3, a_i=2., theta_e=4., theta_i=3.7)
white_matter = connectivity.Connectivity(load_default=True)
white_matter.speed = numpy.array([speed])
gcs = 8
white_matter_coupling = coupling.Linear(a=2**-gcs)
#Initialise an Integrator
heunint = integrators.HeunDeterministic(dt=2**-4)
#Initialise some Monitors with period in physical time
momo = monitors.Raw()
mama = monitors.TemporalAverage(period=2**-2)
#Bundle them
what_to_watch = (momo, mama)
#Initialise a Simulator -- Model, Connectivity, Integrator, and Monitors.
sim = simulator.Simulator(model = oscilator, connectivity = white_matter,
coupling = white_matter_coupling,
integrator = heunint, monitors = what_to_watch)
sim.configure()
LOG.info("Starting simulation...")
#Perform the simulation
raw_data = []
raw_time = []
tavg_data = []
tavg_time = []
for raw, tavg in sim(simulation_length=simulation_length):
if not raw is None:
raw_time.append(raw[0])
raw_data.append(raw[1])
if not tavg is None:
tavg_time.append(tavg[0])
tavg_data.append(tavg[1])
LOG.info("Finished simulation.")
#Make the lists numpy.arrays for easier use.
RAW = numpy.array(raw_data)
TAVG = numpy.array(tavg_data)
# <codecell>
numpy.save('region_deterministic_bnm_article_wc_raw.npy', RAW)
numpy.save('region_deterministic_bnm_article_wc_rawtime.npy', raw_time)
numpy.save('region_deterministic_bnm_article_wc_tavg.npy', TAVG)
numpy.save('region_deterministic_bnm_article_wc_tavgtime.npy', tavg_time)
if args['fig']:
RAW = numpy.load('region_deterministic_bnm_article_wc_raw.npy')
raw_time = numpy.load('region_deterministic_bnm_article_wc_rawtime.npy')
#Plot temporally averaged time series
figure(1)
subplot(1, 2, 1)
plot(raw_time, RAW[:, 0, :, 0], 'k', alpha=0.042, linewidth=3)
plot(raw_time, RAW[:, 1, :, 0], 'r', alpha=0.042, linewidth=3)
plot(raw_time, RAW[:, 0, :, 0].mean(axis=1), 'k', linewidth=3)
plot(raw_time, RAW[:, 1, :, 0].mean(axis=1), 'r', linewidth=3)
xlabel('time[ms]')
#ylim([-25, 5])
xlim([0, sim.simulation_length])
subplot(1, 2, 2)
plot(RAW[:, 0, :, 0], RAW[:, 1, :, 0], alpha=0.042)
plot(RAW[:, 0, :, 0].mean(axis=1), RAW[:, 1, :, 0].mean(axis=1), alpha=1.)
plot(RAW[0, 0, :, 0], RAW[0, 1, :, 0], 'bo', alpha=0.15)
xlabel(r'$E$')
ylabel(r'$I$')
show()
fig_name = 'wc_default_speed_' + str(int(white_matter.speed)) + '_gcs_2**-' + str(gcs) + '.pdf'
savefig(fig_name)
###EoF### | gpl-2.0 |
fyffyt/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) are compared with a single decision tree
regressor. As the number of boosts is increased, the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
RayMick/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# mask a subset of the labels to simulate unlabeled points
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
ODM2/ODMToolsPython | odmtools/gui/pnlPlot.py | 1 | 7003 | #Boa:FramePanel:Panel1
import wx
from wx.lib.pubsub import pub as Publisher
try:
from agw import flatnotebook as fnb
except ImportError: # if it's not there locally, try the wxPython lib.
import wx.lib.agw.flatnotebook as fnb
import matplotlib
matplotlib.use('WXAgg')
import plotTimeSeries
import plotSummary
import plotHistogram
import plotBoxWhisker
import plotProbability
from odmtools.controller.logicPlotOptions import SeriesPlotInfo
import logging
# from odmtools.common.logger import LoggerTool
#
# tool = LoggerTool()
# logger = tool.setupLogger(__name__, __name__ + '.log', 'w', logging.DEBUG)
logger =logging.getLogger('main')
[wxID_PANEL1, wxID_PAGEBOX, wxID_PAGEHIST, wxID_PAGEPROB,
wxID_PAGESUMMARY, wxID_PAGETIMESERIES, wxID_TABPLOTS
] = [wx.NewId() for _init_ctrls in range(7)]
class pnlPlot(fnb.FlatNotebook):
def __init__(self, parent, taskserver):
self.taskserver = taskserver
self._init_ctrls(parent)
self.initPubSub()
self.parent = parent
def _init_ctrls(self, parent):
fnb.FlatNotebook.__init__(self, id=wxID_TABPLOTS, name=u'tabPlots',
parent=parent, pos=wx.Point(0, 0), size=wx.Size(491, 288),
agwStyle=fnb.FNB_NODRAG | fnb.FNB_HIDE_TABS)
# style |= fnb.FNB_HIDE_TABS
# self.book.SetAGWWindowStyleFlag(style)
self.pltTS = plotTimeSeries.plotTimeSeries(id=wxID_PAGETIMESERIES, name='pltTS',
parent=self, pos=wx.Point(0, 0), size=wx.Size(605, 458),
style=wx.TAB_TRAVERSAL)
self.AddPage(self.pltTS, 'TimeSeries')
self.pltProb = plotProbability.plotProb(id=wxID_PAGEPROB, name='pltProb',
parent=self, pos=wx.Point(0, 0), size=wx.Size(605, 458),
style=wx.TAB_TRAVERSAL)
        self.AddPage(self.pltProb, 'Probability')
self.pltHist = plotHistogram.plotHist(id=wxID_PAGEHIST, name='pltHist',
parent=self, pos=wx.Point(0, 0), size=wx.Size(605, 458),
style=wx.TAB_TRAVERSAL)
self.AddPage(self.pltHist, 'Histogram')
self.pltBox = plotBoxWhisker.PlotBox(id=wxID_PAGEBOX, name='pltBox',
parent=self, pos=wx.Point(0, 0), size=wx.Size(605, 458),
style=wx.TAB_TRAVERSAL)
self.AddPage(self.pltBox, 'Box/Whisker')
self.pltSum = plotSummary.plotSummary(id=wxID_PAGESUMMARY, name=u'pltSum',
parent=self, pos=wx.Point(784, 256), size=wx.Size(437, 477),
style=wx.TAB_TRAVERSAL)
self.AddPage(self.pltSum, 'Summary')
self._seriesPlotInfo = None
self.editID = None
self.legendVisible = False
def initPubSub(self):
Publisher.subscribe(self.onDateChanged, "onDateChanged")
Publisher.subscribe(self.onDateFull, "onDateFull")
Publisher.subscribe(self.onPlotType, "onPlotType")
Publisher.subscribe(self.onShowLegend, "onShowLegend")
Publisher.subscribe(self.onNumBins, "onNumBins")
Publisher.subscribe(self.onRemovePlot, "removePlot")
Publisher.subscribe(self.onRemovePlots, "removeMultPlot")
Publisher.subscribe(self.onChangeSelection, "changePlotSelection")
Publisher.subscribe(self.onUpdateValues, "updateValues")
Publisher.subscribe(self.clear, "clearPlot")
def onUpdateValues(self, event):
self.pltTS.updateValues()
def onChangeSelection(self, datetime_list):
self.pltTS.changePlotSelection( datetime_list)
def onNumBins(self, numBins):
self.pltHist.changeNumOfBins(numBins)
def onDateChanged(self, startDate, endDate):
self._seriesPlotInfo.updateDateRange(startDate, endDate)
self.redrawPlots()
def onDateFull(self):
self._seriesPlotInfo.updateDateRange()
self.redrawPlots()
# Reset the date to the full date
def onPlotType(self, event, ptype):
self.pltTS.onPlotType(ptype)
self.pltProb.onPlotType(ptype)
def onShowLegend(self, event, isVisible):
try:
self.pltTS.onShowLegend(isVisible)
self.pltProb.onShowLegend(isVisible)
self.legendVisible = isVisible
except AttributeError:
pass
def stopEdit(self):
self._seriesPlotInfo.stopEditSeries()
self.editID = None
self.pltTS.stopEdit()
self.redrawPlots()
def addEditPlot(self, memDB, seriesID, record_service):
self.record_service = record_service
if not self._seriesPlotInfo:
self._seriesPlotInfo = SeriesPlotInfo(memDB, self.taskserver)
self.editID = seriesID
self._seriesPlotInfo.setEditSeries(self.editID)
self.pltTS.setEdit(self.editID)
self.redrawPlots()
def addPlot(self, memDB, seriesID):
"""
Creates the plot
"""
logger.debug("Adding plot")
Publisher.sendMessage("EnablePlotButton", plot=self.getActivePlotID(), isActive=True)
if not self._seriesPlotInfo:
self._seriesPlotInfo = SeriesPlotInfo(memDB, self.taskserver)
self._seriesPlotInfo.update(seriesID, True)
logger.debug("Redrawing plots")
self.redrawPlots()
def onRemovePlot(self, seriesID):
self._seriesPlotInfo.update(seriesID, False)
self.redrawPlots()
def onRemovePlots(self, seriesIDs):
for series in seriesIDs:
self._seriesPlotInfo.update(series.id, False)
self.redrawPlots()
def redrawPlots(self):
logger.debug("Plot Summary")
self.pltSum.Plot(self._seriesPlotInfo)
logger.debug("Plot Probability")
self.pltProb.Plot(self._seriesPlotInfo)
logger.debug("Plot Boxwhisker")
self.pltBox.Plot(self._seriesPlotInfo)
logger.debug("Plot Timeseries")
self.pltTS.Plot(self._seriesPlotInfo)
logger.debug("Plot Histogram")
self.pltHist.Plot(self._seriesPlotInfo)
self.onShowLegend(event=None, isVisible=self.legendVisible)
maxStart, maxEnd, currStart, currEnd = self._seriesPlotInfo.getDates()
Publisher.sendMessage("resetdate", startDate=maxStart, endDate=maxEnd, currStart=currStart, currEnd=currEnd)
def selectPlot(self, value):
self.SetSelection(value)
def getActivePlotID(self):
return self.GetSelection()
def close(self):
self.pltTS.close()
def clear(self):
"""
:return:
"""
if self._seriesPlotInfo:
for seriesID in self._seriesPlotInfo.getSeriesIDs():
self._seriesPlotInfo.update(seriesID, False)
self.redrawPlots()
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/sklearn/examples/ensemble/plot_random_forest_regression_multioutput.py | 1 | 3492 | """
============================================================
Comparing random forests and the multi-output meta estimator
============================================================
An example to compare multi-output regression with random forest and
the :ref:`multioutput.MultiOutputRegressor <multiclass>` meta-estimator.
This example illustrates the use of the
:ref:`multioutput.MultiOutputRegressor <multiclass>` meta-estimator
to perform multi-output regression. A random forest regressor is used,
which supports multi-output regression natively, so the results can be
compared.
The random forest regressor will only ever predict values within the
range of observations or closer to zero for each of the targets. As a
result the predictions are biased towards the centre of the circle.
Using a single underlying feature the model learns both the
x and y coordinate as output.
"""
print(__doc__)
# Author: Tim Head <betatim@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputRegressor
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(600, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y += (0.5 - rng.rand(*y.shape))
X_train, X_test, y_train, y_test = train_test_split(X, y,
train_size=400,
random_state=4)
max_depth = 30
regr_multirf = MultiOutputRegressor(RandomForestRegressor(max_depth=max_depth,
random_state=0))
regr_multirf.fit(X_train, y_train)
regr_rf = RandomForestRegressor(max_depth=max_depth, random_state=2)
regr_rf.fit(X_train, y_train)
# Predict on new data
y_multirf = regr_multirf.predict(X_test)
y_rf = regr_rf.predict(X_test)
# Plot the results
plt.figure()
s = 50
a = 0.4
plt.scatter(y_test[:, 0], y_test[:, 1], edgecolor='k',
c="navy", s=s, marker="s", alpha=a, label="Data")
plt.scatter(y_multirf[:, 0], y_multirf[:, 1], edgecolor='k',
c="cornflowerblue", s=s, alpha=a,
label="Multi RF score=%.2f" % regr_multirf.score(X_test, y_test))
plt.scatter(y_rf[:, 0], y_rf[:, 1], edgecolor='k',
c="c", s=s, marker="^", alpha=a,
label="RF score=%.2f" % regr_rf.score(X_test, y_test))
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("target 1")
plt.ylabel("target 2")
plt.title("Comparing random forests and the multi-output meta estimator")
plt.legend()
# plt.show()
pltshow(plt)
| mit |
Reagankm/KnockKnock | venv/lib/python3.4/site-packages/matplotlib/testing/image_util.py | 11 | 3765 | # This module contains some functionality from the Python Imaging
# Library, that has been ported to use Numpy arrays rather than PIL
# Image objects.
# The Python Imaging Library is
# Copyright (c) 1997-2009 by Secret Labs AB
# Copyright (c) 1995-2009 by Fredrik Lundh
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
# Permission to use, copy, modify, and distribute this software and its
# associated documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appears in all
# copies, and that both that copyright notice and this permission notice
# appear in supporting documentation, and that the name of Secret Labs
# AB or the author not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import numpy as np
from matplotlib.cbook import deprecated, warn_deprecated
warn_deprecated('1.4.0', name='matplotlib.testing.image_util',
obj_type='module')
@deprecated('1.4.0')
def autocontrast(image, cutoff=0):
"""
Maximize image contrast, based on histogram. This completely
ignores the alpha channel.
"""
assert image.dtype == np.uint8
output_image = np.empty((image.shape[0], image.shape[1], 3), np.uint8)
for i in xrange(0, 3):
plane = image[:,:,i]
output_plane = output_image[:,:,i]
h = np.histogram(plane, bins=256)[0]
if cutoff:
# cut off pixels from both ends of the histogram
# get number of pixels
n = 0
for ix in xrange(256):
n = n + h[ix]
# remove cutoff% pixels from the low end
cut = n * cutoff / 100
for lo in range(256):
if cut > h[lo]:
cut = cut - h[lo]
h[lo] = 0
else:
h[lo] = h[lo] - cut
cut = 0
if cut <= 0:
break
# remove cutoff% samples from the hi end
cut = n * cutoff / 100
for hi in xrange(255, -1, -1):
if cut > h[hi]:
cut = cut - h[hi]
h[hi] = 0
else:
h[hi] = h[hi] - cut
cut = 0
if cut <= 0:
break
# find lowest/highest samples after preprocessing
for lo in xrange(256):
if h[lo]:
break
for hi in xrange(255, -1, -1):
if h[hi]:
break
if hi <= lo:
output_plane[:,:] = plane
else:
scale = 255.0 / (hi - lo)
offset = -lo * scale
lut = np.arange(256, dtype=np.float)
lut *= scale
lut += offset
lut = lut.clip(0, 255)
lut = lut.astype(np.uint8)
output_plane[:,:] = lut[plane]
return output_image
| gpl-2.0 |
nblago/utils | src/model/BBFit.py | 1 | 66521 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 22 10:57:34 2018
Class that enables fitting a black body function to a set of magnitudes.
@author: nadiablago
@version: 0.22
"""
from __future__ import print_function
import matplotlib
from matplotlib import pylab as plt
import corner
from astropy import units as u
import astropy.constants as cnt
import os, sys
import numpy as np
import emcee
from scipy import stats
import extinction
from astropy.cosmology import FlatLambdaCDM
import warnings
#If PYSYN_CDBS is not defined, it adds the environment variable which points to the
#filter response files for the bands we are interested in.
if not 'PYSYN_CDBS' in os.environ.keys():
print ("Adding the Pysynphot environment:")
os.environ['PYSYN_CDBS'] = "/Users/USER/SOMEWHERE/pysynphot_files"
print ('PYSYN_CDBS environment variable set to: ', os.environ['PYSYN_CDBS'])
'''os.environ['PYSYN_CDBS'] = "/scratch/Software/pysynphot_files/cdbs/"
# Add the environment variable which points to the filter response files for the bands we are interested in.
if not 'PYSYN_CDBS' in os.environ.keys():
print("Adding the Pysynphot environment:")
os.environ['PYSYN_CDBS'] = "/scratch/Software/pysynphot_files/cdbs/"
print('PYSYN_CDBS environment variable set to: ', os.environ['PYSYN_CDBS'])'''
os.environ['PYSYN_CDBS'] = "/Users/nadiablago/Documents/Software/pysynphot_files/"
import pysynphot as ps
class BBFit:
def __init__(self):
'''
Constructor initializes all the parameters to
defaults.
'''
#Some predefined constants in the units we need them
self.c = cnt.c.to(u.cm/u.s).value #2.99792458e+10 #cm / s
self.h = cnt.h.to(u.erg * u.s).value #6.62607004e-27 #erg s
self.k_B = cnt.k_B.to(u.erg / u.K).value#1.38064852e-16 #erg / K
#Source parameters
self.av_host = 0
self.av_mw = 0
self.law = "Fitzpatrick"
self.law_mw = "Fitzpatrick"
#Black body models
self.initT1 = 10000 #K
self.initR1 = 1 # Rsun
self.initT2 = 3000 #K
self.initR2 = 1 # Rsun
self.z = None
self.distMpc = None #in Mpc
self.mjd = 0
#Power law models
self.alpha = 0.75
self.alphaerr1 = 0
self.alphaerr2 = 0
self.scale = 1
self.scaleerr1 = 0.1
self.scaleerr2 = 0.1
#Disk model (scale is already in the power law model)
#Stellar mass, radius, log accretion mass per year, outer radius of accretion disk
self.Mstar = 1
self.Mstarerr1 = 0.1
self.Mstarerr2 = 0.1
self.Rstar = 1
self.Rstarerr1 = 0.1
        self.Rstarerr2 = 0.1
self.logMacc = -8
self.logMaccerr1 = -9
self.logMaccerr2 = -9
self.R_out = 3
self.R_outerr1 = 1
self.R_outerr2 = 1
#Location for plots
self.plotdir = "../../data/plots"
#Location for fit results
self.resdir = "../../data/modelfits"
self.resfile = "fit_results.txt"
#MCMC parameters
self.method = 'ensemble' #or HA for Hastings
self.mhtune = True # tuning of the Metropolis-Hastings
self.niterations = 10000
self.burnin = 5000
self.threads = 10
self.nwalkers = 20
self.sampler = None
self.model = "BlackBody" #others are "BlackBody_Av" or "BlackBody2_Av", "PowerLaw", "PowerLaw_BlackBody"
#Input data parameters.
#The fitter will run either with magnitudes or with fluxes
self.mags = None
self.magerrs = None
self.bands = None
#Indicates whether the magnitude is in AB or Vega
self.photsys = None
self.wls = None
self.fluxes = None
self.fluxerrs = None
#Output
self.T = None
self.Terr1 = None
self.Terr2 = None
self.R = None
self.Rerr1 = None
self.Rerr2 = None
self.L = None
self.Lerr1 = None
self.Lerr2 = None
#Output for the secondary star
self.Tsec = None
self.Tsecerr1 = None
self.Tsecerr2 = None
self.Rsec = None
self.Rsecerr1 = None
self.Rsecerr2 = None
self.Lsec = None
self.Lsecerr1 = None
self.Lsecerr2 = None
self.cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725)
#Set the plotting characteristics
self._matplotlib_init()
self.banddic = {"Y": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/ctio_y_andicam.dat"),
"J": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/bessell_j_002.fits"),
"H": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/bessell_h_002.fits"),
"K": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/bessell_k_002.fits"),
"keck,J": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.J.dat"),
"keck,H": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.H.dat"),
"keck,Ks": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.Ks.dat"),
"keck,K": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.K.dat"),
"spitzer,3.6": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Spitzer_irac1_3.6.dat"),
"spitzer,4.5": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Spitzer_irac2_4.5.dat"),
"spitzer,5.8": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Spitzer_irac3_5.8.dat"),
"spitzer,8.0": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Spitzer_irac4_8.0.dat"),
"wise,w1": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/WISE_WISE.W1.dat"),
"wise,w2": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/WISE_WISE.W2.dat"),
"wise,w3": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/WISE_WISE.W3.dat"),
"wise,w4": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/WISE_WISE.W4.dat"),
"swift,uvw2": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/swift_uvw2_uvot.dat"),
"swift,uvm2": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/swift_uvm2_uvot.dat"),
"swift,uvw1": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/swift_uvw1_uvot.dat"),
"swift,u": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/swift_u_uvot.dat"),
"swift,b": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/swift_b_uvot.dat"),
"swift,v": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/swift_v_uvot.dat"),
"paranal,Y": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_VISTA.Y.dat"),
"paranal,Z": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_VISTA.Z.dat"),
"paranal,J": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_VISTA.J.dat"),
"paranal,H": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_VISTA.H.dat"),
"paranal,Ks": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_VISTA.Ks.dat"),
"omegacam,u": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_OmegaCAM.u_SDSS.dat"),
"omegacam,g": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_OmegaCAM.g_SDSS.dat"),
"omegacam,r": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_OmegaCAM.r_SDSS.dat"),
"omegacam,i": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_OmegaCAM.i_SDSS.dat"),
"omegacam,z": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_OmegaCAM.z_SDSS.dat"),
"omegacam,Halpha": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_OmegaCAM.Halpha.dat"),
"nirc2,j": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.J.dat"),
"nirc2,h": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.H.dat"),
"nirc2,ks": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.Ks.dat")
}
def _matplotlib_init(self):
'''
Set up preferences on matplotlib plot appearance.
'''
matplotlib.rcParams['xtick.minor.size'] = 6
matplotlib.rcParams['xtick.major.size'] = 6
matplotlib.rcParams['ytick.major.size'] = 6
matplotlib.rcParams['xtick.minor.size'] = 4
matplotlib.rcParams['ytick.minor.size'] = 4
matplotlib.rcParams['lines.linewidth'] = 0.5
matplotlib.rcParams['axes.linewidth'] = 1.5
matplotlib.rcParams['font.size']= 14.0
matplotlib.rcParams['font.family']= 'sans-serif'
matplotlib.rcParams['xtick.major.width']= 2.
matplotlib.rcParams['ytick.major.width']= 2.
matplotlib.rcParams['ytick.direction']='in'
matplotlib.rcParams['xtick.direction']='in'
def _band2flux(self):
'''
Will transform the magnitude measurement into a flux measurement.
'''
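        # Approach (added comment): a reference blackbody spectrum is renormalized
        # to each observed magnitude within its bandpass (after correcting for
        # Milky Way extinction), then "observed" through that bandpass with
        # pysynphot to obtain the effective wavelength and the flux density in
        # flam, with asymmetric errors derived from the magnitude errors.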
wls = np.array([])
fluxes = np.array([])
fluxerr = np.array([])
#Create a black body spectrum with an arbitrary value
lam = np.linspace(100, 120000, 10000)
sp = ps.BlackBody(10000)
sp.convert('flam')
sp2 = self._model_2(lam, 10000, 1)
sp2 = sp2 * np.max(sp.flux) / np.max(sp2)
sp = ps.ArraySpectrum(lam, sp2)
for b, m, me, psys in zip(self.bands, self.mags, self.magerrs, self.photsys):
print ("Band,",b)
#Create the observation bandpass
try:
band = ps.ObsBandpass(b)
except ValueError:
#The band is not in the standard list
#We need to go to the dictionary to retrieve the transmission function.
band = ps.FileBandpass(self.banddic[b])
#band.waveunits.convert("angstrom")
#else:
# band.waveunits = ps.units.Angstrom
#Obtain the effective (average) wavelength
effwave = band.avgwave()
#Correct for Milky Way extinction
m = m - extinction.fitzpatrick99(np.array([effwave]), a_v=self.av_mw, unit='aa')[0]
#Normalize the spectrum to the magnitude of the observation
sp_norm = sp.renorm(m, psys, band, force="extrap")
#Observe with the band
obs = ps.Observation(sp_norm, band)
#Get the flux
flux = obs.effstim('flam')
wls = np.append(wls, effwave)
fluxes = np.append(fluxes, flux)
#Compute the error bars
flux_high = flux * 10**(0.4*me)
flux_low = flux * 10**(-0.4*me)
fluxerr = np.append(fluxerr, np.average([flux - flux_low, flux_high-flux]))
return wls, fluxes, fluxerr
def _model(self, lam, p):
'''
Returns the flux for the single BlackBody model for the wavelength introduced.
lam is in A.
p = (T, R)
'''
lam = lam * u.Angstrom
T = p[0] * u.K
R = (p[1] * u.Rsun).to(u.cm)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
Area = np.pi * (4 * np.pi * R**2)
flam = Area * (2*cnt.h*((cnt.c).to(u.cm/u.s))**2/( (lam.to(u.cm))**5))/ \
(np.exp((cnt.h*cnt.c)/(lam.to(u.m)*cnt.k_B*T))-1)
return flam.to(u.erg/u.s/u.Angstrom).value
def _model_2(self, lam, T, R):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
lam = lam * u.Angstrom
T = T * u.K
R = (R * u.Rsun).to(u.cm)
Area = np.pi * (4 * np.pi * R**2)
flam = Area * (2*cnt.h*((cnt.c).to(u.cm/u.s))**2/( (lam.to(u.cm))**5))/ \
(np.exp((cnt.h*cnt.c)/(lam.to(u.m)*cnt.k_B*T))-1)
return flam.to(u.erg/u.s/u.Angstrom).value
def _model_av_r(self, lam, p):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
T = p[0] * u.K
R = (p[1] * u.Rsun).to(u.cm)
a_v = p[2]
if a_v < 0:
return lam * np.inf
#Compute the effect of reddening as a flux factor
flux_red = 10**(-0.4 * extinction.fitzpatrick99(lam, a_v, unit='aa'))
lam = lam * u.Angstrom
area = np.pi * (4 * np.pi * R**2)
flam = area * (2*cnt.h*((cnt.c).to(u.cm/u.s))**2/( (lam.to(u.cm))**5))/ \
(np.exp((cnt.h*cnt.c)/(lam.to(u.m)*cnt.k_B*T))-1)
#Apply the reddening
flam = flam.to(u.erg/u.s/u.Angstrom).value * flux_red
return flam
def _model_av_r_2(self, lam, T, R, a_v):
'''
Return units: erg s-1 A-1
'''
return self._model_av_r(lam, (T, R, a_v))
def _model2_av(self, lam, p):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
T1 = p[0] * u.K
R1 = (p[1] * u.Rsun).to(u.cm)
a_v = p[2]
T2 = p[3] * u.K
R2 = (p[4] * u.Rsun).to(u.cm)
#Compute the effect of reddening as a flux factor
flux_red = 10**(-0.4 * extinction.fitzpatrick99(lam, a_v, unit='aa'))
lam = lam * u.Angstrom
area1 = np.pi * (4 * np.pi * R1**2)
area2 = np.pi * (4 * np.pi * R2**2)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
flam1 = area1 * (2*cnt.h*((cnt.c).to(u.cm/u.s))**2/( (lam.to(u.cm))**5))/ \
(np.exp((cnt.h*cnt.c)/(lam.to(u.m)*cnt.k_B*T1))-1)
flam2 = area2 * (2*cnt.h*((cnt.c).to(u.cm/u.s))**2/( (lam.to(u.cm))**5))/ \
(np.exp((cnt.h*cnt.c)/(lam.to(u.m)*cnt.k_B*T2))-1)
flam = flam1 + flam2
#Apply the reddening
flam = flam.to(u.erg/u.s/u.Angstrom).value * flux_red
return flam
def _model2_av_2(self, lam, T1, R1, a_v, T2, R2):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
return self._model2_av(lam, (T1, R1, a_v, T2, R2))
def _model2_av_r(self, lam, p):
'''
Return units: erg s-1 A-1
'''
T1 = p[0] #In K
R1 = p[1]*69570000000.0 #From Rsun to cm
a_v = p[2]
T2 = p[3]
R2 = p[4]*69570000000.0 #From Rsun to cm
lam = lam * 1e-8 #To cm
if a_v < 0:
return lam * np.inf
#We need an extra pi as it is integrated across all steradians
#The second factor is the surface of the black body
#The third one is the Planck law
with warnings.catch_warnings():
warnings.simplefilter("ignore")
flam1 = np.pi * (4 * np.pi * R1**2) * ( (2*self.h*self.c**2)/( lam**5))/ (np.exp((self.h*self.c)/(lam*self.k_B*T1))-1)
flam2 = np.pi * (4 * np.pi * R2**2) * ( (2*self.h*self.c**2)/( lam**5))/ (np.exp((self.h*self.c)/(lam*self.k_B*T2))-1)
#Compute the effect of reddening as a flux factor
flux_red = 10**(-0.4 * extinction.fitzpatrick99(lam*1e8, a_v, unit='aa'))
flam = (flam1 + flam2) * flux_red *1e-8 #to erg / s / A
#Apply the reddening and transform to erg /s/ A from cm
return flam
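#Worked form of the two-black-body expression above (cgs units, per component):
# F_lambda = pi * (4*pi*R**2) * (2*h*c**2 / lam**5) / (exp(h*c/(lam*k_B*T)) - 1)
#i.e. the Planck law B_lambda(T) times pi (solid-angle integration) and times the emitting
#surface 4*pi*R**2; the trailing 1e-8 converts the flux from per cm to per Angstrom.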
def _model2_av_r_2(self, lam, T1, R1, a_v, T2, R2):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
return self._model2_av_r(lam, (T1, R1, a_v, T2, R2))
def _model2_r(self, lam, p):
'''
Return units: erg s-1 A-1
'''
T1 = p[0] #In K
R1 = p[1]*69570000000.0 #From Rsun to cm
T2 = p[2]
R2 = p[3]*69570000000.0 #From Rsun to cm
lam = lam * 1e-8 #To cm
#We need an extra pi as it is integrated across all steradians
#The second factor is the surface of the black body
#The third one is the Planck law
with warnings.catch_warnings():
warnings.simplefilter("ignore")
flam1 = np.pi * (4 * np.pi * R1**2) * ( (2*self.h*self.c**2)/( lam**5))/ (np.exp((self.h*self.c)/(lam*self.k_B*T1))-1)
flam2 = np.pi * (4 * np.pi * R2**2) * ( (2*self.h*self.c**2)/( lam**5))/ (np.exp((self.h*self.c)/(lam*self.k_B*T2))-1)
flam = (flam1 + flam2)*1e-8 #to erg / s / A
return flam
def _model2_r_2(self, lam, T1, R1, T2, R2):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
return self._model2_r(lam, (T1, R1, T2, R2))
def _model_powerlaw(self, lam, p):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
lam = lam * u.Angstrom
w0 = 4000 #p[0] #Reference wavelength
alpha = p[0]
scale = p[1]
a_v = p[2]
f = ps.PowerLaw(w0, alpha)
f.convert('flam')
flam = np.interp(lam, f.wave, f.flux)
flux_red = 10**(-0.4 * extinction.fitzpatrick99(lam, a_v, unit='aa'))
area = 10**scale
return area * flam * flux_red #.to(u.erg/u.s/u.Angstrom).value
def _model_powerlaw_2(self, lam, alpha, scale, a_v):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
return self._model_powerlaw(lam, (alpha, scale, a_v))
def _model_powerlaw_bb(self, lam, p):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
w0 = 4000 #p[0] #Reference wavelength
alpha = p[0]
scale = p[1]
T_bb = p[2]
R_bb = p[3]
bb_flux = self._model_2(lam, T_bb, R_bb)
lam = lam * u.Angstrom
f = ps.PowerLaw(w0, alpha)
f.convert('flam')
flam = np.interp(lam, f.wave, f.flux)
area = 10**scale
return area * flam + bb_flux
def _model_powerlaw_bb_2(self, lam, alpha, scale, T_bb, R_bb):
'''
Return units: erg s-1 A-1
'''
return self._model_powerlaw_bb(lam, (alpha, scale, T_bb, R_bb))
def _model_accretion_disk_old2(self, lam, Mstar, Rstar, logMacc, scale, R_out):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
return self._model_accretion_disk_old(lam, (Mstar, Rstar, logMacc, scale, R_out))
def _model_accretion_disk_old(self, lam, p):
'''
Equation 1 from Kenyon, Hartmann, Hewett 1988.
'''
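#The flux and temperature profiles implemented below follow
# F(R) = 3*G*Mstar*Macc / (8*pi*Rstar**3) * (Rstar/R)**3 * (1 - sqrt(Rstar/R))
# T(R) = (F(R) / sigma_sb)**0.25
#with the innermost annuli (Rstar < R <= 1.5*Rstar) capped at T_max.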
Mstar = p[0]
Rstar = p[1]
Macc = p[2]
scale = p[3]
R_out = p[4]
if Mstar<0 or Macc<-12 or Rstar<0.001 or scale<0 or R_out < Rstar:
return np.ones(len(lam))*np.inf
Macc = 10**Macc
R = np.linspace(Rstar,R_out,20)
dR = R[1] - R[0]
F_r = (3 * cnt.G * Mstar * u.Msun * Macc * u.Msun/u.year / 8 / np.pi / (u.Rsun*Rstar)**3) * (Rstar/R)**3 * (1 - (Rstar/R)**0.5)
F_r = F_r.to(u.erg/u.cm**2/u.s)
T_r = ((F_r / cnt.sigma_sb)**0.25).to(u.K)
T_max = 13000 * u.K *(Mstar)**0.25 * (Macc / 1e-5)**0.25 * (Rstar)**-0.75
#Create the disk model
#For each differential radius, we compute the black body spectrum corresponding
# to the temperature at that radius, and scale it by the flux expected at that
# radius.
disk_model = []
for i, ri in enumerate(R):
if ri>Rstar and ri<=1.5*Rstar:
sp = ps.BlackBody(T_max.value)
#sp = ps.BlackBody(T_r[i].value)
else:
sp = ps.BlackBody(T_r[i].value)
sp.convert('flam')
tot_flux = sp.trapezoidIntegration(sp.wave, sp.flux)
#Compute the total emitted flux for the spherical area.
#Adopt the outer radius as the
dist_flux_fac = np.pi * ((ri+dR)**2 - ri**2) * (u.Rsun.to(u.cm))**2
scaled_flux = sp.flux / tot_flux * F_r[i].value #* dist_flux_fac
disk_model.append(scaled_flux)
disk = np.array(disk_model)
disk = np.nansum(disk, axis=0)
sp = ps.ArraySpectrum(sp.wave, disk)
#int_flux = sp.trapezoidIntegration(sp.wave, sp.flux)
int_flux = np.max(sp.flux)
#Normalize (recover) the integral flux from 1kpc
flux_norm= sp.flux #/int_flux
#sp_norm = ps.ArraySpectrum(sp.wave, flux_norm)
flux_norm = np.interp(lam, sp.wave, flux_norm)
#flux_red = 10**(-0.4 * extinction.fitzpatrick99(lam, a_v, unit='aa'))
return flux_norm #* scale #* flux_red
def _model_disk_T(self, R, Mstar, Rstar, logMacc):
F_r = (3 * cnt.G * Mstar * 10**float(logMacc) * (u.Msun**2/u.year)) \
/ (8 * np.pi * (u.Rsun*R)**3) \
* (1 - (Rstar/R)**0.5)
T_r = ((F_r / cnt.sigma_sb)**0.25).to(u.K)
#print (F_r, T_r)
mask = (R>=Rstar) * (R<=1.5*Rstar)
if np.count_nonzero(mask)>0:
T_max = 13000 * u.K *(Mstar)**0.25 * (10**float(logMacc) / 1e-5)**0.25 * (Rstar)**-0.75
T_r[mask] = T_max
#print (mask, "Tmax", T_max, np.count_nonzero(mask))
return T_r.value
def _model_accretion_disk2(self, lam, Mstar, Rstar, logMacc, R_out):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
return self._model_accretion_disk(lam, (Mstar, Rstar, logMacc, R_out))
def _model_accretion_disk(self, lam, p):
Mstar = np.maximum(1e-6, p[0])
Rstar = np.maximum(1e-6, p[1])
logMacc = np.maximum(-12, np.minimum(-7, p[2]))
R_out = np.maximum(1e-6, p[3])
i = 45.0
#Deg to radians
i = np.deg2rad(i%360)
d = self.distMpc*(u.Mpc).to(u.cm)
R = np.linspace(Rstar, R_out, 30)*u.Rsun
nu = (cnt.c / (lam*u.Angstrom)).to(u.Hz)
T_r = self._model_disk_T(R.value, Mstar, Rstar, logMacc)
F_nu_arr = []
for ni in nu:
I_nu_r = R / (np.exp(cnt.h * ni/(cnt.k_B*T_r*u.K)) - 1)
I_flux = np.trapz(I_nu_r, R)
F_nu = (4 * np.pi * cnt.h * np.cos(i)*ni**3)/(cnt.c**2 * d**2) * I_flux
F_nu_arr.append(F_nu.to(u.erg/u.s/u.Hz).value)
F_nu_arr = np.array(F_nu_arr)
s = ps.ArraySpectrum(lam, F_nu_arr, fluxunits='fnu', waveunits='Angstrom')
s.convert('flam')
fluxFactor = 4*np.pi*d**2
return s.flux*fluxFactor
def _get_Qnu(self, a, lam, wavedusttype="silicate"):
'''
Interpolates the grain emissivity Q_nu for the given grain size and wavelength.
'''
from scipy import interpolate
x = np.array([0.001, 0.01, 0.1, 1]) #size
y = np.array([0.01, 0.06, 0.2, 7, 10 ]) #wavelength
#--> size
# | wave
# v
z = np.array([[0.02, 0.2, 0.85, 0.85],
[0.02, 0.7, 0.7, 0.7],
[0.001, 0.01, 0.7, 0.7],
[0.00007, 0.001, 0.01, 0.1],
[0.001, 0.01, 0.1, 1]])
f = interpolate.interp2d(x, y, z, kind='linear')
return f(a, lam)
def _get_knu(self, a, wave, rho=1, ):
'''
Returns the values for the dust mass absorption coefficient
for the Spitzer bands for the given grain size and wavelength.
k_nu = (3. / 4 * np.pi * rho * a**3)* (np.pi * a**2 * Q_nu(a))
'''
k_nu = (3. / 4 * np.pi * rho * a**3)* (np.pi * a**2 * self._get_Qnu(a, wave))
return k_nu
def _model_dust(self, Md, Td, a):
'''
Using the dust modelling approach from Fox et al. 2010.
The assumption is that the dust is optically thin and that there is only one size and
one dust composition.
The opacities are taken from their Figure 4 values.
F_nu = M_d B_nu (T_d )k_nu(a) / d**2
'''
Bnu = ps.BlackBody(Td)
Bnu.convert('fnu')
#Assumption: the dust emission is evaluated at the observed wavelengths (self.wls, in Angstrom)
knu = self._get_knu(a, self.wls) * u.cm**2 / u.g
Fnu = Md * u.Msun * np.interp(self.wls, Bnu.wave, Bnu.flux) * knu / ((self.distMpc * u.Mpc).to(u.cm))**2
return Fnu
#likelihood function
def _like(self, p, xdat, ydat, errdat, debug=False):
'''
p: function parameters
args: carry anything we want to pass to our function (e.g. the data)
'''
if self.model == "BlackBody":
ymod = self._model(xdat, p)
elif self.model == "BlackBody_Av":
ymod = self._model_av_r(xdat, p)
elif self.model == "BlackBody2_Av":
ymod = self._model2_av_r(xdat, p)
elif self.model == "BlackBody2":
ymod = self._model2_r(xdat, p)
elif self.model == "PowerLaw":
ymod = self._model_powerlaw(xdat, p)
elif self.model == "PowerLaw_BlackBody":
ymod = self._model_powerlaw_bb(xdat, p)
elif self.model == "Disk":
ymod = self._model_accretion_disk(xdat, p)
else:
print ("Unknown model", self.model)
return np.nan
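#Convention: photometric upper limits are flagged with a negative flux error (errdat < 0).
#Models that overshoot any upper limit get a vanishingly small probability; only
#detections (errdat > 0) enter the Gaussian likelihood below.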
#Discard models which exceed the upper limits
if (np.any(ymod[errdat<0] > ydat[errdat<0])):
prob = 1e-320
#Compute the likelihood with only valid datapoints.
else:
prob = stats.norm.pdf(ydat[errdat>0] , ymod[errdat>0] , errdat[errdat>0] )
# log probabilities
# we add tiny number to avoid NaNs
mylike = np.log(prob + 1e-320).sum()
return mylike
def _logposterior(self, p, xdat, ydat, errdat):
'''
Returns the log-posterior of the observations, i.e. the sum of the
log(likelihood) and the log(prior).
'''
lp = self._logprior(p)
if (not np.isinf(lp)):
lp= self._like(p, xdat, ydat, errdat) + lp
return lp
def _logprior(self, p):
'''
Returns the prior probability distribution for each model.
'''
if self.model == "BlackBody":
T1 = p[0]
R1 = p[1]
if T1 < 0 or R1 < 0:
return -np.inf
logp = stats.uniform.logpdf(T1, 10, 15000)
logp = logp + stats.uniform.logpdf(R1, 1, 50000)
if self.model =="BlackBody_Av":
T1 = p[0]
R1 = p[1]
av = p[2]
if T1 < 0 or R1 < 0 or av < 0:
return -np.inf
else:
logp = stats.uniform.logpdf(T1, 10, 15000)
logp = logp + stats.uniform.logpdf(R1, 10000, 120000)
logp = logp + stats.uniform.logpdf(av, 0, 3)
elif self.model == "BlackBody2":
T1 = p[0]
R1 = p[1]
T2 = p[2]
R2 = p[3]
if T1 < 0 or T2 > T1 or T2 < 0 or R1 < 0 or R2<0:
return - np.inf
else:
logp = stats.uniform.logpdf(T1, 100, 10000)
logp = logp + stats.uniform.logpdf(R1, 10, 12000)
logp = logp + stats.uniform.logpdf(T2, 10, 5000)
logp = logp + stats.uniform.logpdf(R2, 10, 12000)
elif self.model == "BlackBody2_Av":
T1 = p[0]
R1 = p[1]
av = p[2]
T2 = p[3]
R2 = p[4]
if T1 < 0 or T2 > T1 or T2 < 0 or av < 0 or av > 10:
return - np.inf
else:
logp = stats.uniform.logpdf(T1, 100, 1000)
logp = logp + stats.uniform.logpdf(R1, 10000, 120000)
logp = logp + stats.uniform.logpdf(av, 0, 3)
logp = logp + stats.uniform.logpdf(T2, 100, 1000)
logp = logp + stats.uniform.logpdf(R2, 10000, 120000)
elif self.model == "PowerLaw":
alpha = p[0]
scale = p[1]
av = p[2]
if av < 0:
logp = -np.inf
else:
logp = stats.uniform.logpdf(alpha, 0, 3)
logp = logp + stats.uniform.logpdf(scale, 0.1, 100)
logp = logp + stats.uniform.logpdf(av, 0, 3)
elif self.model == "PowerLaw_BlackBody":
alpha = p[0]
scale = p[1]
T1 = p[2]
R1 = p[3]
if R1 < 0 or T1 < 0 or alpha < 0:
logp = -np.inf
else:
logp = stats.uniform.logpdf(alpha, 0, 3)
logp = logp + stats.uniform.logpdf(scale, 0.1, 100)
logp = logp + stats.uniform.logpdf(T1, 500, 20000)
logp = logp + stats.uniform.logpdf(R1, 0, 500)
elif self.model == "Disk":
Mstar = p[0]
Rstar = p[1]
logMacc = p[2]
R_out = p[3]
if Rstar < 0 or Mstar < 0 or logMacc < -12 or R_out<0 or R_out < Rstar:
logp = -np.inf
else:
logp = stats.uniform.logpdf(Mstar, 0, 1.44)
logp = logp + stats.uniform.logpdf(Rstar, 0, 10)
logp = logp + stats.uniform.logpdf(logMacc, -12, 7)
logp = logp + stats.uniform.logpdf(R_out, 0, 50)
return logp
def _get_max_and_intervals(self, x):
'''
Provided a chain of samples, returns the 34th, 50th (median) and 66th percentiles,
i.e. the central value and an approximate 1-sigma interval.
'''
return np.percentile(x, 34), np.percentile(x, 50), np.percentile(x, 66)
#return percent1, maxp, percent2
def _area2rsun(self, A):
'''
Given the area of the black body in cm2, returns the radius of the object in solar radii.
'''
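#Note: the model spectra define the emitting area as pi * (4 * pi * R**2) (the extra pi
#comes from the solid-angle integration), hence R = sqrt(A / (4 * pi**2)) rather than
#R = sqrt(A / (4 * pi)).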
Aream2 = A * u.cm**2 # add units
Rad = np.sqrt(Aream2/(4*(np.pi)**2)).to(u.Rsun) #in Rsun
return Rad.value
def _fill_output(self):
'''
Computes the confidence intervals from the MCMC distribution.
Transforms the temperature and radius into a black body luminosity.
'''
if self.model.startswith("BlackBody"):
T1, T, T2 = self._get_max_and_intervals(self.sampler.flatchain[:,0])
R1, R, R2 = self._get_max_and_intervals(self.sampler.flatchain[:,1])
self.T = T
self.Terr1 = T - T1
self.Terr2 = T2 - T
self.R = R
self.Rerr1 = R - R1
self.Rerr2 = R2 - R
self.L = self._get_bol_lum(T, R)
self.Lerr1 = self.L - self._get_bol_lum(T1, R1)
self.Lerr2 = self._get_bol_lum(T2, R2) - self.L
if self.model == "BlackBody_Av":
Av1, Av, Av2 = self._get_max_and_intervals(self.sampler.flatchain[:,2])
self.Av = Av
self.Averr1 = Av - Av1
self.Averr2 = Av2 - Av
elif self.model == "BlackBody2_Av":
Av1, Av, Av2 = self._get_max_and_intervals(self.sampler.flatchain[:,2])
Tsec1, Tsec, Tsec2 = self._get_max_and_intervals(self.sampler.flatchain[:,3])
Rsec1, Rsec, Rsec2 = self._get_max_and_intervals(self.sampler.flatchain[:,4])
self.Av = Av
self.Averr1 = Av - Av1
self.Averr2 = Av2 - Av
self.Tsec = Tsec
self.Tsecerr1 = Tsec - Tsec1
self.Tsecerr2 = Tsec2 - Tsec
self.Rsec = Rsec
self.Rsecerr1 = Rsec - Rsec1
self.Rsecerr2 = Rsec2 - Rsec
elif self.model == "BlackBody2":
Tsec1, Tsec, Tsec2 = self._get_max_and_intervals(self.sampler.flatchain[:,2])
Rsec1, Rsec, Rsec2 = self._get_max_and_intervals(self.sampler.flatchain[:,3])
self.Tsec = Tsec
self.Tsecerr1 = Tsec - Tsec1
self.Tsecerr2 = Tsec2 - Tsec
self.Rsec = Rsec
self.Rsecerr1 = Rsec - Rsec1
self.Rsecerr2 = Rsec2 - Rsec
self.Lsec = self._get_bol_lum(Tsec, Rsec)
self.Lsecerr1 = self.Lsec - self._get_bol_lum(Tsec1, Rsec1)
self.Lsecerr2 = self._get_bol_lum(Tsec2, Rsec2) - self.Lsec
elif self.model=="PowerLaw":
alpha1, alpha, alpha2 = self._get_max_and_intervals(self.sampler.flatchain[:,0])
R1, R, R2 = self._get_max_and_intervals(self.sampler.flatchain[:,1])
Av1, Av, Av2 = self._get_max_and_intervals(self.sampler.flatchain[:,2])
self.alpha = alpha
self.alphaerr1 = alpha - alpha1
self.alphaerr2 = alpha2 - alpha
self.R = R
self.Rerr1 = R - R1
self.Rerr2 = R2 - R
self.Av = Av
self.Averr1 = Av - Av1
self.Averr2 = Av2 - Av
elif self.model=="PowerLaw_BlackBody":
alpha1, alpha, alpha2 = self._get_max_and_intervals(self.sampler.flatchain[:,0])
scale1, scale, scale2 = self._get_max_and_intervals(self.sampler.flatchain[:,1])
T1, T, T2 = self._get_max_and_intervals(self.sampler.flatchain[:,2])
R1, R, R2 = self._get_max_and_intervals(self.sampler.flatchain[:,3])
self.alpha = alpha
self.alphaerr1 = alpha - alpha1
self.alphaerr2 = alpha2 - alpha
self.scale = scale
self.scaleerr1 = scale - scale1
self.scaleerr2 = scale2 - scale
self.T = T
self.Terr1 = T - T1
self.Terr2 = T2 - T
self.R = R
self.Rerr1 = R - R1
self.Rerr2 = R2 - R
elif self.model=="Disk":
Mstar1, Mstar, Mstar2 = self._get_max_and_intervals(self.sampler.flatchain[:,0])
Rstar1, Rstar, Rstar2 = self._get_max_and_intervals(self.sampler.flatchain[:,1])
logMacc1, logMacc, logMacc2 = self._get_max_and_intervals(self.sampler.flatchain[:,2])
R_out1, R_out, R_out2 = self._get_max_and_intervals(self.sampler.flatchain[:,3])
#scale1, scale, scale2 = self._get_max_and_intervals(self.sampler.flatchain[:,3])
self.Mstar = Mstar
self.Mstarerr1 = Mstar - Mstar1
self.Mstarerr2 = Mstar2 - Mstar
self.Rstar = Rstar
self.Rstarerr1 = Rstar - Rstar1
self.Rstarerr2 = Rstar2 - Rstar
self.logMacc = logMacc
self.logMaccerr1 = logMacc - logMacc1
self.logMaccerr2 = logMacc2 - logMacc
self.R_out = R_out
self.R_outerr1 = R_out - R_out1
self.R_outerr2 = R_out2 - R_out
def _save_output(self):
'''
Saves in a results file.
'''
exists = os.path.isfile(self.resfile)
with open(self.resfile, 'a') as outfile:
print ("Saving results to %s"%self.resfile)
if self.model == "BlackBody":
if not exists:
outfile.write("mjd T Terr1 Terr2 R Rerr1 Rerr2 L Lerr1 Lerr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3e %.3e %.3e %.3f\n"%\
(self.mjd, self.T, self.Terr1, self.Terr2, self.R, self.Rerr1, self.Rerr2, self.L, self.Lerr1, self.Lerr2, self.av_mw))
elif self.model == "BlackBody_Av":
if not exists:
outfile.write("mjd T Terr1 Terr2 R Rerr1 Rerr2 L Lerr1 Lerr2 Av Averr1 Averr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3e %.3e %.3e %.3f %.3f %.3f %.3f\n"%\
(self.mjd, self.T, self.Terr1, self.Terr2, self.R, self.Rerr1, self.Rerr2, \
self.L, self.Lerr1, self.Lerr2, self.Av, self.Averr1, self.Averr2, self.av_mw))
elif self.model == "BlackBody2":
if not exists:
outfile.write("mjd T Terr1 Terr2 R Rerr1 Rerr2 L Lerr1 Lerr2 Tsec Tsecerr1 Tsecerr2 Rsec Rsecerr1 Rsecerr2 Lsec Lsecerr1 Lsecerr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3e %.3e %.3e %.3f %.3f %.3f %.3f %.3f %.3f %.3e %.3e %.3e %.3f \n"%\
(self.mjd, self.T, self.Terr1, self.Terr2, self.R, self.Rerr1, self.Rerr2, \
self.L, self.Lerr1, self.Lerr2,
self.Tsec, self.Tsecerr1, self.Tsecerr2, self.Rsec, self.Rsecerr1, self.Rsecerr2, \
self.Lsec, self.Lsecerr1, self.Lsecerr2, self.av_mw))
elif self.model == "BlackBody2_Av":
if not exists:
outfile.write("mjd T Terr1 Terr2 R Rerr1 Rerr2 L Lerr1 Lerr2 Av Averr1 Averr2 Tsec Tsecerr1 Tsecerr2 Rsec Rsecerr1 Rsecerr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3e %.3e %.3e %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f\n"%\
(self.mjd, self.T, self.Terr1, self.Terr2, self.R, self.Rerr1, self.Rerr2, \
self.L, self.Lerr1, self.Lerr2, self.Av, self.Averr1, self.Averr2,\
self.Tsec, self.Tsecerr1, self.Tsecerr2, self.Rsec, self.Rsecerr1, self.Rsecerr2, self.av_mw))
elif self.model == "PowerLaw":
if not exists:
outfile.write("mjd alpha alphaerr1 alphaerr2 scale scaleerr1 scaleerr2 Av Averr1 Averr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f\n"%\
(self.mjd, self.alpha, self.alphaerr1, self.alphaerr2, self.scale, self.scaleerr1, self.scaleerr2, \
self.Av, self.Averr1, self.Averr2, self.av_mw))
elif self.model == "PowerLaw_BlackBody":
if not exists:
outfile.write("mjd alpha alphaerr1 alphaerr2 scale scaleerr1 scaleerr2 T Terr1 Terr2 R Rerr1 Rerr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f\n"%\
(self.mjd, self.alpha, self.alphaerr1, self.alphaerr2, self.scale, self.scaleerr1, self.scaleerr2, \
self.T, self.Terr1, self.Terr2, \
self.R, self.Rerr1, self.Rerr2, \
self.av_mw))
elif self.model == "Disk":
if not exists:
outfile.write("mjd M Merr1 Merr2 Rstar Rerr1 Rerr2 Macc Maccerr1 Maccerr2 R_out R_outerr1 R_outerr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3e %.3e %.3e %.3e %.3e %.3e %.3f\n"%\
(self.mjd, self.Mstar, self.Mstarerr1, self.Mstarerr2, \
self.Rstar, self.Rstarerr1, self.Rstarerr2,\
self.logMacc, self.logMaccerr1, self.logMaccerr2,\
#self.scale, self.scaleerr1, self.scaleerr2, \
self.R_out, self.R_outerr1, self.R_outerr2,\
self.av_mw))
else:
print ("Unknown model! %s"%self.model)
def _get_bol_lum(self, T, R):
'''
T is in K
R in R_sun.
Gives the Lbol in Lsun
'''
L = cnt.sigma_sb * (T * u.K)**4 * 4 * np.pi * (R*u.Rsun)**2
return (L.to(u.Lsun)).value
def _get_save_path(self, savefile, plot_name=""):
'''
Checks what savefile name has been given.
If there is a value, then it just stores it in the plot directory provided.
If there is no name, then it creates a filename with the suffix provided.
It also checks if there is already a file named like that, and if that is the case,
it increases the suffix so that it has a higher number, avoiding collision.
'''
#If there is a given name to store the file, then we use that one
if (not savefile is None):
if os.path.dirname(savefile) == "":
name = os.path.join(self.plotdir, os.path.basename(savefile))
#If there is no name, then we will save the plots in the plot directory
#with an automatic name.
# This name will increase a count if the name exists already.
else:
i = 0
name = os.path.join(self.plotdir, "%s_%.1f_%d.pdf"%(plot_name, self.mjd, i))
while (os.path.isfile(name)):
i = i+1
name = os.path.join(self.plotdir, "%s_%.1f_%d.pdf"%(plot_name, self.mjd, i))
return name
def _initialize_parameters(self, plot=False):
'''
Runs the least squares optimization routine to find the best initial parameters
to start the MCMC with.
'''
lam = np.linspace(np.min(self.wls)*0.9, np.max(self.wls)*1.1, 2000)
a_v_wls = extinction.fitzpatrick99(self.wls, a_v=self.av_mw, unit='aa')
reddening = 10**(0.4*a_v_wls)
if self.model == "BlackBody":
flux_ini = self._model_2(lam, self.initT1, self.initR1)
p0 = (self.initT1, self.initR1)
print ("Initial parameters given:", p0)
#Perform a LSQ fit
#params, covar = curve_fit(self._model_2, self.wls , self.fluxes, \
#p0 = p0, sigma=self.fluxerrs, absolute_sigma=True, maxfev = 20000)
#flux_end = self._model_2(lam, *params)
if plot:
plt.clf()
mask_lims = self.fluxerrs<0
plt.plot(lam, flux_ini, "r--", label="Fit initial parameters")
#plt.plot(lam, flux_end, label="Best fit LSQ")
plt.errorbar(self.wls[~mask_lims], self.fluxes[~mask_lims], yerr=self.fluxerrs[~mask_lims], marker="o", color="b", lw=0, label="Measurements")
plt.errorbar(self.wls[mask_lims], self.fluxes[mask_lims], yerr=self.fluxes[mask_lims]*0.2, fmt="o", color="b", uplims=True)
plt.xlabel("Wavelength [A]")
plt.ylabel("$F_{\\lambda}$ [erg/s/cm2/A]")
plt.ylim(0.8*np.min(self.fluxes), 1.2*np.max(self.fluxes))
plt.legend()
plt.yscale("log")
name = self._get_save_path(None, "fluxes_obs_bb")
plt.savefig(name, dpi=200)
elif self.model == "BlackBody_Av":
flux_ini = self._model_av_r_2(lam, self.initT1, self.initR1, self.av_host)
p0 = (self.initT1, self.initR1, self.av_host)
print ("Initial ", p0)
#params, covar = curve_fit(self._model_av_r_2, self.wls , self.fluxes, \
#p0 = p0, sigma=self.fluxerrs, absolute_sigma=True, maxfev = 20000)
#flux_end = self._model_av_r_2(lam, *params)
if plot:
plt.clf()
plt.plot(lam, flux_ini, "r--", label="Fit initial parameters")
#plt.plot(lam, flux_end, label="Best fit LSQ")
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0, label="Measurements")
plt.xlabel("Wavelength [A]")
plt.ylabel("$L_{\\lambda}$ [erg/s/A]")
plt.ylim(0.8*np.min(self.fluxes), 1.2*np.max(self.fluxes))
plt.legend()
name = self._get_save_path(None, "fluxes_obs_bb_av")
plt.savefig(name, dpi=200)
elif self.model == "BlackBody2_Av":
flux_ini = self._model2_av_r_2(lam, self.initT1, self.initR1, self.av_host, self.initT2, self.initR2)
p0 = (self.initT1, self.initR1, self.av_host, self.initT2, self.initR2)
print ("Initial ", p0)
#params, covar = curve_fit(self._model2_av_r_2, self.wls , self.fluxes, \
#p0 = p0, sigma=self.fluxerrs, absolute_sigma=True, maxfev = 20000)
#flux_end = self._model2_av_r_2(lam, *params)
if plot:
plt.clf()
plt.plot(lam, flux_ini, "r--", label="Fit initial parameters")
#plt.plot(lam, flux_end, label="Best fit LSQ")
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0, label="Measurements")
plt.xlabel("Wavelength [A]")
plt.ylabel("$L_{\\lambda}$ [erg/s/A]")
plt.ylim(0.8*np.min(self.fluxes), 1.2*np.max(self.fluxes))
plt.legend()
name = self._get_save_path(None, "fluxes_obs")
plt.savefig(name, dpi=200)
elif self.model == "BlackBody2":
flux_ini = self._model2_r_2(lam, self.initT1, self.initR1, self.initT2, self.initR2)
p0 = (self.initT1, self.initR1, self.initT2, self.initR2)
print ("Initial ", p0)
#params, covar = curve_fit(self._model2_r_2, self.wls , self.fluxes, \
#p0 = p0, sigma=self.fluxerrs, absolute_sigma=True, maxfev = 20000)
#flux_end = self._model2_r_2(lam, *params)
#flux_1 = self._model_2(lam, *params[0:2])
#flux_2 = self._model_2(lam, *params[2:])
if plot:
plt.clf()
plt.figure(figsize=(6,4))
plt.plot(lam, flux_ini, "r--", label="Fit initial parameters")
#plt.plot(lam, flux_end, label="Best fit LSQ")
#plt.plot(lam, flux_1, label="BB1")
#plt.plot(lam, flux_2, label="BB2")
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0, label="Measurements")
plt.xlabel("Wavelength [A]")
plt.ylabel("$L_{\\lambda}$ [erg/s/A]")
plt.legend(loc="best", fontsize=10)
plt.ylim(0.8*np.min(self.fluxes), 1.2*np.max(self.fluxes))
plt.yscale("log")
name = self._get_save_path(None, "fluxes_obs_2bb")
plt.savefig(name, dpi=200)
elif self.model == "PowerLaw":
#params, covar = curve_fit(self._model_powerlaw_2, self.wls , self.fluxes, \
#p0=(self.alpha, self.initR1, self.av_host), sigma=self.fluxerrs, absolute_sigma=True, maxfev = 10000)
lam = np.linspace(3000, 25000, 2000)
fluxpw = self._model_powerlaw_2(lam, self.alpha, self.scale, self.av_host)
if plot:
plt.clf()
plt.plot(lam, fluxpw, label="Fit initial parameters")
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0, label="Measurements")
plt.xlabel("Wavelength [A]")
plt.ylabel("$L_{\\lambda}$ [erg/s/A]")
plt.ylim(0.8*np.min(self.fluxes), 1.2*np.max(self.fluxes))
plt.legend()
name = self._get_save_path(None, "fluxes_obs_powerlaw")
plt.savefig(name, dpi=200)
print ("Saved fit as %s"%name)
elif self.model == "PowerLaw_BlackBody":
#params, covar = curve_fit(self._model_powerlaw_2, self.wls , self.fluxes, \
#p0=(self.alpha, self.initR1, self.av_host), sigma=self.fluxerrs, absolute_sigma=True, maxfev = 10000)
lam = np.linspace(3000, 25000, 2000)
fluxpw = self._model_powerlaw_bb_2(lam, self.alpha, self.scale, self.initT1, self.initR1)
if plot:
plt.clf()
plt.plot(lam, fluxpw, label="Fit initial parameters")
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0, label="MW ext. corr")
plt.errorbar(self.wls, self.fluxes/reddening, yerr=self.fluxerrs, marker="o", lw=0, label="Measurements")
plt.xlabel("Wavelength [A]")
plt.ylabel("$L_{\\lambda}$ [erg/s/A]")
plt.ylim(0.8*np.min(self.fluxes/reddening), 1.2*np.max(self.fluxes))
plt.legend(loc="best")
name = self._get_save_path(None, "fluxes_obs_powerlaw_bb")
plt.savefig(name, dpi=200)
print ("Saved fit as %s"%name)
if self.model == 'Disk':
#params = (0.5, 0.2, 5e-9, 1, 2)
p0 = (self.Mstar, self.Rstar, self.logMacc, self.R_out)
#params, covar = curve_fit(self._model_accretion_disk2, self.wls , self.fluxes, \
#p0 = p0, sigma=self.fluxerrs, absolute_sigma=True, maxfev = 20000)
#print ("LSQ fit: Mstar:", params[0], " Rstar", params[1], "logMacc ", \
# params[2], "R_out", params[3])
lam = np.linspace(3000, 25000, 2000)
#flux_disk = self._model_accretion_disk2(lam, params[0], params[1], params[2], params[3])
if plot:
plt.clf()
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0, label="Measurements")
#plt.plot(lam, flux_disk, lw=3)
plt.xlabel("Wavelength [$\\mu$m]")
plt.ylabel("Flux [erg/cm$^2$/s]")
plt.ylim(np.nanmin(self.fluxes)*0.9, np.nanmax(self.fluxes)*1.2)
plt.legend()
name = self._get_save_path(None, "fluxes_obs_disk")
plt.savefig(name, dpi=200)
print ("Saved fit as %s"%name)
def initialize(self, plot=False):
'''
Will transform the magnitudes to fluxes and use the distance to the object to
calculate the luminosity at each wavelength.
'''
if (not os.path.isdir(self.plotdir)):
os.makedirs(self.plotdir)
print ("Created plot directory %s"%self.plotdir)
#Directory where to store the results
if (not os.path.isdir(self.resdir)):
os.makedirs(self.resdir)
print ("Created result directory %s"%(self.resdir))
self.resfile = os.path.join(self.resdir, self.model + os.path.basename(self.resfile))
# generate the data
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.wls, self.fluxes, self.fluxerrs = self._band2flux()
#Plot the raw fluxes before correcting them.
'''if (plot):
plt.figure(figsize=(8,6))
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0)
for i in range(len(self.wls)):
plt.text(self.wls[i], self.fluxes[i]*1.01, self.bands[i].split(",")[-1], alpha=.4)
name = self._get_save_path(None, "fluxes_observed")
plt.yscale("log")
plt.xlabel("Wavelength [A]")
plt.ylabel("log (Flux/[erg/cm2/s])")
plt.tight_layout()
plt.savefig(name, dpi=200)'''
if not self.distMpc is None and self.distMpc !=0:
print ("Using distance to the source of %.1e Mpc"%self.distMpc)
fluxFactor = (4*np.pi*((self.distMpc*u.Mpc).to(u.cm) )**2).value
elif (self.distMpc is None or self.distMpc==0 )and (not self.z is None and self.z != 0):
self.distMpc = self.cosmo.luminosity_distance(self.z)
#Compute the flux multiplication factor for the object if it is at distance distMpc
#We transform that to cm, as the flux is in erg cm-2 s-1
fluxFactor = (4*np.pi*(self.distMpc.to(u.cm) )**2).value
else: # self.distMpc is None and self.z is None:
#Here we do not use any multiplication flux factor
print ("Warning: no redshift or distance provided!")
fluxFactor = 1
self.fluxes = self.fluxes * fluxFactor
self.fluxerrs = self.fluxerrs * fluxFactor
self._initialize_parameters(plot)
def run(self):
'''
Runs the main MCMC process.
Retrieves the priors, the likelihood process and computes the posterior probability.
'''
xs = self.wls
ys = self.fluxes
errs = self.fluxerrs
if self.model == "BlackBody":
p0 = np.array([ self.initT1, self.initR1])
sigs = np.array([self.initT1*0.2, self.initR1*0.2])
elif self.model == "BlackBody_Av":
p0 = np.array([ self.initT1, self.initR1, self.av_host])
sigs = np.array([2000, 10, 0.5])
elif self.model == "BlackBody2":
p0 = np.array([ self.initT1, self.initR1, self.initT2, self.initR2])
sigs = np.array([self.initT1*0.2, self.initR1*0.2, self.initT2*0.2, self.initR2*0.2])
elif self.model == "BlackBody2_Av":
p0 = np.array([ self.initT1, self.initR1, self.av_host, self.initT2, self.initR2])
sigs = np.array([2000, 5, 1, 2000, 5])
elif self.model == "PowerLaw":
p0 = np.array([ self.alpha, self.scale, self.av_host])
sigs = np.array([2, 3, 2])
elif self.model == "PowerLaw_BlackBody":
p0 = np.array([ self.alpha, self.scale, self.initT1, self.initR1])
sigs = np.array([2, 3, 2000, 2])
elif self.model == "Disk":
p0 = np.array([ self.Mstar, self.Rstar, self.logMacc, self.R_out])
sigs = np.array([0.1, 0.01, 1, 0.1])
print ("Initialized with p0", p0, " and sigmas ", sigs)
else:
print ("-------------------CRITICAL ERROR!----------------------")
print ("-------------------UNKNOWN model! %s----------------------"%self.model)
print ("-------------------CRITICAL ERROR!----------------------")
sys.exit()
ndim = len(p0)
# ensemble MCMC
p0s = emcee.utils.sample_ball(p0, sigs, self.nwalkers)
# initialize the ball of initial conditions
#Supports the threads=X argument for parallelization
sampler = emcee.EnsembleSampler(self.nwalkers, ndim, self._logposterior,\
args=(xs, ys, errs), threads=10)
pos, lnprob, state = sampler.run_mcmc(p0s, self.burnin)
print ("Burning phase finished")
sampler.reset()
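#reset() clears the stored burn-in chain; the production run below restarts from the
#burned-in walker positions (pos).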
pos, lnprob, state = sampler.run_mcmc(pos, self.niterations)
print ('Acceptance ratio', sampler.acceptance_fraction)
self.sampler = sampler
print ("MCMC main phase finished")
self._fill_output()
self._save_output()
def plot_corner_posteriors(self, savefile=None):
'''
Plots the corner plot of the MCMC results.
'''
if self.model == "BlackBody2":
labels=["T1", "R1", "T2", "R2"]
elif self.model.startswith("BlackBody"):
labels=["T1", "R1", "Av", "T2", "R2"]
elif self.model == "PowerLaw":
labels=["alpha", "scale", "Av"]
elif self.model == "PowerLaw_BlackBody":
labels = ["alpha", "scale", "T", "R"]
elif self.model == "Disk":
labels = ["Mstar", "Rstar", "logMacc", "R_out"]
ndim = len(self.sampler.flatchain[0,:])
chain = self.sampler
samples = chain.flatchain
samples = samples[:,0:ndim]
plt.figure(figsize=(8,8))
fig = corner.corner(samples, labels=labels[0:ndim], quantiles=[0.16, 0.5, 0.84],
show_titles=True, title_kwargs={"fontsize": 12})
fig.suptitle("MJD: %.2f"%self.mjd)
name = self._get_save_path(savefile, "mcmc_posteriors")
plt.savefig(name)
plt.close("all")
plt.figure(figsize=(8,ndim*3))
for n in range(ndim):
plt.subplot(ndim,1,n+1)
chain = self.sampler.chain[:,:,n]
nwalk, nit = chain.shape
for i in np.arange(nwalk):
plt.plot(chain[i], lw=0.1)
plt.ylabel(labels[n])
plt.xlabel("Iteration")
name_walkers = self._get_save_path(savefile, "mcmc_walkers")
plt.tight_layout()
plt.savefig(name_walkers)
plt.close("all")
def plot_fit(self, lambdaFlambda=False):
'''
Plots the best fit model to the data.
'''
lam = np.linspace( np.min(self.wls) -1500 , np.max(self.wls) + 1500, 1000)
plt.clf()
plt.figure(figsize=(8,6))
mask_lims = self.fluxerrs<0
if lambdaFlambda:
factor_obs=self.wls
else:
factor_obs=np.ones_like(self.wls)
plt.errorbar(self.wls[~mask_lims], self.fluxes[~mask_lims]*factor_obs[~mask_lims], yerr=self.fluxerrs[~mask_lims]*factor_obs[~mask_lims], marker="o", color="b", lw=0, label="Measurements")
plt.errorbar(self.wls[mask_lims], self.fluxes[mask_lims]*factor_obs[mask_lims], yerr=self.fluxes[mask_lims]*0.2*factor_obs[mask_lims], fmt="o", color="b", uplims=True)
for i in range(len(self.wls)):
plt.text(self.wls[i], self.fluxes[i]*1.01*factor_obs[i], self.bands[i], alpha=.4, fontsize=8)
if self.model == "BlackBody":
fluxbb = self._model(lam, (self.T, self.R))
if lambdaFlambda:
factor = lam
else:
factor = np.ones_like(lam)
plt.plot(lam, fluxbb*factor, "k-", label="BB fit")
plt.title("T: %d K R:%d R$_{\odot}$ Lumiosity %.2e L$_{\odot}$"%(self.T, self.R, self.L))
elif self.model == "BlackBody_Av":
fluxbb = self._model(lam, (self.T, self.R))
fluxbb_red = self._model_av_r(lam, (self.T, self.R, self.Av))
plt.plot(lam, fluxbb, "k-", label="BB fit")
plt.plot(lam, fluxbb_red, "red", label="BB fit + reddening")
plt.title("T: %.1f K R:%.1f R$_{\odot}$ Lumiosity %.1e L$_{\odot}$ Av: %.2f"%(np.round(self.T,0), np.round(self.R,0), np.round(self.L,1), self.Av))
elif self.model == "BlackBody2_Av":
fluxbb_red = self._model_av_r(lam, (self.T, self.R, self.Av))
fluxbb_secondary_red = self._model_av_r(lam, (self.Tsec, self.Rsec, self.Av))
fluxbb_with_seconday = self._model2_av(lam, (self.T, self.R, self.Av, self.Tsec, self.Rsec))
plt.plot(lam, fluxbb_red, "k-", label="BB1 fit + reddening")
plt.plot(lam, fluxbb_secondary_red, "k--", label="BB2 fit + reddening")
plt.plot(lam, fluxbb_with_seconday, "green", label="BB1 + BB2")
plt.title("T: %.1f K R:%.1f R$_{\odot}$ Lumiosity %.1e L$_{\odot}$ Av: %.2f\n T2: %.1f R2: %.1f"%(self.T, \
self.R, self.L, self.Av, self.Tsec, self.Rsec))
elif self.model == "BlackBody2":
fluxbb_primary = self._model(lam, (self.T, self.R))
fluxbb_secondary = self._model(lam, (self.Tsec, self.Rsec))
fluxbb_with_seconday = self._model2_r(lam, (self.T, self.R, self.Tsec, self.Rsec))
plt.plot(lam, fluxbb_primary, "k-", label="BB1 fit")
plt.plot(lam, fluxbb_secondary, "k--", label="BB2 fit")
plt.plot(lam, fluxbb_with_seconday, "green", label="BB1 + BB2")
plt.title("T: %d K R:%d R$_{\odot}$ T2: %d R2: %d"%( self.T, \
self.R, self.Tsec, self.Rsec))
elif self.model == "PowerLaw":
flux = self._model_powerlaw(lam, (self.alpha, self.scale, self.Av))
plt.plot(lam, flux, "k-", label="PowerLaw + reddening")
plt.title("$\\alpha$: %.1f Av: %.2f"%(self.alpha, self.Av))
elif self.model == "PowerLaw_BlackBody":
flux = self._model_powerlaw_bb(lam, (self.alpha, self.scale, self.T, self.R))
flux_pw = self._model_powerlaw(lam, (self.alpha, self.scale, 0))
flux_bb = self._model(lam, (self.T, self.R))
plt.plot(lam, flux, "k-", label="PowerLaw + BlackBody")
plt.plot(lam, flux_pw, "b--", label="PowerLaw")
plt.plot(lam, flux_bb, "g:", label="BlackBody")
plt.title("$\\alpha$: %.1f scale: %.2e T: %.1f R:%.1f"%(self.alpha, self.scale, self.T, self.R))
elif self.model == "Disk":
fluxdisk = self._model_accretion_disk(lam, (self.Mstar, self.Rstar, self.logMacc, self.R_out))
plt.plot(lam, fluxdisk, "k-", label="Disk fit")
plt.title("M:%.3f M$_{\\odot}$ R:%.3f R$_{\odot}$ M$_{acc}$:%.2f R_out: %.2f"%(self.Mstar, self.Rstar, self.logMacc, self.R_out))
ymin, ymax = plt.ylim()
#plt.ylim(np.max([ymin, np.min(self.fluxes)*0.01]), ymax)
plt.xlabel("Wavelength [$\\AA$]")
if (lambdaFlambda):
plt.ylabel("$\\lambda F_{\\lambda}$ [erg/s]")
plt.ylim(ymin=np.min(self.fluxes*factor_obs) * 0.1)
else:
plt.ylabel("$F_{\\lambda}$ [erg/s/$\\AA$]")
plt.ylim(ymin=np.min(self.fluxes) * 0.1)
plt.yscale("log")
plt.legend()
name = self._get_save_path(None, "mcmc_best_fit_model")
plt.savefig(name)
plt.close("all")
def write_fit_params(self):
'''
Write the best fit parameters of the model to the standard output.
'''
if self.model.startswith("BlackBody"):
#Prints the best parameters
print ('''
Temperature: \t %.3f -%.3f +%.3f K
Radius: \t\t %.2e -%.2e +%.2e R$_{\odot}$
Luminosity: \t %.3e -%.3e +%.3e L$_{\odot}$'''%(\
self.T, self.Terr1, self.Terr2, \
self.R, self.Rerr1, self.Rerr2, \
self.L, self.Lerr1, self.Lerr2))
if self.model == "BlackBody_Av":
print (" Av: \t\t\t %.1f -%.1f +%.1f mag"%(self.Av, self.Averr1, self.Averr2))
if self.model == "BlackBody2":
print (" Temperature2: %.1f -%.1f +%.1f K"%(self.Tsec, self.Tsecerr1, self.Tsecerr2))
print (" Radius2: %.2e -%.2e +%.2e R$_{\odot}$"%(self.Rsec, self.Rsecerr1, self.Rsecerr2))
print (" Luminosity2 %.3e -%.3e +%.3e L$_{\odot}$"%(self.Lsec, self.Lsecerr1, self.Lsecerr2))
if self.model == "BlackBody2_Av":
print (" Av: %.1f -%.1f +%.1f mag"%(self.Av, self.Averr1, self.Averr2))
print (" Temperature2: %.1f -%.1f +%.1f K"%(self.Tsec, self.Tsecerr1, self.Tsecerr2))
print (" Radius2: %.1f -%.1f +%.1f R$_{\odot}$"%(self.Rsec, self.Rsecerr1, self.Rsecerr2))
if (self.model == "PowerLaw"):
print ('''
alpha: %.2f -%.2f +%.2f
Scale : %.2e -%.2e +%.2e
Av %.2f -%.2f +%.2f'''%(\
self.alpha, self.alphaerr1, self.alphaerr2, \
self.scale, self.scaleerr1, self.scaleerr2, \
self.Av, self.Averr1, self.Averr2))
if (self.model == "PowerLaw_BlackBody"):
print ('''
alpha: %.2f -%.2f +%.2f
Scale (R): %.2e -%.2e +%.2e
T %.2f -%.2f +%.2f
R %.2f -%.2f +%.2f '''%(\
self.alpha, self.alphaerr1, self.alphaerr2, \
self.scale, self.scaleerr1, self.scaleerr2,\
self.T, self.Terr1, self.Terr2,\
self.R, self.Rerr1, self.Rerr2 ))
if (self.model == "Disk"):
print ('''
Mstar: %.3f$_{-%.3f}^{+%.3f}$
Rstar (10^8 cm): %.3f -%.3f +%.3f
logMacc %.3f$_{-%.3f}^{+%.3f}$
R_out %.3f$_{-%.3f}^{+%.3f}$ '''%(\
self.Mstar, self.Mstarerr1, self.Mstarerr2, \
self.Rstar*(u.Rsun.to(u.cm))/1e8, self.Rstarerr1*(u.Rsun.to(u.cm))/1e8, self.Rstarerr2*(u.Rsun.to(u.cm))/1e8,\
self.logMacc, self.logMaccerr1, self.logMaccerr2,\
self.R_out, self.R_outerr1, self.R_outerr2 ))
| mit |
Jericho/deep-learning | image-classification/helper.py | 155 | 5631 | import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
def _load_label_names():
"""
Load the label names from file
"""
return ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
"""
Load a batch of the dataset
"""
with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
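    # Each CIFAR-10 record stores 3072 values in channel-first order (3, 32, 32);
    # the reshape + transpose converts them to the channel-last (32, 32, 3) layout
    # expected by the plotting code below.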
labels = batch['labels']
return features, labels
def display_stats(cifar10_dataset_folder_path, batch_id, sample_id):
"""
Display stats of the dataset
"""
batch_ids = list(range(1, 6))
if batch_id not in batch_ids:
print('Batch Id out of Range. Possible Batch Ids: {}'.format(batch_ids))
return None
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)
if not (0 <= sample_id < len(features)):
print('{} samples in batch {}. {} is out of range.'.format(len(features), batch_id, sample_id))
return None
print('\nStats of batch {}:'.format(batch_id))
print('Samples: {}'.format(len(features)))
print('Label Counts: {}'.format(dict(zip(*np.unique(labels, return_counts=True)))))
print('First 20 Labels: {}'.format(labels[:20]))
sample_image = features[sample_id]
sample_label = labels[sample_id]
label_names = _load_label_names()
print('\nExample of Image {}:'.format(sample_id))
print('Image - Min Value: {} Max Value: {}'.format(sample_image.min(), sample_image.max()))
print('Image - Shape: {}'.format(sample_image.shape))
print('Label - Label Id: {} Name: {}'.format(sample_label, label_names[sample_label]))
plt.axis('off')
plt.imshow(sample_image)
def _preprocess_and_save(normalize, one_hot_encode, features, labels, filename):
"""
Preprocess data and save it to file
"""
features = normalize(features)
labels = one_hot_encode(labels)
pickle.dump((features, labels), open(filename, 'wb'))
def preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode):
"""
Preprocess Training and Validation Data
"""
n_batches = 5
valid_features = []
valid_labels = []
for batch_i in range(1, n_batches + 1):
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_i)
validation_count = int(len(features) * 0.1)
# Preprocess and save a batch of training data
_preprocess_and_save(
normalize,
one_hot_encode,
features[:-validation_count],
labels[:-validation_count],
'preprocess_batch_' + str(batch_i) + '.p')
# Use a portion of training batch for validation
valid_features.extend(features[-validation_count:])
valid_labels.extend(labels[-validation_count:])
# Preprocess and Save all validation data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(valid_features),
np.array(valid_labels),
'preprocess_validation.p')
with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
# load the test data
test_features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
test_labels = batch['labels']
# Preprocess and Save all test data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(test_features),
np.array(test_labels),
'preprocess_test.p')
def batch_features_labels(features, labels, batch_size):
"""
Split features and labels into batches
"""
for start in range(0, len(features), batch_size):
end = min(start + batch_size, len(features))
yield features[start:end], labels[start:end]
def load_preprocess_training_batch(batch_id, batch_size):
"""
Load the Preprocessed Training data and return them in batches of <batch_size> or less
"""
filename = 'preprocess_batch_' + str(batch_id) + '.p'
features, labels = pickle.load(open(filename, mode='rb'))
# Return the training data in batches of size <batch_size> or less
return batch_features_labels(features, labels, batch_size)
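# Hedged usage sketch, assuming 'preprocess_batch_1.p' was produced by preprocess_and_save_data:
# for batch_features, batch_labels in load_preprocess_training_batch(1, 64):
#     train_on_batch(batch_features, batch_labels)  # 'train_on_batch' is a hypothetical caller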
def display_image_predictions(features, labels, predictions):
n_classes = 10
label_names = _load_label_names()
label_binarizer = LabelBinarizer()
label_binarizer.fit(range(n_classes))
label_ids = label_binarizer.inverse_transform(np.array(labels))
fig, axies = plt.subplots(nrows=4, ncols=2)
fig.tight_layout()
fig.suptitle('Softmax Predictions', fontsize=20, y=1.1)
n_predictions = 3
margin = 0.05
ind = np.arange(n_predictions)
width = (1. - 2. * margin) / n_predictions
for image_i, (feature, label_id, pred_indicies, pred_values) in enumerate(zip(features, label_ids, predictions.indices, predictions.values)):
pred_names = [label_names[pred_i] for pred_i in pred_indicies]
correct_name = label_names[label_id]
axies[image_i][0].imshow(feature)
axies[image_i][0].set_title(correct_name)
axies[image_i][0].set_axis_off()
axies[image_i][1].barh(ind + margin, pred_values[::-1], width)
axies[image_i][1].set_yticks(ind + margin)
axies[image_i][1].set_yticklabels(pred_names[::-1])
axies[image_i][1].set_xticks([0, 0.5, 1.0])
| mit |
xiaoxiamii/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 251 | 2022 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
| bsd-3-clause |
tdegeus/GooseEYE | docs/examples/clusters_dilate_periodic.py | 1 | 2926 | r'''
Plot and/or check.
Usage:
script [options]
Options:
-s, --save Save output for later check.
-c, --check Check against earlier results.
-p, --plot Plot.
-h, --help Show this help.
'''
# <snippet>
import numpy as np
import GooseEYE
# generate image
I = np.zeros((21, 21), dtype='bool')
I[4, 4] = True
I[18, 19] = True
I[19, 19] = True
I[20, 19] = True
I[19, 18] = True
I[19, 20] = True
# clusters
C = GooseEYE.Clusters(I).labels()
# dilate
CD = GooseEYE.dilate(C)
# </snippet>
if __name__ == '__main__':
import docopt
args = docopt.docopt(__doc__)
if args['--save']:
import h5py
with h5py.File('clusters_dilate_periodic.h5', 'w') as data:
data['I'] = I
data['C'] = C
data['CD'] = CD
if args['--check']:
import h5py
with h5py.File('clusters_dilate_periodic.h5', 'r') as data:
assert np.all(np.equal(data['I'][...], I))
assert np.all(np.equal(data['C'][...], C))
assert np.all(np.equal(data['CD'][...], CD))
if args['--plot']:
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
# color-scheme: modify such that the background is white
# N.B. for a transparent background -> 4th column == 1.
cmap = cm.jet(range(256))
cmap[0, :3] = 1.0
cmap = mpl.colors.ListedColormap(cmap)
try:
plt.style.use(['goose', 'goose-latex'])
except:
pass
fig, axes = plt.subplots(figsize=(18, 6), nrows=1, ncols=3)
ax = axes[0]
im = ax.imshow(I, clim=(0, 1), cmap=mpl.colors.ListedColormap(cm.gray([0, 255])))
ax.xaxis.set_ticks([0, 20])
ax.yaxis.set_ticks([0, 20])
ax.set_xlim([-0.5, 20.5])
ax.set_ylim([-0.5, 20.5])
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_title (r'image')
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.1)
cbar = plt.colorbar(im, cax=cax)
cbar.set_ticks([0, 1])
ax = axes[1]
im = ax.imshow(CD, clim=(0, np.max(C) + 1), cmap=cmap)
ax.xaxis.set_ticks([0, 20])
ax.yaxis.set_ticks([0, 20])
ax.set_xlim([-0.5, 20.5])
ax.set_ylim([-0.5, 20.5])
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_title (r'clusters + dilate')
ax = axes[2]
im = ax.imshow(np.tile(CD, (3, 3)), clim=(0, np.max(C) + 1), cmap=cmap)
ax.xaxis.set_ticks([0, 60])
ax.yaxis.set_ticks([0, 60])
ax.set_xlim([-0.5, 60.5])
ax.set_ylim([-0.5, 60.5])
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_title (r'periodic copy')
plt.savefig('clusters_dilate_periodic.svg')
| gpl-3.0 |
ellio167/lammps | examples/SPIN/test_problems/validation_damped_precession/llg_precession.py | 9 | 1646 | #!/usr/bin/env python3
import numpy as np, pylab, tkinter
import math
import matplotlib.pyplot as plt
import mpmath as mp
mub=5.78901e-5 # Bohr magneton (eV/T)
hbar=0.658212 # Planck's constant (eV.fs/rad)
g=2.0 # Lande factor (adim)
gyro=g*mub/hbar # gyromag ratio (rad/fs/T)
alpha=0.01 # damping coefficient
pi=math.pi
Bnrm=10.0 # mag. field (T)
Bext = np.array([0.0, 0.0, 1.0])
Sn = 2.0 # spin norm (in # of muB)
S = np.array([1.0, 0.0, 0.0])
N=500000 # number of timesteps
dt=0.1 # timestep (fs)
# Rodrigues rotation formula
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise
rotation about the given axis by theta radians
"""
axis = np.asarray(axis)
a = math.cos(theta / 2.0)
b, c, d = -axis * math.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
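# Illustrative check: rotating the x axis by pi/2 about z should yield the y axis, i.e.
# np.allclose(np.dot(rotation_matrix([0, 0, 1], np.pi / 2), [1, 0, 0]), [0, 1, 0]) is True.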
# calc. precession field
def calc_rot_vector(Fi,Sp):
rot = gyro*Sn*Bnrm*(Fi-alpha*np.cross(Fi,Sp))
return rot
# np.set_printoptions(precision=4)
for t in range (0,N):
wf = calc_rot_vector(Bext,S)
theta=dt*np.linalg.norm(wf)
axis=wf/np.linalg.norm(wf)
S = np.dot(rotation_matrix(axis, theta), S)
en = -hbar*gyro*Sn*Bnrm*np.dot(S,Bext)
# print res. in ps for comparison with LAMMPS
print(t*dt/1000.0,S[0],S[1],S[2],en)
| gpl-2.0 |
SanPen/GridCal | src/research/PTDF/ACPTDF_research2.py | 1 | 14022 | # This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pandas as pd
import numba as nb
import time
from warnings import warn
import scipy.sparse as sp
from scipy.sparse import coo_matrix, csc_matrix
from scipy.sparse import hstack as hs, vstack as vs
from scipy.sparse.linalg import factorized, spsolve, inv
from matplotlib import pyplot as plt
from GridCal.Engine import *
def SysMat(Y, Ys, pq, pvpq):
"""
Computes the system Jacobian matrix in polar coordinates
Args:
Y: Admittance matrix (Ybus)
Ys: Series admittance matrix (Yseries)
pq: Array with the indices of the PQ buses
pvpq: Array with the indices of the PV and PQ buses
Returns:
The system Jacobian matrix
"""
A11 = -Ys.imag[np.ix_(pvpq, pvpq)]
A12 = Y.real[np.ix_(pvpq, pq)]
A21 = -Ys.real[np.ix_(pq, pvpq)]
A22 = -Y.imag[np.ix_(pq, pq)]
Asys = sp.vstack([sp.hstack([A11, A12]),
sp.hstack([A21, A22])], format="csc")
return Asys
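# Block layout of Asys, the linearised Jacobian relating (dVa, dVm) to (dP, dQ):
# A11 = -Im(Yseries)[pvpq, pvpq] ~ dP/dVa    A12 = Re(Ybus)[pvpq, pq] ~ dP/dVm
# A21 = -Re(Yseries)[pq, pvpq]   ~ dQ/dVa    A22 = -Im(Ybus)[pq, pq]  ~ dQ/dVm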
def compute_acptdf(Ybus, Yseries, Yf, Yt, Cf, V, pq, pv, distribute_slack):
"""
Compute the AC-PTDF
:param Ybus: admittance matrix
:param Yf: Admittance matrix of the buses "from"
:param Yt: Admittance matrix of the buses "to"
:param Cf: Connectivity branch - bus "from"
:param V: voltages array
:param Ibus: array of currents
:param pq: array of pq node indices
:param pv: array of pv node indices
:return: AC-PTDF matrix (branches, buses)
"""
n = len(V)
pvpq = np.r_[pv, pq]
npq = len(pq)
# compute the Jacobian
J = SysMat(Ybus, Yseries, pq, pvpq)
if distribute_slack:
dP = np.ones((n, n)) * (-1 / (n - 1))
for i in range(n):
dP[i, i] = 1.0
else:
dP = np.eye(n, n)
# compose the compatible array (the Q increments are considered zero)
dQ = np.zeros((npq, n))
# dQ = np.eye(n, n)[pq, :]
dS = np.r_[dP[pvpq, :], dQ]
# solve the voltage increments
dx = spsolve(J, dS)
# compute branch derivatives
If = Yf * V
E = V / np.abs(V)
Vdiag = sp.diags(V)
Vdiag_conj = sp.diags(np.conj(V))
Ediag = sp.diags(E)
Ediag_conj = sp.diags(np.conj(E))
If_diag_conj = sp.diags(np.conj(If))
Yf_conj = Yf.copy()
Yf_conj.data = np.conj(Yf_conj.data)
Yt_conj = Yt.copy()
Yt_conj.data = np.conj(Yt_conj.data)
dSf_dVa = 1j * (If_diag_conj * Cf * Vdiag - sp.diags(Cf * V) * Yf_conj * Vdiag_conj)
dSf_dVm = If_diag_conj * Cf * Ediag - sp.diags(Cf * V) * Yf_conj * Ediag_conj
# compose the final AC-PTDF
dPf_dVa = dSf_dVa.real[:, pvpq]
dPf_dVm = dSf_dVm.real[:, pq]
PTDF = sp.hstack((dPf_dVa, dPf_dVm)) * dx
return PTDF
def make_lodf(circuit: SnapshotCircuit, PTDF, correct_values=True):
"""
:param circuit:
:param PTDF: PTDF matrix in numpy array form
:param correct_values: clip LODF values outside the [-1, 1] range?
:return: LODF matrix
"""
nl = circuit.nbr
# compute the connectivity matrix
Cft = circuit.C_branch_bus_f - circuit.C_branch_bus_t
H = PTDF * Cft.T
# old code
# h = sp.diags(H.diagonal())
# LODF = H / (np.ones((nl, nl)) - h * np.ones(nl))
# divide each row of H by the vector 1 - H.diagonal
# LODF = H / (1 - H.diagonal())
# replace possible nan and inf
# LODF[LODF == -np.inf] = 0
# LODF[LODF == np.inf] = 0
# LODF = np.nan_to_num(LODF)
# this loop avoids the divisions by zero
# in those cases the LODF column should be zero
LODF = np.zeros((nl, nl))
div = 1 - H.diagonal()
for j in range(H.shape[1]):
if div[j] != 0:
LODF[:, j] = H[:, j] / div[j]
# replace the diagonal elements by -1
# old code
# LODF = LODF - sp.diags(LODF.diagonal()) - sp.eye(nl, nl), replaced by:
for i in range(nl):
LODF[i, i] = - 1.0
if correct_values:
i1, j1 = np.where(LODF > 1)
for i, j in zip(i1, j1):
LODF[i, j] = 1
i2, j2 = np.where(LODF < -1)
for i, j in zip(i2, j2):
LODF[i, j] = -1
return LODF
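# LODF[m, c] is the fraction of the pre-outage flow of branch c that is transferred to
# branch m when c is tripped, so the post-contingency flow is Pf_m + LODF[m, c] * Pf_c
# (this is how it is applied in check_lodf below). The diagonal is set to -1 because the
# tripped branch loses all of its own flow.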
def get_branch_time_series(circuit: TimeCircuit, PTDF):
"""
:param grid:
:return:
"""
# option 2: call the power directly
P = circuit.Sbus.real
Pbr = np.dot(PTDF, P).T * circuit.Sbase
return Pbr
def multiple_failure_old(flows, LODF, beta, delta, alpha):
"""
:param flows: array of all the pre-contingency flows
:param LODF: Line Outage Distribution Factors Matrix
:param beta: index of the first failed line
:param delta: index of the second failed line
:param alpha: index of the line where you want to see the effects
:return: post contingency flow in the line alpha
"""
# multiple contingency matrix
M = np.ones((2, 2))
M[0, 1] = -LODF[beta, delta]
M[1, 0] = -LODF[delta, beta]
# normal flows of the lines beta and delta
F = flows[[beta, delta]]
    # contingency flows after failing the lines beta and delta
Ff = np.linalg.solve(M, F)
# flow delta in the line alpha after the multiple contingency of the lines beta and delta
L = LODF[alpha, :][[beta, delta]]
dFf_alpha = np.dot(L, Ff)
    return flows[alpha] + dFf_alpha
def multiple_failure(flows, LODF, failed_idx):
"""
From the paper:
Multiple Element Contingency Screening
IEEE TRANSACTIONS ON POWER SYSTEMS, VOL. 26, NO. 3, AUGUST 2011
C. Matthew Davis and Thomas J. Overbye
:param flows: array of all the pre-contingency flows (the base flows)
:param LODF: Line Outage Distribution Factors Matrix
:param failed_idx: indices of the failed lines
:return: all post contingency flows
"""
# multiple contingency matrix
M = -LODF[np.ix_(failed_idx, failed_idx)]
for i in range(len(failed_idx)):
M[i, i] = 1.0
# normal flows of the failed lines indicated by failed_idx
F = flows[failed_idx]
# Affected flows after failing the lines indicated by failed_idx
Ff = np.linalg.solve(M, F)
# flow delta in the line alpha after the multiple contingency of the lines indicated by failed_idx
L = LODF[:, failed_idx]
dFf_alpha = np.dot(L, Ff)
# return the final contingency flow as the base flow plus the contingency flow delta
return flows + dFf_alpha
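# Usage sketch for the two functions above (base_flows, beta, delta and alpha
# are hypothetical names; for a double outage both functions should agree on
# the monitored line alpha):
#
#   all_flows = multiple_failure(flows=base_flows, LODF=LODF, failed_idx=[beta, delta])
#   f_alpha = multiple_failure_old(base_flows, LODF, beta, delta, alpha)
#   # all_flows[alpha] and f_alpha are the post-contingency flow on line alpha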
def get_n_minus_1_flows(circuit: MultiCircuit):
opt = PowerFlowOptions()
branches = circuit.get_branches()
m = circuit.get_branch_number()
Pmat = np.zeros((m, m)) # monitored, contingency
for c, branch in enumerate(branches):
if branch.active:
branch.active = False
pf = PowerFlowDriver(circuit, opt)
pf.run()
Pmat[:, c] = pf.results.Sbranch.real
branch.active = True
return Pmat
def check_lodf(grid: MultiCircuit):
flows_n1_nr = get_n_minus_1_flows(grid)
# assume 1 island
nc = compile_snapshot_circuit(grid)
islands = split_into_islands(nc)
circuit = islands[0]
pf_driver = PowerFlowDriver(grid, PowerFlowOptions())
pf_driver.run()
PTDF = compute_acptdf(Ybus=circuit.Ybus,
Yseries=circuit.Yseries,
Yf=circuit.Yf,
Yt=circuit.Yt,
Cf=circuit.C_branch_bus_f,
V=pf_driver.results.voltage,
pq=circuit.pq,
pv=circuit.pv,
distribute_slack=True)
LODF = make_lodf(circuit, PTDF)
Pbus = circuit.get_injections(False).real
flows_n = np.dot(PTDF, Pbus)
nl = circuit.nbr
flows_n1 = np.zeros((nl, nl))
for c in range(nl): # branch that fails (contingency)
# for m in range(nl): # branch to monitor
# flows_n1[m, c] = flows_n[m] + LODF[m, c] * flows_n[c]
flows_n1[:, c] = flows_n[:] + LODF[:, c] * flows_n[c]
return flows_n, flows_n1_nr, flows_n1
def test_ptdf(grid):
"""
Sigma-distances test
:param grid:
:return:
"""
nc = compile_snapshot_circuit(grid)
islands = split_into_islands(nc)
circuit = islands[0] # pick the first island
pf_driver = PowerFlowDriver(grid, PowerFlowOptions())
pf_driver.run()
PTDF = compute_acptdf(Ybus=circuit.Ybus,
Yseries=circuit.Yseries,
Yf=circuit.Yf,
Yt=circuit.Yt,
Cf=circuit.C_branch_bus_f,
V=pf_driver.results.voltage,
pq=circuit.pq,
pv=circuit.pv,
distribute_slack=False)
print('PTDF:')
print(PTDF)
if __name__ == '__main__':
from GridCal.Engine import FileOpen
import pandas as pd
np.set_printoptions(threshold=sys.maxsize, linewidth=200000000)
# np.set_printoptions(linewidth=2000, suppress=True)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE39_1W.gridcal'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE 14.xlsx'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/lynn5buspv.xlsx'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE 118.xlsx'
fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/1354 Pegase.xlsx'
# fname = 'helm_data1.gridcal'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE 14 PQ only.gridcal'
# fname = 'IEEE 14 PQ only full.gridcal'
# fname = '/home/santi/Descargas/matpower-fubm-master/data/case5.m'
# fname = '/home/santi/Descargas/matpower-fubm-master/data/case30.m'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/PGOC_6bus.gridcal'
grid_ = FileOpen(fname).open()
test_ptdf(grid_)
name = os.path.splitext(fname.split(os.sep)[-1])[0]
method = 'ACPTDF (No Jacobian, V=Vpf)'
nc_ = compile_snapshot_circuit(grid_)
islands_ = split_into_islands(nc_)
circuit_ = islands_[0]
pf_driver_ = PowerFlowDriver(grid_, PowerFlowOptions())
pf_driver_.run()
H_ = compute_acptdf(Ybus=circuit_.Ybus,
Yseries=circuit_.Yseries,
Yf=circuit_.Yf,
Yt=circuit_.Yt,
Cf=circuit_.C_branch_bus_f,
V=pf_driver_.results.voltage,
pq=circuit_.pq,
pv=circuit_.pv,
distribute_slack=False)
LODF_ = make_lodf(circuit_, H_)
if H_.shape[0] < 50:
print('PTDF:\n', H_)
print('LODF:\n', LODF_)
flows_n_, flows_n1_nr_, flows_n1_ = check_lodf(grid_)
# in the case of the grid PGOC_6bus
flows_multiple = multiple_failure(flows=flows_n_,
LODF=LODF_,
failed_idx=[1, 5]) # failed lines 2 and 6
Pn1_nr_df = pd.DataFrame(data=flows_n1_nr_, index=nc_.branch_names, columns=nc_.branch_names)
flows_n1_df = pd.DataFrame(data=flows_n1_, index=nc_.branch_names, columns=nc_.branch_names)
# plot N-1
fig = plt.figure(figsize=(12, 8))
title = 'N-1 with ' + method + ' (' + name + ')'
fig.suptitle(title)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
Pn1_nr_df.plot(ax=ax1, legend=False)
flows_n1_df.plot(ax=ax2, legend=False)
diff = Pn1_nr_df - flows_n1_df
diff.plot(ax=ax3, legend=False)
ax1.set_title('Newton-Raphson N-1 flows')
ax2.set_title('PTDF N-1 flows')
ax3.set_title('Difference')
fig.savefig(title + '.png')
# ------------------------------------------------------------------------------------------------------------------
# Perform real time series
# ------------------------------------------------------------------------------------------------------------------
if grid_.time_profile is not None:
grid_.ensure_profiles_exist()
nc_ts = compile_time_circuit(grid_)
islands_ts = split_time_circuit_into_islands(nc_ts)
circuit_ts = islands_ts[0]
pf_options = PowerFlowOptions()
ts_driver = TimeSeries(grid=grid_, options=pf_options)
ts_driver.run()
Pbr_nr = ts_driver.results.Sbranch.real
df_Pbr_nr = pd.DataFrame(data=Pbr_nr, columns=circuit_ts.branch_names, index=circuit_ts.time_array)
# Compute the PTDF based flows
Pbr_ptdf = get_branch_time_series(circuit=circuit_ts, PTDF=H_)
df_Pbr_ptdf = pd.DataFrame(data=Pbr_ptdf, columns=circuit_ts.branch_names, index=circuit_ts.time_array)
# plot
fig = plt.figure(figsize=(12, 8))
title = 'Flows with ' + method + ' (' + name + ')'
fig.suptitle(title)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
df_Pbr_nr.plot(ax=ax1, legend=False)
df_Pbr_ptdf.plot(ax=ax2, legend=False)
diff = df_Pbr_nr - df_Pbr_ptdf
diff.plot(ax=ax3, legend=False)
ax1.set_title('Newton-Raphson flows')
ax2.set_title('PTDF flows')
ax3.set_title('Difference')
fig.savefig(title + '.png')
plt.show()
| gpl-3.0 |
quheng/scikit-learn | examples/decomposition/plot_image_denoising.py | 181 | 5819 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of the Lena image using a dictionary fitted with online
:ref:`DictionaryLearning` and various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the result of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is in addition closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import lena
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
###############################################################################
# Load Lena image and extract patches
lena = lena() / 256.0
# downsample for higher speed
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena /= 4.0
height, width = lena.shape
# Distort the right half of the image
print('Distorting image...')
distorted = lena.copy()
distorted[:, height // 2:] += 0.075 * np.random.randn(width, height // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :height // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from Lena patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray, interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, lena, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, height // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
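# Editorial note (not part of the original example): transform_n_nonzero_coefs
# sets how many dictionary atoms each patch may use for 'omp' and 'lars', and
# transform_alpha is the threshold used by the 'threshold' algorithm; the
# titles above encode these settings for the figure captions.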
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = lena.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(
patches, (width, height // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], lena,
title + ' (time: %.1fs)' % dt)
plt.show()
| bsd-3-clause |
gclenaghan/scikit-learn | examples/preprocessing/plot_function_transformer.py | 158 | 1993 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
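# A minimal standalone sketch (illustrative only) of what the
# FunctionTransformer wrapper used below does with this function:
#
#   selector = FunctionTransformer(all_but_first_column)
#   selector.fit_transform(np.arange(12).reshape(4, 3))  # keeps columns 1 and 2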
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
lw = 0
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, lw=lw)
plt.figure()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
lw=lw,
s=60
)
plt.show()
| bsd-3-clause |
akraft196/pyASC | examples/mplot1.py | 1 | 7267 | #! /usr/bin/env python
#
# quick and dirty processing of the MD All Sky images
from astropy.io import fits
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from scipy.misc import imsave
import numpy as np
import aplpy
import argparse as ap
import os.path
import logging
import time
def d(ff,box=[]):
#very specific for 16 bit data, since we want to keep the data in uint16
h = fits.open(ff, do_not_scale_image_data=True)
if len(box)==0:
return h[0].header, h[0].data
else:
# figure out 0 vs. 1 based offsets; box is 1 based
return h[0].header, h[0].data[box[1]:box[3], box[0]:box[2]]
def dsum(i0,i1,step = 1, box=[]):
""" for a range of fits files
compute the mean and dispersion from the mean
"""
for i in range(i0,i1+1,step):
ff = 'IMG%05d.FIT' % i
h1, d1 = d(ff,box)
#very specific for 16 bit data, since we want to keep the data in uint16
bzero = h1['BZERO']
bscale = h1['BSCALE']
if i == i0:
sum0 = 1.0
sum1 = d1*bscale+bzero
sum2 = sum1*sum1
#sum1 = d1
#sum2 = d1*d1
h = h1
nx = d1.shape[1]
ny = d1.shape[0]
nz = i1 + 1 - i0
c = np.zeros((nz, ny, nx))
c[0,:,:] = d1.reshape(ny,nx)
else:
sum0 = sum0 + 1.0
sum1 = sum1 + (d1 * bscale + bzero)
sum2 = sum2 + (d1 * bscale + bzero) * (d1 * bscale + bzero)
#sum2 = sum2+d1*d1
c[i - i0,:,:] = d1.reshape(ny,nx)
sum1 = sum1 / sum0
sum2 = sum2 / sum0 - sum1*sum1
print type(sum1), type(sum2)
return h,sum1,np.sqrt(sum2),c
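# Usage sketch (assuming IMG00001.FIT .. IMG00010.FIT exist in the working
# directory; the real driver with argument parsing is in __main__ below):
#
#   header, mean_img, rms_img, cube = dsum(1, 10, box=[100, 100, 200, 200])
#   fits.writeto('mean.fits', mean_img, header, clobber=True)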
def show(sum):
""" some native matplotlib display,
doesn't show pointsources well at all
"""
ip = plt.imshow(sum)
plt.show()
def show2(sum):
""" aplpy is the better viewer clearly
"""
fig = aplpy.FITSFigure(sum)
#fig.show_grayscale()
fig.show_colorscale()
def show3(sum1,sum2):
""" aplpy is the better viewer clearly
"""
fig = aplpy.FITSFigure(sum1,subplot=(2,2,1))
#fig = aplpy.FITSFigure(sum2,subplot=(2,2,2),figure=1)
fig.show_grayscale()
# For some variations on this theme, e.g. time.time vs. time.clock, see
# http://stackoverflow.com/questions/7370801/measure-time-elapsed-in-python
#
class Dtime(object):
""" Class to help measuring the wall clock time between tagged events
Typical usage:
dt = Dtime()
...
dt.tag('a')
...
dt.tag('b')
"""
def __init__(self, label=".", report=True):
self.start = self.time()
self.init = self.start
self.label = label
self.report = report
self.dtimes = []
dt = self.init - self.init
if self.report:
logging.info("Dtime: %s ADMIT " % self.label + str(self.start))
logging.info("Dtime: %s BEGIN " % self.label + str(dt))
def reset(self, report=True):
self.start = self.time()
self.report = report
self.dtimes = []
def tag(self, mytag):
t0 = self.start
t1 = self.time()
dt = t1 - t0
self.dtimes.append((mytag, dt))
self.start = t1
if self.report:
logging.info("Dtime: %s " % self.label + mytag + " " + str(dt))
return dt
def show(self):
if self.report:
for r in self.dtimes:
logging.info("Dtime: %s " % self.label + str(r[0]) + " " + str(r[1]))
return self.dtimes
def end(self):
t0 = self.init
t1 = self.time()
dt = t1 - t0
if self.report:
logging.info("Dtime: %s END " % self.label + str(dt))
return dt
def time(self):
""" pick the actual OS routine that returns some kind of timer
time.time : wall clock time (include I/O and multitasking overhead)
time.clock : cpu clock time
"""
return np.array([time.clock(), time.time()])
if __name__ == '__main__':
logging.basicConfig(level = logging.INFO)
dt = Dtime("mplot1")
#--start, -s n
#--end, -e n
#--box x1 y1 x2 y2
parser = ap.ArgumentParser(description='Plotting .fits files.')
parser.add_argument('-f', '--frame', nargs = '*', type = int, help = 'Starting and ending parameters for the frames analyzed')
parser.add_argument('-b', '--box', nargs = 4, type = int, help = 'Coordinates for the bottom left corner and top right corner of a rectangle of pixels to be analyzed from the data. In the structure x1, y1, x2, y2 (1 based numbers)')
    parser.add_argument('-g', '--graphics', nargs = 1, type = int, default = [0], help = 'Controls whether to display or save graphics. 0: no graphics, 1: display graphics, 2: save graphics as .png')
args = vars(parser.parse_args())
if args['frame'] == None:
count = 0
start = None
end = None
step = 1
#while we have yet to find an end
while end == None:
filename = 'IMG%05d.FIT' % count
#if start has not been found yet, and this file exists
if start == None and os.path.isfile(filename):
start = count
#if start has been found and we finally found a file that doesn't exist, set end to the last file that existed (count - 1.FIT)
elif start != None and not os.path.isfile(filename):
end = count - 1
count += 1
elif len(args['frame']) >= 2 and len(args['frame']) <= 3:
start = args['frame'][0] # starting frame (IMGnnnnn.FIT)
end = args['frame'][1] # ending frame
if len(args['frame']) == 3:
            step = args['frame'][2]
else:
step = 1
else:
raise Exception,"-f needs 0, 2, or 3 arguments."
box = args['box'] # BLC and TRC
if box == None:
box = []
dt.tag("start")
# compute the average and dispersion of the series
h1,sum1,sum2,cube = dsum(start,end,step,box=box) # end can be uninitialized here might throw an error?
dt.tag("dsum")
nz = cube.shape[0]
# delta X and Y images
dsumy = sum1 - np.roll(sum1, 1, axis = 0) # change in the y axis
dsumx = sum1 - np.roll(sum1, 1, axis = 1) # change in the x axis
# write them to FITS
fits.writeto('dsumx.fits', dsumx, h1, clobber=True)
fits.writeto('dsumy.fits', dsumy, h1, clobber=True)
fits.writeto('sum1.fits', sum1, h1, clobber=True)
fits.writeto('sum2.fits', sum2, h1, clobber=True)
dt.tag("write2d")
# 3D cube to
h1['NAXIS'] = 3
h1['NAXIS3'] = nz
fits.writeto('cube.fits', cube, h1, clobber=True)
dt.tag("write3d")
if args['graphics'][0] == 1:
# plot the sum1 and sum2 correllation (glueviz should do this)
s1 = sum1.flatten()
s2 = sum2.flatten()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(s1,s2)
plt.show()
show2(sum1)
show2(sum2)
if args['graphics'][0] == 2:
imsave('sum1.png', sum1)
imsave('sum2.png', sum2)
dt.tag("done")
dt.end()
| mit |
lhilt/scipy | scipy/stats/tests/test_morestats.py | 4 | 70469 | # Author: Travis Oliphant, 2002
#
# Further enhancements and tests added by numerous SciPy developers.
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.random import RandomState
from numpy.testing import (assert_array_equal,
assert_almost_equal, assert_array_less, assert_array_almost_equal,
assert_, assert_allclose, assert_equal, assert_warns)
import pytest
from pytest import raises as assert_raises
from scipy._lib._numpy_compat import suppress_warnings
from scipy import stats
from .common_tests import check_named_results
# Matplotlib is not a scipy dependency but is optionally used in probplot, so
# check if it's available
try:
import matplotlib
matplotlib.rcParams['backend'] = 'Agg'
import matplotlib.pyplot as plt
have_matplotlib = True
except Exception:
have_matplotlib = False
# test data gear.dat from NIST for Levene and Bartlett test
# https://www.itl.nist.gov/div898/handbook/eda/section3/eda3581.htm
g1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000]
g2 = [0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988]
g3 = [0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996]
g4 = [1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996]
g5 = [0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996]
g6 = [1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996]
g7 = [0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002]
g8 = [0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006]
g9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991]
g10 = [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997]
class TestBayes_mvs(object):
def test_basic(self):
# Expected values in this test simply taken from the function. For
# some checks regarding correctness of implementation, see review in
# gh-674
data = [6, 9, 12, 7, 8, 8, 13]
mean, var, std = stats.bayes_mvs(data)
assert_almost_equal(mean.statistic, 9.0)
assert_allclose(mean.minmax, (7.1036502226125329, 10.896349777387467),
rtol=1e-14)
assert_almost_equal(var.statistic, 10.0)
assert_allclose(var.minmax, (3.1767242068607087, 24.45910381334018),
rtol=1e-09)
assert_almost_equal(std.statistic, 2.9724954732045084, decimal=14)
assert_allclose(std.minmax, (1.7823367265645145, 4.9456146050146312),
rtol=1e-14)
def test_empty_input(self):
assert_raises(ValueError, stats.bayes_mvs, [])
def test_result_attributes(self):
x = np.arange(15)
attributes = ('statistic', 'minmax')
res = stats.bayes_mvs(x)
for i in res:
check_named_results(i, attributes)
class TestMvsdist(object):
def test_basic(self):
data = [6, 9, 12, 7, 8, 8, 13]
mean, var, std = stats.mvsdist(data)
assert_almost_equal(mean.mean(), 9.0)
assert_allclose(mean.interval(0.9), (7.1036502226125329,
10.896349777387467), rtol=1e-14)
assert_almost_equal(var.mean(), 10.0)
assert_allclose(var.interval(0.9), (3.1767242068607087,
24.45910381334018), rtol=1e-09)
assert_almost_equal(std.mean(), 2.9724954732045084, decimal=14)
assert_allclose(std.interval(0.9), (1.7823367265645145,
4.9456146050146312), rtol=1e-14)
def test_empty_input(self):
assert_raises(ValueError, stats.mvsdist, [])
def test_bad_arg(self):
# Raise ValueError if fewer than two data points are given.
data = [1]
assert_raises(ValueError, stats.mvsdist, data)
def test_warns(self):
# regression test for gh-5270
# make sure there are no spurious divide-by-zero warnings
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
[x.mean() for x in stats.mvsdist([1, 2, 3])]
[x.mean() for x in stats.mvsdist([1, 2, 3, 4, 5])]
class TestShapiro(object):
def test_basic(self):
x1 = [0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
4.43, 0.21, 4.75, 0.71, 1.52, 3.24,
0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]
w, pw = stats.shapiro(x1)
assert_almost_equal(w, 0.90047299861907959, 6)
assert_almost_equal(pw, 0.042089745402336121, 6)
x2 = [1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
3.48, 1.10, 0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
0.08, 3.67, 2.81, 3.49]
w, pw = stats.shapiro(x2)
assert_almost_equal(w, 0.9590270, 6)
assert_almost_equal(pw, 0.52460, 3)
# Verified against R
np.random.seed(12345678)
x3 = stats.norm.rvs(loc=5, scale=3, size=100)
w, pw = stats.shapiro(x3)
assert_almost_equal(w, 0.9772805571556091, decimal=6)
assert_almost_equal(pw, 0.08144091814756393, decimal=3)
# Extracted from original paper
x4 = [0.139, 0.157, 0.175, 0.256, 0.344, 0.413, 0.503, 0.577, 0.614,
0.655, 0.954, 1.392, 1.557, 1.648, 1.690, 1.994, 2.174, 2.206,
3.245, 3.510, 3.571, 4.354, 4.980, 6.084, 8.351]
W_expected = 0.83467
p_expected = 0.000914
w, pw = stats.shapiro(x4)
assert_almost_equal(w, W_expected, decimal=4)
assert_almost_equal(pw, p_expected, decimal=5)
def test_2d(self):
x1 = [[0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
4.43, 0.21, 4.75], [0.71, 1.52, 3.24,
0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]]
w, pw = stats.shapiro(x1)
assert_almost_equal(w, 0.90047299861907959, 6)
assert_almost_equal(pw, 0.042089745402336121, 6)
x2 = [[1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
3.48, 1.10], [0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
0.08, 3.67, 2.81, 3.49]]
w, pw = stats.shapiro(x2)
assert_almost_equal(w, 0.9590270, 6)
assert_almost_equal(pw, 0.52460, 3)
def test_empty_input(self):
assert_raises(ValueError, stats.shapiro, [])
assert_raises(ValueError, stats.shapiro, [[], [], []])
def test_not_enough_values(self):
assert_raises(ValueError, stats.shapiro, [1, 2])
assert_raises(ValueError, stats.shapiro, [[], [2]])
def test_bad_arg(self):
# Length of x is less than 3.
x = [1]
assert_raises(ValueError, stats.shapiro, x)
def test_nan_input(self):
x = np.arange(10.)
x[9] = np.nan
w, pw = stats.shapiro(x)
assert_equal(w, np.nan)
assert_almost_equal(pw, 1.0)
class TestAnderson(object):
def test_normal(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A, crit, sig = stats.anderson(x1)
assert_array_less(crit[:-1], A)
A, crit, sig = stats.anderson(x2)
assert_array_less(A, crit[-2:])
v = np.ones(10)
v[0] = 0
A, crit, sig = stats.anderson(v)
# The expected statistic 3.208057 was computed independently of scipy.
# For example, in R:
# > library(nortest)
# > v <- rep(1, 10)
# > v[1] <- 0
# > result <- ad.test(v)
# > result$statistic
# A
# 3.208057
assert_allclose(A, 3.208057)
def test_expon(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A, crit, sig = stats.anderson(x1, 'expon')
assert_array_less(A, crit[-2:])
olderr = np.seterr(all='ignore')
try:
A, crit, sig = stats.anderson(x2, 'expon')
finally:
np.seterr(**olderr)
assert_(A > crit[-1])
def test_gumbel(self):
# Regression test for gh-6306. Before that issue was fixed,
# this case would return a2=inf.
v = np.ones(100)
v[0] = 0.0
a2, crit, sig = stats.anderson(v, 'gumbel')
# A brief reimplementation of the calculation of the statistic.
n = len(v)
xbar, s = stats.gumbel_l.fit(v)
logcdf = stats.gumbel_l.logcdf(v, xbar, s)
logsf = stats.gumbel_l.logsf(v, xbar, s)
i = np.arange(1, n+1)
expected_a2 = -n - np.mean((2*i - 1) * (logcdf + logsf[::-1]))
assert_allclose(a2, expected_a2)
def test_bad_arg(self):
assert_raises(ValueError, stats.anderson, [1], dist='plate_of_shrimp')
def test_result_attributes(self):
rs = RandomState(1234567890)
x = rs.standard_exponential(size=50)
res = stats.anderson(x)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
def test_gumbel_l(self):
# gh-2592, gh-6337
# Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist.
rs = RandomState(1234567890)
x = rs.gumbel(size=100)
A1, crit1, sig1 = stats.anderson(x, 'gumbel')
A2, crit2, sig2 = stats.anderson(x, 'gumbel_l')
assert_allclose(A2, A1)
def test_gumbel_r(self):
# gh-2592, gh-6337
# Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist.
rs = RandomState(1234567890)
x1 = rs.gumbel(size=100)
x2 = np.ones(100)
A1, crit1, sig1 = stats.anderson(x1, 'gumbel_r')
A2, crit2, sig2 = stats.anderson(x2, 'gumbel_r')
assert_array_less(A1, crit1[-2:])
assert_(A2 > crit2[-1])
class TestAndersonKSamp(object):
def test_example1a(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)
assert_almost_equal(Tk, 4.449, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm[0:5], 4)
assert_allclose(p, 0.0021, atol=0.00025)
def test_example1b(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass arrays
t1 = np.array([38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0])
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=True)
assert_almost_equal(Tk, 4.480, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm[0:5], 4)
assert_allclose(p, 0.0020, atol=0.00025)
def test_example2a(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
# Pass lists instead of arrays
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10, t11, t12, t13, t14),
midrank=False)
assert_almost_equal(Tk, 3.288, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm[0:5], 4)
assert_allclose(p, 0.0041, atol=0.00025)
def test_example2b(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10, t11, t12, t13, t14),
midrank=True)
assert_almost_equal(Tk, 3.294, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm[0:5], 4)
assert_allclose(p, 0.0041, atol=0.00025)
def test_R_kSamples(self):
# test values generates with R package kSamples
# package version 1.2-6 (2017-06-14)
# r1 = 1:100
# continuous case (no ties) --> version 1
# res <- kSamples::ad.test(r1, r1 + 40.5)
# res$ad[1, "T.AD"] # 41.105
# res$ad[1, " asympt. P-value"] # 5.8399e-18
#
# discrete case (ties allowed) --> version 2 (here: midrank=True)
# res$ad[2, "T.AD"] # 41.235
#
# res <- kSamples::ad.test(r1, r1 + .5)
# res$ad[1, "T.AD"] # -1.2824
# res$ad[1, " asympt. P-value"] # 1
# res$ad[2, "T.AD"] # -1.2944
#
# res <- kSamples::ad.test(r1, r1 + 7.5)
# res$ad[1, "T.AD"] # 1.4923
# res$ad[1, " asympt. P-value"] # 0.077501
#
# res <- kSamples::ad.test(r1, r1 + 6)
# res$ad[2, "T.AD"] # 0.63892
# res$ad[2, " asympt. P-value"] # 0.17981
#
# res <- kSamples::ad.test(r1, r1 + 11.5)
# res$ad[1, "T.AD"] # 4.5042
# res$ad[1, " asympt. P-value"] # 0.00545
#
# res <- kSamples::ad.test(r1, r1 + 13.5)
# res$ad[1, "T.AD"] # 6.2982
# res$ad[1, " asympt. P-value"] # 0.00118
x1 = np.linspace(1, 100, 100)
        # test case: different distributions; p-value floored at 0.001
# test case for issue #5493 / #8536
with suppress_warnings() as sup:
sup.filter(UserWarning, message='p-value floored')
s, _, p = stats.anderson_ksamp([x1, x1 + 40.5], midrank=False)
assert_almost_equal(s, 41.105, 3)
assert_equal(p, 0.001)
with suppress_warnings() as sup:
sup.filter(UserWarning, message='p-value floored')
s, _, p = stats.anderson_ksamp([x1, x1 + 40.5])
assert_almost_equal(s, 41.235, 3)
assert_equal(p, 0.001)
# test case: similar distributions --> p-value capped at 0.25
with suppress_warnings() as sup:
sup.filter(UserWarning, message='p-value capped')
s, _, p = stats.anderson_ksamp([x1, x1 + .5], midrank=False)
assert_almost_equal(s, -1.2824, 4)
assert_equal(p, 0.25)
with suppress_warnings() as sup:
sup.filter(UserWarning, message='p-value capped')
s, _, p = stats.anderson_ksamp([x1, x1 + .5])
assert_almost_equal(s, -1.2944, 4)
assert_equal(p, 0.25)
# test case: check interpolated p-value in [0.01, 0.25] (no ties)
s, _, p = stats.anderson_ksamp([x1, x1 + 7.5], midrank=False)
assert_almost_equal(s, 1.4923, 4)
assert_allclose(p, 0.0775, atol=0.005, rtol=0)
# test case: check interpolated p-value in [0.01, 0.25] (w/ ties)
s, _, p = stats.anderson_ksamp([x1, x1 + 6])
assert_almost_equal(s, 0.6389, 4)
assert_allclose(p, 0.1798, atol=0.005, rtol=0)
# test extended critical values for p=0.001 and p=0.005
s, _, p = stats.anderson_ksamp([x1, x1 + 11.5], midrank=False)
assert_almost_equal(s, 4.5042, 4)
assert_allclose(p, 0.00545, atol=0.0005, rtol=0)
s, _, p = stats.anderson_ksamp([x1, x1 + 13.5], midrank=False)
assert_almost_equal(s, 6.2982, 4)
assert_allclose(p, 0.00118, atol=0.0001, rtol=0)
def test_not_enough_samples(self):
assert_raises(ValueError, stats.anderson_ksamp, np.ones(5))
def test_no_distinct_observations(self):
assert_raises(ValueError, stats.anderson_ksamp,
(np.ones(5), np.ones(5)))
def test_empty_sample(self):
assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), []))
def test_result_attributes(self):
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
res = stats.anderson_ksamp((t1, t2), midrank=False)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
class TestAnsari(object):
def test_small(self):
x = [1, 2, 3, 3, 4]
y = [3, 2, 6, 1, 6, 1, 4, 1]
with suppress_warnings() as sup:
sup.filter(UserWarning, "Ties preclude use of exact statistic.")
W, pval = stats.ansari(x, y)
assert_almost_equal(W, 23.5, 11)
assert_almost_equal(pval, 0.13499256881897437, 11)
def test_approx(self):
ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
101, 96, 97, 102, 107, 113, 116, 113, 110, 98))
parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104,
100, 96, 108, 103, 104, 114, 114, 113, 108,
106, 99))
with suppress_warnings() as sup:
sup.filter(UserWarning, "Ties preclude use of exact statistic.")
W, pval = stats.ansari(ramsay, parekh)
assert_almost_equal(W, 185.5, 11)
assert_almost_equal(pval, 0.18145819972867083, 11)
def test_exact(self):
W, pval = stats.ansari([1, 2, 3, 4], [15, 5, 20, 8, 10, 12])
assert_almost_equal(W, 10.0, 11)
assert_almost_equal(pval, 0.533333333333333333, 7)
def test_bad_arg(self):
assert_raises(ValueError, stats.ansari, [], [1])
assert_raises(ValueError, stats.ansari, [1], [])
def test_result_attributes(self):
x = [1, 2, 3, 3, 4]
y = [3, 2, 6, 1, 6, 1, 4, 1]
with suppress_warnings() as sup:
sup.filter(UserWarning, "Ties preclude use of exact statistic.")
res = stats.ansari(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestBartlett(object):
def test_data(self):
# https://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
T, pval = stats.bartlett(*args)
assert_almost_equal(T, 20.78587342806484, 7)
assert_almost_equal(pval, 0.0136358632781, 7)
def test_bad_arg(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.bartlett, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.bartlett(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_empty_arg(self):
args = (g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, [])
assert_equal((np.nan, np.nan), stats.bartlett(*args))
# temporary fix for issue #9252: only accept 1d input
def test_1d_input(self):
x = np.array([[1, 2], [3, 4]])
assert_raises(ValueError, stats.bartlett, g1, x)
class TestLevene(object):
def test_data(self):
# https://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
W, pval = stats.levene(*args)
assert_almost_equal(W, 1.7059176930008939, 7)
assert_almost_equal(pval, 0.0990829755522, 7)
def test_trimmed1(self):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
W1, pval1 = stats.levene(g1, g2, g3, center='mean')
W2, pval2 = stats.levene(g1, g2, g3, center='trimmed',
proportiontocut=0.0)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
np.random.seed(1234)
x2 = np.random.permutation(x)
# Use center='trimmed'
W0, pval0 = stats.levene(x, y, center='trimmed',
proportiontocut=0.125)
W1, pval1 = stats.levene(x2, y, center='trimmed',
proportiontocut=0.125)
# Trim the data here, and use center='mean'
W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(W0, W2)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_equal_mean_median(self):
x = np.linspace(-1, 1, 21)
np.random.seed(1234)
x2 = np.random.permutation(x)
y = x**3
W1, pval1 = stats.levene(x, y, center='mean')
W2, pval2 = stats.levene(x2, y, center='median')
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1, 1, 21)
assert_raises(TypeError, stats.levene, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1, 1, 21)
assert_raises(ValueError, stats.levene, x, x, center='trim')
def test_too_few_args(self):
assert_raises(ValueError, stats.levene, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.levene(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
# temporary fix for issue #9252: only accept 1d input
def test_1d_input(self):
x = np.array([[1, 2], [3, 4]])
assert_raises(ValueError, stats.levene, g1, x)
class TestBinomP(object):
def test_data(self):
pval = stats.binom_test(100, 250)
assert_almost_equal(pval, 0.0018833009350757682, 11)
pval = stats.binom_test(201, 405)
assert_almost_equal(pval, 0.92085205962670713, 11)
pval = stats.binom_test([682, 243], p=3.0/4)
assert_almost_equal(pval, 0.38249155957481695, 11)
def test_bad_len_x(self):
# Length of x must be 1 or 2.
assert_raises(ValueError, stats.binom_test, [1, 2, 3])
def test_bad_n(self):
# len(x) is 1, but n is invalid.
# Missing n
assert_raises(ValueError, stats.binom_test, [100])
# n less than x[0]
assert_raises(ValueError, stats.binom_test, [100], n=50)
def test_bad_p(self):
assert_raises(ValueError, stats.binom_test, [50, 50], p=2.0)
def test_alternatives(self):
res = stats.binom_test(51, 235, p=1./6, alternative='less')
assert_almost_equal(res, 0.982022657605858)
res = stats.binom_test(51, 235, p=1./6, alternative='greater')
assert_almost_equal(res, 0.02654424571169085)
res = stats.binom_test(51, 235, p=1./6, alternative='two-sided')
assert_almost_equal(res, 0.0437479701823997)
class TestFligner(object):
def test_data(self):
# numbers from R: fligner.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.fligner(x1, x1**2),
(3.2282229927203536, 0.072379187848207877),
11)
def test_trimmed1(self):
# Perturb input to break ties in the transformed data
# See https://github.com/scipy/scipy/pull/8042 for more details
rs = np.random.RandomState(123)
_perturb = lambda g: (np.asarray(g) + 1e-10*rs.randn(len(g))).tolist()
g1_ = _perturb(g1)
g2_ = _perturb(g2)
g3_ = _perturb(g3)
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
Xsq1, pval1 = stats.fligner(g1_, g2_, g3_, center='mean')
Xsq2, pval2 = stats.fligner(g1_, g2_, g3_, center='trimmed',
proportiontocut=0.0)
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
# Use center='trimmed'
Xsq1, pval1 = stats.fligner(x, y, center='trimmed',
proportiontocut=0.125)
# Trim the data here, and use center='mean'
Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
# The following test looks reasonable at first, but fligner() uses the
# function stats.rankdata(), and in one of the cases in this test,
# there are ties, while in the other (because of normal rounding
# errors) there are not. This difference leads to differences in the
# third significant digit of W.
#
#def test_equal_mean_median(self):
# x = np.linspace(-1,1,21)
# y = x**3
# W1, pval1 = stats.fligner(x, y, center='mean')
# W2, pval2 = stats.fligner(x, y, center='median')
# assert_almost_equal(W1, W2)
# assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1, 1, 21)
assert_raises(TypeError, stats.fligner, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1, 1, 21)
assert_raises(ValueError, stats.fligner, x, x, center='trim')
def test_bad_num_args(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.fligner, [1])
def test_empty_arg(self):
x = np.arange(5)
assert_equal((np.nan, np.nan), stats.fligner(x, x**2, []))
class TestMood(object):
def test_mood(self):
# numbers from R: mood.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.mood(x1, x1**2),
(-1.3830857299399906, 0.16663858066771478),
11)
def test_mood_order_of_args(self):
# z should change sign when the order of arguments changes, pvalue
# should not change
np.random.seed(1234)
x1 = np.random.randn(10, 1)
x2 = np.random.randn(15, 1)
z1, p1 = stats.mood(x1, x2)
z2, p2 = stats.mood(x2, x1)
assert_array_almost_equal([z1, p1], [-z2, p2])
def test_mood_with_axis_none(self):
# Test with axis = None, compare with results from R
x1 = [-0.626453810742332, 0.183643324222082, -0.835628612410047,
1.59528080213779, 0.329507771815361, -0.820468384118015,
0.487429052428485, 0.738324705129217, 0.575781351653492,
-0.305388387156356, 1.51178116845085, 0.389843236411431,
-0.621240580541804, -2.2146998871775, 1.12493091814311,
-0.0449336090152309, -0.0161902630989461, 0.943836210685299,
0.821221195098089, 0.593901321217509]
x2 = [-0.896914546624981, 0.184849184646742, 1.58784533120882,
-1.13037567424629, -0.0802517565509893, 0.132420284381094,
0.707954729271733, -0.23969802417184, 1.98447393665293,
-0.138787012119665, 0.417650750792556, 0.981752777463662,
-0.392695355503813, -1.03966897694891, 1.78222896030858,
-2.31106908460517, 0.878604580921265, 0.035806718015226,
1.01282869212708, 0.432265154539617, 2.09081920524915,
-1.19992581964387, 1.58963820029007, 1.95465164222325,
0.00493777682814261, -2.45170638784613, 0.477237302613617,
-0.596558168631403, 0.792203270299649, 0.289636710177348]
x1 = np.array(x1)
x2 = np.array(x2)
x1.shape = (10, 2)
x2.shape = (15, 2)
assert_array_almost_equal(stats.mood(x1, x2, axis=None),
[-1.31716607555, 0.18778296257])
def test_mood_2d(self):
# Test if the results of mood test in 2-D case are consistent with the
# R result for the same inputs. Numbers from R mood.test().
ny = 5
np.random.seed(1234)
x1 = np.random.randn(10, ny)
x2 = np.random.randn(15, ny)
z_vectest, pval_vectest = stats.mood(x1, x2)
for j in range(ny):
assert_array_almost_equal([z_vectest[j], pval_vectest[j]],
stats.mood(x1[:, j], x2[:, j]))
# inverse order of dimensions
x1 = x1.transpose()
x2 = x2.transpose()
z_vectest, pval_vectest = stats.mood(x1, x2, axis=1)
for i in range(ny):
# check axis handling is self consistent
assert_array_almost_equal([z_vectest[i], pval_vectest[i]],
stats.mood(x1[i, :], x2[i, :]))
def test_mood_3d(self):
shape = (10, 5, 6)
np.random.seed(1234)
x1 = np.random.randn(*shape)
x2 = np.random.randn(*shape)
for axis in range(3):
z_vectest, pval_vectest = stats.mood(x1, x2, axis=axis)
# Tests that result for 3-D arrays is equal to that for the
# same calculation on a set of 1-D arrays taken from the
# 3-D array
axes_idx = ([1, 2], [0, 2], [0, 1]) # the two axes != axis
for i in range(shape[axes_idx[axis][0]]):
for j in range(shape[axes_idx[axis][1]]):
if axis == 0:
slice1 = x1[:, i, j]
slice2 = x2[:, i, j]
elif axis == 1:
slice1 = x1[i, :, j]
slice2 = x2[i, :, j]
else:
slice1 = x1[i, j, :]
slice2 = x2[i, j, :]
assert_array_almost_equal([z_vectest[i, j],
pval_vectest[i, j]],
stats.mood(slice1, slice2))
def test_mood_bad_arg(self):
# Raise ValueError when the sum of the lengths of the args is
# less than 3
assert_raises(ValueError, stats.mood, [1], [])
class TestProbplot(object):
def test_basic(self):
np.random.seed(12345)
x = stats.norm.rvs(size=20)
osm, osr = stats.probplot(x, fit=False)
osm_expected = [-1.8241636, -1.38768012, -1.11829229, -0.91222575,
-0.73908135, -0.5857176, -0.44506467, -0.31273668,
-0.18568928, -0.06158146, 0.06158146, 0.18568928,
0.31273668, 0.44506467, 0.5857176, 0.73908135,
0.91222575, 1.11829229, 1.38768012, 1.8241636]
assert_allclose(osr, np.sort(x))
assert_allclose(osm, osm_expected)
res, res_fit = stats.probplot(x, fit=True)
res_fit_expected = [1.05361841, 0.31297795, 0.98741609]
assert_allclose(res_fit, res_fit_expected)
def test_sparams_keyword(self):
np.random.seed(123456)
x = stats.norm.rvs(size=100)
# Check that None, () and 0 (loc=0, for normal distribution) all work
# and give the same results
osm1, osr1 = stats.probplot(x, sparams=None, fit=False)
osm2, osr2 = stats.probplot(x, sparams=0, fit=False)
osm3, osr3 = stats.probplot(x, sparams=(), fit=False)
assert_allclose(osm1, osm2)
assert_allclose(osm1, osm3)
assert_allclose(osr1, osr2)
assert_allclose(osr1, osr3)
# Check giving (loc, scale) params for normal distribution
osm, osr = stats.probplot(x, sparams=(), fit=False)
def test_dist_keyword(self):
np.random.seed(12345)
x = stats.norm.rvs(size=20)
osm1, osr1 = stats.probplot(x, fit=False, dist='t', sparams=(3,))
osm2, osr2 = stats.probplot(x, fit=False, dist=stats.t, sparams=(3,))
assert_allclose(osm1, osm2)
assert_allclose(osr1, osr2)
assert_raises(ValueError, stats.probplot, x, dist='wrong-dist-name')
assert_raises(AttributeError, stats.probplot, x, dist=[])
class custom_dist(object):
"""Some class that looks just enough like a distribution."""
def ppf(self, q):
return stats.norm.ppf(q, loc=2)
osm1, osr1 = stats.probplot(x, sparams=(2,), fit=False)
osm2, osr2 = stats.probplot(x, dist=custom_dist(), fit=False)
assert_allclose(osm1, osm2)
assert_allclose(osr1, osr2)
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_plot_kwarg(self):
np.random.seed(7654321)
fig = plt.figure()
fig.add_subplot(111)
x = stats.t.rvs(3, size=100)
res1, fitres1 = stats.probplot(x, plot=plt)
plt.close()
res2, fitres2 = stats.probplot(x, plot=None)
res3 = stats.probplot(x, fit=False, plot=plt)
plt.close()
res4 = stats.probplot(x, fit=False, plot=None)
# Check that results are consistent between combinations of `fit` and
# `plot` keywords.
assert_(len(res1) == len(res2) == len(res3) == len(res4) == 2)
assert_allclose(res1, res2)
assert_allclose(res1, res3)
assert_allclose(res1, res4)
assert_allclose(fitres1, fitres2)
# Check that a Matplotlib Axes object is accepted
fig = plt.figure()
ax = fig.add_subplot(111)
stats.probplot(x, fit=False, plot=ax)
plt.close()
def test_probplot_bad_args(self):
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.probplot, [1], dist="plate_of_shrimp")
def test_empty(self):
assert_equal(stats.probplot([], fit=False),
(np.array([]), np.array([])))
assert_equal(stats.probplot([], fit=True),
((np.array([]), np.array([])),
(np.nan, np.nan, 0.0)))
def test_array_of_size_one(self):
with np.errstate(invalid='ignore'):
assert_equal(stats.probplot([1], fit=True),
((np.array([0.]), np.array([1])),
(np.nan, np.nan, 0.0)))
class TestWilcoxon(object):
def test_wilcoxon_bad_arg(self):
# Raise ValueError when two args of different lengths are given or
# zero_method is unknown.
assert_raises(ValueError, stats.wilcoxon, [1], [1, 2])
assert_raises(ValueError, stats.wilcoxon, [1, 2], [1, 2], "dummy")
assert_raises(ValueError, stats.wilcoxon, [1, 2], [1, 2],
alternative="dummy")
def test_zero_diff(self):
x = np.arange(20)
# pratt and wilcox do not work if x - y == 0
assert_raises(ValueError, stats.wilcoxon, x, x, "wilcox")
assert_raises(ValueError, stats.wilcoxon, x, x, "pratt")
# ranksum is n*(n+1)/2, split in half if method == "zsplit"
assert_equal(stats.wilcoxon(x, x, "zsplit"), (20*21/4, 1.0))
def test_pratt(self):
# regression test for gh-6805: p-value matches value from R package
# coin (wilcoxsign_test) reported in the issue
x = [1, 2, 3, 4]
y = [1, 2, 3, 5]
with suppress_warnings() as sup:
sup.filter(UserWarning, message="Sample size too small")
res = stats.wilcoxon(x, y, zero_method="pratt")
assert_allclose(res, (0.0, 0.31731050786291415))
def test_wilcoxon_arg_type(self):
# Should be able to accept list as arguments.
# Address issue 6070.
arr = [1, 2, 3, 0, -1, 3, 1, 2, 1, 1, 2]
_ = stats.wilcoxon(arr, zero_method="pratt")
_ = stats.wilcoxon(arr, zero_method="zsplit")
_ = stats.wilcoxon(arr, zero_method="wilcox")
def test_accuracy_wilcoxon(self):
freq = [1, 4, 16, 15, 8, 4, 5, 1, 2]
nums = range(-4, 5)
x = np.concatenate([[u] * v for u, v in zip(nums, freq)])
y = np.zeros(x.size)
T, p = stats.wilcoxon(x, y, "pratt")
assert_allclose(T, 423)
assert_allclose(p, 0.0031724568006762576)
T, p = stats.wilcoxon(x, y, "zsplit")
assert_allclose(T, 441)
assert_allclose(p, 0.0032145343172473055)
T, p = stats.wilcoxon(x, y, "wilcox")
assert_allclose(T, 327)
assert_allclose(p, 0.00641346115861)
# Test the 'correction' option, using values computed in R with:
# > wilcox.test(x, y, paired=TRUE, exact=FALSE, correct={FALSE,TRUE})
x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
T, p = stats.wilcoxon(x, y, correction=False)
assert_equal(T, 34)
assert_allclose(p, 0.6948866, rtol=1e-6)
T, p = stats.wilcoxon(x, y, correction=True)
assert_equal(T, 34)
assert_allclose(p, 0.7240817, rtol=1e-6)
def test_wilcoxon_result_attributes(self):
x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
res = stats.wilcoxon(x, y, correction=False)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_wilcoxon_tie(self):
# Regression test for gh-2391.
# Corresponding R code is:
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=FALSE)
# > result$p.value
# [1] 0.001565402
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=TRUE)
# > result$p.value
# [1] 0.001904195
stat, p = stats.wilcoxon([0.1] * 10)
expected_p = 0.001565402
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
stat, p = stats.wilcoxon([0.1] * 10, correction=True)
expected_p = 0.001904195
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
def test_onesided(self):
# tested against "R version 3.4.1 (2017-06-30)"
# x <- c(125, 115, 130, 140, 140, 115, 140, 125, 140, 135)
# y <- c(110, 122, 125, 120, 140, 124, 123, 137, 135, 145)
# cfg <- list(x = x, y = y, paired = TRUE, exact = FALSE)
# do.call(wilcox.test, c(cfg, list(alternative = "less", correct = FALSE)))
# do.call(wilcox.test, c(cfg, list(alternative = "less", correct = TRUE)))
# do.call(wilcox.test, c(cfg, list(alternative = "greater", correct = FALSE)))
# do.call(wilcox.test, c(cfg, list(alternative = "greater", correct = TRUE)))
x = [125, 115, 130, 140, 140, 115, 140, 125, 140, 135]
y = [110, 122, 125, 120, 140, 124, 123, 137, 135, 145]
with suppress_warnings() as sup:
sup.filter(UserWarning, message="Sample size too small")
w, p = stats.wilcoxon(x, y, alternative="less")
assert_equal(w, 27)
assert_almost_equal(p, 0.7031847, decimal=6)
with suppress_warnings() as sup:
sup.filter(UserWarning, message="Sample size too small")
w, p = stats.wilcoxon(x, y, alternative="less", correction=True)
assert_equal(w, 27)
assert_almost_equal(p, 0.7233656, decimal=6)
with suppress_warnings() as sup:
sup.filter(UserWarning, message="Sample size too small")
w, p = stats.wilcoxon(x, y, alternative="greater")
assert_equal(w, 27)
assert_almost_equal(p, 0.2968153, decimal=6)
with suppress_warnings() as sup:
sup.filter(UserWarning, message="Sample size too small")
w, p = stats.wilcoxon(x, y, alternative="greater", correction=True)
assert_equal(w, 27)
assert_almost_equal(p, 0.3176447, decimal=6)
class TestKstat(object):
def test_moments_normal_distribution(self):
np.random.seed(32149)
data = np.random.randn(12345)
moments = [stats.kstat(data, n) for n in [1, 2, 3, 4]]
expected = [0.011315, 1.017931, 0.05811052, 0.0754134]
assert_allclose(moments, expected, rtol=1e-4)
# test equivalence with `stats.moment`
m1 = stats.moment(data, moment=1)
m2 = stats.moment(data, moment=2)
m3 = stats.moment(data, moment=3)
assert_allclose((m1, m2, m3), expected[:-1], atol=0.02, rtol=1e-2)
def test_empty_input(self):
assert_raises(ValueError, stats.kstat, [])
def test_nan_input(self):
data = np.arange(10.)
data[6] = np.nan
assert_equal(stats.kstat(data), np.nan)
def test_kstat_bad_arg(self):
# Raise ValueError if n > 4 or n < 1.
data = np.arange(10)
for n in [0, 4.001]:
assert_raises(ValueError, stats.kstat, data, n=n)
class TestKstatVar(object):
def test_empty_input(self):
assert_raises(ValueError, stats.kstatvar, [])
def test_nan_input(self):
data = np.arange(10.)
data[6] = np.nan
assert_equal(stats.kstat(data), np.nan)
def test_bad_arg(self):
# Raise ValueError is n is not 1 or 2.
data = [1]
n = 10
assert_raises(ValueError, stats.kstatvar, data, n=n)
class TestPpccPlot(object):
def setup_method(self):
np.random.seed(7654321)
self.x = stats.loggamma.rvs(5, size=500) + 5
def test_basic(self):
N = 5
svals, ppcc = stats.ppcc_plot(self.x, -10, 10, N=N)
ppcc_expected = [0.21139644, 0.21384059, 0.98766719, 0.97980182,
0.93519298]
assert_allclose(svals, np.linspace(-10, 10, num=N))
assert_allclose(ppcc, ppcc_expected)
def test_dist(self):
# Test that we can specify distributions both by name and as objects.
svals1, ppcc1 = stats.ppcc_plot(self.x, -10, 10, dist='tukeylambda')
svals2, ppcc2 = stats.ppcc_plot(self.x, -10, 10,
dist=stats.tukeylambda)
assert_allclose(svals1, svals2, rtol=1e-20)
assert_allclose(ppcc1, ppcc2, rtol=1e-20)
# Test that 'tukeylambda' is the default dist
svals3, ppcc3 = stats.ppcc_plot(self.x, -10, 10)
assert_allclose(svals1, svals3, rtol=1e-20)
assert_allclose(ppcc1, ppcc3, rtol=1e-20)
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_plot_kwarg(self):
# Check with the matplotlib.pyplot module
fig = plt.figure()
ax = fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=plt)
fig.delaxes(ax)
# Check that a Matplotlib Axes object is accepted
ax = fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=ax)
plt.close()
def test_invalid_inputs(self):
# `b` has to be larger than `a`
assert_raises(ValueError, stats.ppcc_plot, self.x, 1, 0)
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.ppcc_plot, [1, 2, 3], 0, 1,
dist="plate_of_shrimp")
def test_empty(self):
# For consistency with probplot return for one empty array,
# ppcc contains all zeros and svals is the same as for normal array
# input.
svals, ppcc = stats.ppcc_plot([], 0, 1)
assert_allclose(svals, np.linspace(0, 1, num=80))
assert_allclose(ppcc, np.zeros(80, dtype=float))
class TestPpccMax(object):
def test_ppcc_max_bad_arg(self):
# Raise ValueError when given an invalid distribution.
data = [1]
assert_raises(ValueError, stats.ppcc_max, data, dist="plate_of_shrimp")
def test_ppcc_max_basic(self):
np.random.seed(1234567)
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
# On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
# it is accurate up to 16 decimals
assert_almost_equal(stats.ppcc_max(x), -0.71215366521264145, decimal=5)
def test_dist(self):
np.random.seed(1234567)
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
# Test that we can specify distributions both by name and as objects.
max1 = stats.ppcc_max(x, dist='tukeylambda')
max2 = stats.ppcc_max(x, dist=stats.tukeylambda)
assert_almost_equal(max1, -0.71215366521264145, decimal=5)
assert_almost_equal(max2, -0.71215366521264145, decimal=5)
# Test that 'tukeylambda' is the default dist
max3 = stats.ppcc_max(x)
assert_almost_equal(max3, -0.71215366521264145, decimal=5)
def test_brack(self):
np.random.seed(1234567)
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
assert_raises(ValueError, stats.ppcc_max, x, brack=(0.0, 1.0, 0.5))
# On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
# it is accurate up to 16 decimals
assert_almost_equal(stats.ppcc_max(x, brack=(0, 1)),
-0.71215366521264145, decimal=5)
# On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
# it is accurate up to 16 decimals
assert_almost_equal(stats.ppcc_max(x, brack=(-2, 2)),
-0.71215366521264145, decimal=5)
class TestBoxcox_llf(object):
def test_basic(self):
np.random.seed(54321)
x = stats.norm.rvs(size=10000, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf_expected = -x.size / 2. * np.log(np.sum(x.std()**2))
assert_allclose(llf, llf_expected)
def test_array_like(self):
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, list(x))
assert_allclose(llf, llf2, rtol=1e-12)
def test_2d_input(self):
# Note: boxcox_llf() was already working with 2-D input (sort of), so
# keep it like that. boxcox() doesn't work with 2-D input though, due
# to brent() returning a scalar.
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T)
assert_allclose([llf, llf], llf2, rtol=1e-12)
def test_empty(self):
assert_(np.isnan(stats.boxcox_llf(1, [])))
def test_gh_6873(self):
# Regression test for gh-6873.
# This example was taken from gh-7534, a duplicate of gh-6873.
data = [198.0, 233.0, 233.0, 392.0]
llf = stats.boxcox_llf(-8, data)
# The expected value was computed with mpmath.
assert_allclose(llf, -17.93934208579061)
# This is the data from github user Qukaiyi, given as an example
# of a data set that caused boxcox to fail.
_boxcox_data = [
15957, 112079, 1039553, 711775, 173111, 307382, 183155, 53366, 760875,
207500, 160045, 473714, 40194, 440319, 133261, 265444, 155590, 36660,
904939, 55108, 138391, 339146, 458053, 63324, 1377727, 1342632, 41575,
68685, 172755, 63323, 368161, 199695, 538214, 167760, 388610, 398855,
1001873, 364591, 1320518, 194060, 194324, 2318551, 196114, 64225, 272000,
198668, 123585, 86420, 1925556, 695798, 88664, 46199, 759135, 28051,
345094, 1977752, 51778, 82746, 638126, 2560910, 45830, 140576, 1603787,
57371, 548730, 5343629, 2298913, 998813, 2156812, 423966, 68350, 145237,
131935, 1600305, 342359, 111398, 1409144, 281007, 60314, 242004, 113418,
246211, 61940, 95858, 957805, 40909, 307955, 174159, 124278, 241193,
872614, 304180, 146719, 64361, 87478, 509360, 167169, 933479, 620561,
483333, 97416, 143518, 286905, 597837, 2556043, 89065, 69944, 196858,
88883, 49379, 916265, 1527392, 626954, 54415, 89013, 2883386, 106096,
402697, 45578, 349852, 140379, 34648, 757343, 1305442, 2054757, 121232,
606048, 101492, 51426, 1820833, 83412, 136349, 1379924, 505977, 1303486,
95853, 146451, 285422, 2205423, 259020, 45864, 684547, 182014, 784334,
174793, 563068, 170745, 1195531, 63337, 71833, 199978, 2330904, 227335,
898280, 75294, 2011361, 116771, 157489, 807147, 1321443, 1148635, 2456524,
81839, 1228251, 97488, 1051892, 75397, 3009923, 2732230, 90923, 39735,
132433, 225033, 337555, 1204092, 686588, 1062402, 40362, 1361829, 1497217,
150074, 551459, 2019128, 39581, 45349, 1117187, 87845, 1877288, 164448,
10338362, 24942, 64737, 769946, 2469124, 2366997, 259124, 2667585, 29175,
56250, 74450, 96697, 5920978, 838375, 225914, 119494, 206004, 430907,
244083, 219495, 322239, 407426, 618748, 2087536, 2242124, 4736149, 124624,
406305, 240921, 2675273, 4425340, 821457, 578467, 28040, 348943, 48795,
145531, 52110, 1645730, 1768364, 348363, 85042, 2673847, 81935, 169075,
367733, 135474, 383327, 1207018, 93481, 5934183, 352190, 636533, 145870,
55659, 146215, 73191, 248681, 376907, 1606620, 169381, 81164, 246390,
236093, 885778, 335969, 49266, 381430, 307437, 350077, 34346, 49340,
84715, 527120, 40163, 46898, 4609439, 617038, 2239574, 159905, 118337,
120357, 430778, 3799158, 3516745, 54198, 2970796, 729239, 97848, 6317375,
887345, 58198, 88111, 867595, 210136, 1572103, 1420760, 574046, 845988,
509743, 397927, 1119016, 189955, 3883644, 291051, 126467, 1239907, 2556229,
411058, 657444, 2025234, 1211368, 93151, 577594, 4842264, 1531713, 305084,
479251, 20591, 1466166, 137417, 897756, 594767, 3606337, 32844, 82426,
1294831, 57174, 290167, 322066, 813146, 5671804, 4425684, 895607, 450598,
1048958, 232844, 56871, 46113, 70366, 701618, 97739, 157113, 865047,
194810, 1501615, 1765727, 38125, 2733376, 40642, 437590, 127337, 106310,
4167579, 665303, 809250, 1210317, 45750, 1853687, 348954, 156786, 90793,
1885504, 281501, 3902273, 359546, 797540, 623508, 3672775, 55330, 648221,
266831, 90030, 7118372, 735521, 1009925, 283901, 806005, 2434897, 94321,
309571, 4213597, 2213280, 120339, 64403, 8155209, 1686948, 4327743,
1868312, 135670, 3189615, 1569446, 706058, 58056, 2438625, 520619, 105201,
141961, 179990, 1351440, 3148662, 2804457, 2760144, 70775, 33807, 1926518,
2362142, 186761, 240941, 97860, 1040429, 1431035, 78892, 484039, 57845,
724126, 3166209, 175913, 159211, 1182095, 86734, 1921472, 513546, 326016,
1891609
]
class TestBoxcox(object):
def test_fixed_lmbda(self):
np.random.seed(12345)
x = stats.loggamma.rvs(5, size=50) + 5
xt = stats.boxcox(x, lmbda=1)
assert_allclose(xt, x - 1)
xt = stats.boxcox(x, lmbda=-1)
assert_allclose(xt, 1 - 1/x)
xt = stats.boxcox(x, lmbda=0)
assert_allclose(xt, np.log(x))
# Also test that array_like input works
xt = stats.boxcox(list(x), lmbda=0)
assert_allclose(xt, np.log(x))
def test_lmbda_None(self):
np.random.seed(1234567)
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
np.random.seed(1245)
lmbda = 2.5
x = stats.norm.rvs(loc=10, size=50000)
x_inv = (x * lmbda + 1)**(-lmbda)
xt, maxlog = stats.boxcox(x_inv)
assert_almost_equal(maxlog, -1 / lmbda, decimal=2)
def test_alpha(self):
np.random.seed(1234)
x = stats.loggamma.rvs(5, size=50) + 5
# Some regular values for alpha, on a small sample size
_, _, interval = stats.boxcox(x, alpha=0.75)
assert_allclose(interval, [4.004485780226041, 5.138756355035744])
_, _, interval = stats.boxcox(x, alpha=0.05)
assert_allclose(interval, [1.2138178554857557, 8.209033272375663])
        # Try some extreme values; check that we don't hit the N=500 limit
x = stats.loggamma.rvs(7, size=500) + 15
_, _, interval = stats.boxcox(x, alpha=0.001)
assert_allclose(interval, [0.3988867, 11.40553131])
_, _, interval = stats.boxcox(x, alpha=0.999)
assert_allclose(interval, [5.83316246, 5.83735292])
def test_boxcox_bad_arg(self):
# Raise ValueError if any data value is negative.
x = np.array([-1])
assert_raises(ValueError, stats.boxcox, x)
def test_empty(self):
assert_(stats.boxcox([]).shape == (0,))
def test_gh_6873(self):
# Regression test for gh-6873.
y, lam = stats.boxcox(_boxcox_data)
# The expected value of lam was computed with the function
# powerTransform in the R library 'car'. I trust that value
# to only about five significant digits.
assert_allclose(lam, -0.051654, rtol=1e-5)
class TestBoxcoxNormmax(object):
def setup_method(self):
np.random.seed(12345)
self.x = stats.loggamma.rvs(5, size=50) + 5
def test_pearsonr(self):
maxlog = stats.boxcox_normmax(self.x)
assert_allclose(maxlog, 1.804465, rtol=1e-6)
def test_mle(self):
maxlog = stats.boxcox_normmax(self.x, method='mle')
assert_allclose(maxlog, 1.758101, rtol=1e-6)
# Check that boxcox() uses 'mle'
_, maxlog_boxcox = stats.boxcox(self.x)
assert_allclose(maxlog_boxcox, maxlog)
def test_all(self):
maxlog_all = stats.boxcox_normmax(self.x, method='all')
assert_allclose(maxlog_all, [1.804465, 1.758101], rtol=1e-6)
class TestBoxcoxNormplot(object):
def setup_method(self):
np.random.seed(7654321)
self.x = stats.loggamma.rvs(5, size=500) + 5
def test_basic(self):
N = 5
lmbdas, ppcc = stats.boxcox_normplot(self.x, -10, 10, N=N)
ppcc_expected = [0.57783375, 0.83610988, 0.97524311, 0.99756057,
0.95843297]
assert_allclose(lmbdas, np.linspace(-10, 10, num=N))
assert_allclose(ppcc, ppcc_expected)
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_plot_kwarg(self):
# Check with the matplotlib.pyplot module
fig = plt.figure()
ax = fig.add_subplot(111)
stats.boxcox_normplot(self.x, -20, 20, plot=plt)
fig.delaxes(ax)
# Check that a Matplotlib Axes object is accepted
ax = fig.add_subplot(111)
stats.boxcox_normplot(self.x, -20, 20, plot=ax)
plt.close()
def test_invalid_inputs(self):
# `lb` has to be larger than `la`
assert_raises(ValueError, stats.boxcox_normplot, self.x, 1, 0)
# `x` can not contain negative values
assert_raises(ValueError, stats.boxcox_normplot, [-1, 1], 0, 1)
def test_empty(self):
assert_(stats.boxcox_normplot([], 0, 1).size == 0)
class TestYeojohnson_llf(object):
def test_array_like(self):
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=0)
lmbda = 1
llf = stats.yeojohnson_llf(lmbda, x)
llf2 = stats.yeojohnson_llf(lmbda, list(x))
assert_allclose(llf, llf2, rtol=1e-12)
def test_2d_input(self):
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.yeojohnson_llf(lmbda, x)
llf2 = stats.yeojohnson_llf(lmbda, np.vstack([x, x]).T)
assert_allclose([llf, llf], llf2, rtol=1e-12)
def test_empty(self):
assert_(np.isnan(stats.yeojohnson_llf(1, [])))
class TestYeojohnson(object):
def test_fixed_lmbda(self):
np.random.seed(12345)
# Test positive input
x = stats.loggamma.rvs(5, size=50) + 5
assert np.all(x > 0)
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt, x)
xt = stats.yeojohnson(x, lmbda=-1)
assert_allclose(xt, 1 - 1 / (x + 1))
xt = stats.yeojohnson(x, lmbda=0)
assert_allclose(xt, np.log(x + 1))
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt, x)
# Test negative input
x = stats.loggamma.rvs(5, size=50) - 5
assert np.all(x < 0)
xt = stats.yeojohnson(x, lmbda=2)
assert_allclose(xt, -np.log(-x + 1))
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt, x)
xt = stats.yeojohnson(x, lmbda=3)
assert_allclose(xt, 1 / (-x + 1) - 1)
# test both positive and negative input
x = stats.loggamma.rvs(5, size=50) - 2
assert not np.all(x < 0)
assert not np.all(x >= 0)
pos = x >= 0
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt[pos], x[pos])
xt = stats.yeojohnson(x, lmbda=-1)
assert_allclose(xt[pos], 1 - 1 / (x[pos] + 1))
xt = stats.yeojohnson(x, lmbda=0)
assert_allclose(xt[pos], np.log(x[pos] + 1))
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt[pos], x[pos])
neg = ~pos
xt = stats.yeojohnson(x, lmbda=2)
assert_allclose(xt[neg], -np.log(-x[neg] + 1))
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt[neg], x[neg])
xt = stats.yeojohnson(x, lmbda=3)
assert_allclose(xt[neg], 1 / (-x[neg] + 1) - 1)
@pytest.mark.parametrize('lmbda', [0, .1, .5, 2])
def test_lmbda_None(self, lmbda):
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
def _inverse_transform(x, lmbda):
x_inv = np.zeros(x.shape, dtype=x.dtype)
pos = x >= 0
# when x >= 0
if abs(lmbda) < np.spacing(1.):
x_inv[pos] = np.exp(x[pos]) - 1
else: # lmbda != 0
x_inv[pos] = np.power(x[pos] * lmbda + 1, 1 / lmbda) - 1
# when x < 0
if abs(lmbda - 2) > np.spacing(1.):
x_inv[~pos] = 1 - np.power(-(2 - lmbda) * x[~pos] + 1,
1 / (2 - lmbda))
else: # lmbda == 2
x_inv[~pos] = 1 - np.exp(-x[~pos])
return x_inv
np.random.seed(1234567)
n_samples = 20000
x = np.random.normal(loc=0, scale=1, size=(n_samples))
x_inv = _inverse_transform(x, lmbda)
xt, maxlog = stats.yeojohnson(x_inv)
assert_allclose(maxlog, lmbda, atol=1e-2)
assert_almost_equal(0, np.linalg.norm(x - xt) / n_samples, decimal=2)
assert_almost_equal(0, xt.mean(), decimal=1)
assert_almost_equal(1, xt.std(), decimal=1)
def test_empty(self):
assert_(stats.yeojohnson([]).shape == (0,))
def test_array_like(self):
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=0)
lmbda = 1.5
xt1, _ = stats.yeojohnson(x)
xt2, _ = stats.yeojohnson(list(x))
assert_allclose(xt1, xt2, rtol=1e-12)
class TestYeojohnsonNormmax(object):
def setup_method(self):
np.random.seed(12345)
self.x = stats.loggamma.rvs(5, size=50) + 5
def test_mle(self):
maxlog = stats.yeojohnson_normmax(self.x)
assert_allclose(maxlog, 1.876393, rtol=1e-6)
def test_darwin_example(self):
# test from original paper "A new family of power transformations to
# improve normality or symmetry" by Yeo and Johnson.
x = [6.1, -8.4, 1.0, 2.0, 0.7, 2.9, 3.5, 5.1, 1.8, 3.6, 7.0, 3.0, 9.3,
7.5, -6.0]
lmbda = stats.yeojohnson_normmax(x)
assert np.allclose(lmbda, 1.305, atol=1e-3)
class TestCircFuncs(object):
def test_circfuncs(self):
x = np.array([355, 5, 2, 359, 10, 350])
M = stats.circmean(x, high=360)
Mval = 0.167690146
assert_allclose(M, Mval, rtol=1e-7)
V = stats.circvar(x, high=360)
Vval = 42.51955609
assert_allclose(V, Vval, rtol=1e-7)
S = stats.circstd(x, high=360)
Sval = 6.520702116
assert_allclose(S, Sval, rtol=1e-7)
def test_circfuncs_small(self):
x = np.array([20, 21, 22, 18, 19, 20.5, 19.2])
M1 = x.mean()
M2 = stats.circmean(x, high=360)
assert_allclose(M2, M1, rtol=1e-5)
V1 = x.var()
V2 = stats.circvar(x, high=360)
assert_allclose(V2, V1, rtol=1e-4)
S1 = x.std()
S2 = stats.circstd(x, high=360)
assert_allclose(S2, S1, rtol=1e-4)
def test_circmean_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
M1 = stats.circmean(x, high=360)
M2 = stats.circmean(x.ravel(), high=360)
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=1)
M2 = [stats.circmean(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=0)
M2 = [stats.circmean(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(M1, M2, rtol=1e-14)
def test_circvar_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
V1 = stats.circvar(x, high=360)
V2 = stats.circvar(x.ravel(), high=360)
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=1)
V2 = [stats.circvar(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=0)
V2 = [stats.circvar(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(V1, V2, rtol=1e-11)
def test_circstd_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
S1 = stats.circstd(x, high=360)
S2 = stats.circstd(x.ravel(), high=360)
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=1)
S2 = [stats.circstd(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=0)
S2 = [stats.circstd(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(S1, S2, rtol=1e-11)
def test_circfuncs_array_like(self):
x = [355, 5, 2, 359, 10, 350]
assert_allclose(stats.circmean(x, high=360), 0.167690146, rtol=1e-7)
assert_allclose(stats.circvar(x, high=360), 42.51955609, rtol=1e-7)
assert_allclose(stats.circstd(x, high=360), 6.520702116, rtol=1e-7)
def test_empty(self):
assert_(np.isnan(stats.circmean([])))
assert_(np.isnan(stats.circstd([])))
assert_(np.isnan(stats.circvar([])))
def test_circmean_scalar(self):
x = 1.
M1 = x
M2 = stats.circmean(x)
assert_allclose(M2, M1, rtol=1e-5)
def test_circmean_range(self):
# regression test for gh-6420: circmean(..., high, low) must be
# between `high` and `low`
m = stats.circmean(np.arange(0, 2, 0.1), np.pi, -np.pi)
assert_(m < np.pi)
assert_(m > -np.pi)
def test_circfuncs_unit8(self):
# regression test for gh-7255: overflow when working with
# numpy uint8 data type
x = np.array([150, 10], dtype='uint8')
assert_equal(stats.circmean(x, high=180), 170.0)
assert_allclose(stats.circvar(x, high=180), 437.45871686, rtol=1e-7)
assert_allclose(stats.circstd(x, high=180), 20.91551378, rtol=1e-7)
class TestMedianTest(object):
def test_bad_n_samples(self):
# median_test requires at least two samples.
assert_raises(ValueError, stats.median_test, [1, 2, 3])
def test_empty_sample(self):
# Each sample must contain at least one value.
assert_raises(ValueError, stats.median_test, [], [1, 2, 3])
def test_empty_when_ties_ignored(self):
# The grand median is 1, and all values in the first argument are
# equal to the grand median. With ties="ignore", those values are
# ignored, which results in the first sample being (in effect) empty.
# This should raise a ValueError.
assert_raises(ValueError, stats.median_test,
[1, 1, 1, 1], [2, 0, 1], [2, 0], ties="ignore")
def test_empty_contingency_row(self):
# The grand median is 1, and with the default ties="below", all the
# values in the samples are counted as being below the grand median.
        # This would result in a row of zeros in the contingency table, which is
# an error.
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1])
# With ties="above", all the values are counted as above the
# grand median.
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1],
ties="above")
def test_bad_ties(self):
assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5],
ties="foo")
def test_bad_nan_policy(self):
assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5], nan_policy='foobar')
def test_bad_keyword(self):
assert_raises(TypeError, stats.median_test, [1, 2, 3], [4, 5],
foo="foo")
def test_simple(self):
x = [1, 2, 3]
y = [1, 2, 3]
stat, p, med, tbl = stats.median_test(x, y)
# The median is floating point, but this equality test should be safe.
assert_equal(med, 2.0)
assert_array_equal(tbl, [[1, 1], [2, 2]])
# The expected values of the contingency table equal the contingency
# table, so the statistic should be 0 and the p-value should be 1.
assert_equal(stat, 0)
assert_equal(p, 1)
def test_ties_options(self):
# Test the contingency table calculation.
x = [1, 2, 3, 4]
y = [5, 6]
z = [7, 8, 9]
# grand median is 5.
# Default 'ties' option is "below".
stat, p, m, tbl = stats.median_test(x, y, z)
assert_equal(m, 5)
assert_equal(tbl, [[0, 1, 3], [4, 1, 0]])
stat, p, m, tbl = stats.median_test(x, y, z, ties="ignore")
assert_equal(m, 5)
assert_equal(tbl, [[0, 1, 3], [4, 0, 0]])
stat, p, m, tbl = stats.median_test(x, y, z, ties="above")
assert_equal(m, 5)
assert_equal(tbl, [[0, 2, 3], [4, 0, 0]])
def test_nan_policy_options(self):
x = [1, 2, np.nan]
y = [4, 5, 6]
mt1 = stats.median_test(x, y, nan_policy='propagate')
s, p, m, t = stats.median_test(x, y, nan_policy='omit')
assert_equal(mt1, (np.nan, np.nan, np.nan, None))
assert_allclose(s, 0.31250000000000006)
assert_allclose(p, 0.57615012203057869)
assert_equal(m, 4.0)
assert_equal(t, np.array([[0, 2],[2, 1]]))
assert_raises(ValueError, stats.median_test, x, y, nan_policy='raise')
def test_basic(self):
# median_test calls chi2_contingency to compute the test statistic
# and p-value. Make sure it hasn't screwed up the call...
x = [1, 2, 3, 4, 5]
y = [2, 4, 6, 8]
stat, p, m, tbl = stats.median_test(x, y)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
stat, p, m, tbl = stats.median_test(x, y, lambda_=0)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, lambda_=0)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
stat, p, m, tbl = stats.median_test(x, y, correction=False)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, correction=False)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
| bsd-3-clause |
wangsharp/trading-with-python | spreadApp/makeDist.py | 77 | 1720 | from distutils.core import setup
import py2exe
manifest_template = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s Program</description>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
RT_MANIFEST = 24
import matplotlib
opts = {
'py2exe': {
"compressed": 1,
"bundle_files" : 3,
"includes" : ["sip",
"matplotlib.backends",
"matplotlib.backends.backend_qt4agg",
"pylab", "numpy",
"matplotlib.backends.backend_tkagg"],
'excludes': ['_gtkagg', '_tkagg', '_agg2',
'_cairo', '_cocoaagg',
'_fltkagg', '_gtk', '_gtkcairo', ],
'dll_excludes': ['libgdk-win32-2.0-0.dll',
'libgobject-2.0-0.dll']
}
}
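# Build note (a sketch, not part of the original script): py2exe setup files are
# typically invoked from the command line with the py2exe command, e.g.
#
#   python makeDist.py py2exe
#
# which bundles spreadScanner.pyw together with the backends listed above into
# the generated dist\ directory.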
setup(name="triton",
version = "0.1",
scripts=["spreadScanner.pyw"],
windows=[{"script": "spreadScanner.pyw"}],
options=opts,
data_files=matplotlib.get_py2exe_datafiles(),
other_resources = [(RT_MANIFEST, 1, manifest_template % dict(prog="spreadDetective"))],
zipfile = None) | bsd-3-clause |
fabioticconi/scikit-learn | examples/linear_model/plot_lasso_coordinate_descent_path.py | 42 | 2944 | """
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
from itertools import cycle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positve elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
# Display results
plt.figure(1)
ax = plt.gca()
colors = cycle(['b', 'r', 'g', 'c', 'k'])
neg_log_alphas_lasso = -np.log10(alphas_lasso)
neg_log_alphas_enet = -np.log10(alphas_enet)
for coef_l, coef_e, c in zip(coefs_lasso, coefs_enet, colors):
l1 = plt.plot(neg_log_alphas_lasso, coef_l, c=c)
l2 = plt.plot(neg_log_alphas_enet, coef_e, linestyle='--', c=c)
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
neg_log_alphas_positive_lasso = -np.log10(alphas_positive_lasso)
for coef_l, coef_pl, c in zip(coefs_lasso, coefs_positive_lasso, colors):
l1 = plt.plot(neg_log_alphas_lasso, coef_l, c=c)
l2 = plt.plot(neg_log_alphas_positive_lasso, coef_pl, linestyle='--', c=c)
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
neg_log_alphas_positive_enet = -np.log10(alphas_positive_enet)
for (coef_e, coef_pe, c) in zip(coefs_enet, coefs_positive_enet, colors):
l1 = plt.plot(neg_log_alphas_enet, coef_e, c=c)
l2 = plt.plot(neg_log_alphas_positive_enet, coef_pe, linestyle='--', c=c)
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
plt.axis('tight')
plt.show()
| bsd-3-clause |
markcheno/trading-with-python | lib/classes.py | 76 | 7847 | """
worker classes
@author: Jev Kuznetsov
Licence: GPL v2
"""
__docformat__ = 'restructuredtext'
import os
import logger as logger
import yahooFinance as yahoo
from functions import returns, rank
from datetime import date
from pandas import DataFrame, Series
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class Symbol(object):
'''
Symbol class, the foundation of Trading With Python library,
This class acts as an interface to Yahoo data, Interactive Brokers etc
'''
def __init__(self,name):
self.name = name
self.log = logger.getLogger(self.name)
self.log.debug('class created.')
self.dataDir = os.getenv("USERPROFILE")+'\\twpData\\symbols\\'+self.name
self.log.debug('Data dir:'+self.dataDir)
self.ohlc = None # historic OHLC data
def downloadHistData(self, startDate=(2010,1,1),endDate=date.today().timetuple()[:3],\
source = 'yahoo'):
'''
get historical OHLC data from a data source (yahoo is default)
startDate and endDate are tuples in form (d,m,y)
'''
self.log.debug('Getting OHLC data')
self.ohlc = yahoo.getHistoricData(self.name,startDate,endDate)
def histData(self,column='adj_close'):
'''
Return a column of historic data.
Returns
-------------
df : DataFrame
'''
s = self.ohlc[column]
return DataFrame(s.values,s.index,[self.name])
@property
def dayReturns(self):
''' close-close returns '''
return (self.ohlc['adj_close']/self.ohlc['adj_close'].shift(1)-1)
#return DataFrame(s.values,s.index,[self.name])
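# Illustrative usage of Symbol (a sketch, not part of the original library;
# assumes Yahoo Finance data is reachable and the twpData directory exists):
#
#   spy = Symbol('SPY')
#   spy.downloadHistData(startDate=(2012, 1, 1))
#   prices = spy.histData('adj_close')   # one-column DataFrame named 'SPY'
#   rets = spy.dayReturns                # close-to-close daily returns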
class Portfolio(object):
def __init__(self,histPrice,name=''):
"""
Constructor
Parameters
----------
histPrice : historic price
"""
self.histPrice = histPrice
self.params = DataFrame(index=self.symbols)
self.params['capital'] = 100*np.ones(self.histPrice.shape[1],dtype=np.float)
self.params['last'] = self.histPrice.tail(1).T.ix[:,0]
self.params['shares'] = self.params['capital']/self.params['last']
self.name= name
def setHistPrice(self,histPrice):
self.histPrice = histPrice
def setShares(self,shares):
""" set number of shares, adjust capital
shares: list, np array or Series
"""
if len(shares) != self.histPrice.shape[1]:
raise AttributeError('Wrong size of shares vector.')
self.params['shares'] = shares
self.params['capital'] = self.params['shares']*self.params['last']
def setCapital(self,capital):
""" Set target captial, adjust number of shares """
if len(capital) != self.histPrice.shape[1]:
raise AttributeError('Wrong size of shares vector.')
self.params['capital'] = capital
self.params['shares'] = self.params['capital']/self.params['last']
def calculateStatistics(self,other=None):
''' calculate spread statistics, save internally '''
res = {}
res['micro'] = rank(self.returns[-1],self.returns)
res['macro'] = rank(self.value[-1], self.value)
res['last'] = self.value[-1]
if other is not None:
res['corr'] = self.returns.corr(returns(other))
return Series(res,name=self.name)
@property
def symbols(self):
return self.histPrice.columns.tolist()
@property
def returns(self):
return (returns(self.histPrice)*self.params['capital']).sum(axis=1)
@property
def value(self):
return (self.histPrice*self.params['shares']).sum(axis=1)
def __repr__(self):
return ("Portfolio %s \n" % self.name ) + str(self.params)
#return ('Spread %s :' % self.name ) + str.join(',',
# ['%s*%.2f' % t for t in zip(self.symbols,self.capital)])
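# Illustrative usage of Portfolio (a sketch, not part of the original library):
# ``histPrice`` is assumed to be a pandas DataFrame of historic prices with one
# column per symbol (two columns in this example).
#
#   p = Portfolio(histPrice, name='demo')
#   p.setCapital([100., 200.])     # target capital per symbol; shares are adjusted
#   print p.calculateStatistics()  # Python 2 print, consistent with this module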
class Spread(object):
'''
Spread class, used to build a spread out of two symbols.
'''
def __init__(self,stock,hedge,beta=None):
''' init with symbols or price series '''
if isinstance(stock,str) and isinstance(hedge,str):
self.symbols = [stock,hedge]
self._getYahooData()
elif isinstance(stock,pd.Series) and isinstance(hedge,pd.Series):
self.symbols = [stock.name,hedge.name]
self.price = pd.DataFrame(dict(zip(self.symbols,[stock,hedge]))).dropna()
else:
raise ValueError('Both stock and hedge should be of the same type, symbol string or Series')
# calculate returns
self.returns = self.price.pct_change()
if beta is not None:
self.beta = beta
else:
self.estimateBeta()
# set data
self.data = pd.DataFrame(index = self.symbols)
self.data['beta'] = pd.Series({self.symbols[0]:1., self.symbols[1]:-self.beta})
def calculateShares(self,bet):
''' set number of shares based on last quote '''
if 'price' not in self.data.columns:
print 'Getting quote...'
self.getQuote()
self.data['shares'] = bet*self.data['beta']/self.data['price']
def estimateBeta(self,plotOn=False):
""" linear estimation of beta """
x = self.returns[self.symbols[1]] # hedge
y = self.returns[self.symbols[0]] # stock
#avoid extremes
low = np.percentile(x,20)
high = np.percentile(x,80)
iValid = (x>low) & (x<high)
x = x[iValid]
y = y[iValid]
if plotOn:
plt.plot(x,y,'o')
plt.grid(True)
iteration = 1
nrOutliers = 1
while iteration < 3 and nrOutliers > 0 :
(a,b) = np.polyfit(x,y,1)
yf = np.polyval([a,b],x)
err = yf-y
idxOutlier = abs(err) > 3*np.std(err)
nrOutliers =sum(idxOutlier)
beta = a
#print 'Iteration: %i beta: %.2f outliers: %i' % (iteration,beta, nrOutliers)
x = x[~idxOutlier]
y = y[~idxOutlier]
iteration += 1
if plotOn:
yf = x*beta
plt.plot(x,yf,'-',color='red')
plt.xlabel(self.symbols[1])
plt.ylabel(self.symbols[0])
self.beta = beta
return beta
@property
def spread(self):
''' return daily returns of the pair '''
return (self.returns*self.data['beta']).sum(1)
def getQuote(self):
''' get current quote from yahoo '''
q = yahoo.getQuote(self.symbols)
self.data['price'] = q['last']
def _getYahooData(self, startDate=(2007,1,1)):
""" fetch historic data """
data = {}
for symbol in self.symbols:
print 'Downloading %s' % symbol
data[symbol]=(yahoo.getHistoricData(symbol,sDate=startDate)['adj_close'] )
self.price = pd.DataFrame(data).dropna()
def __repr__(self):
return 'Spread 1*%s & %.2f*%s ' % (self.symbols[0],-self.beta,self.symbols[1])
@property
def name(self):
return str.join('_',self.symbols)
if __name__=='__main__':
    s = Spread('SPY', 'IWM')
| bsd-3-clause |
jjx02230808/project0223 | sklearn/ensemble/partial_dependence.py | 251 | 15097 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0] == grid_resolution * X.shape[1]``.
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
            # feature has low resolution, use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
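# Illustrative sketch of the grid construction above (not part of the original
# module): a feature with fewer unique values than ``grid_resolution`` keeps its
# unique values as the axis, otherwise a linspace between the two percentiles is
# used. For example:
#
#   X = np.array([[0., 10.], [1., 20.], [1., 30.]])
#   grid, axes = _grid_from_X(X, grid_resolution=5)
#   # axes[0] -> array([0., 1.]), axes[1] -> array([10., 20., 30.])
#   # grid is their cartesian product, with shape (6, 2)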
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
        # don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
        Dict with keywords passed to the ``pylab.contourf`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
vsoch/expfactory-docker | expdj/settings.py | 2 | 5848 | """
Django settings for expdj project.
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
import tempfile
from datetime import timedelta
import matplotlib
from celery import Celery
from kombu import Exchange, Queue
matplotlib.use('Agg')
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DOMAIN_NAME = "https://expfactory.org" # MUST BE HTTPS FOR MECHANICAL TURK
DOMAIN_NAME_HTTP = "http://expfactory.org" # MUST BE HTTPS FOR MECHANICAL TURK
ADMINS = (('rblair', 'rosswilsonblair@gmail.com'),)
MANAGERS = ADMINS
DEBUG = True
MTURK_ALLOW = False # Allow users to deploy to real Mturk (not just sandbox)
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ["*"]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'postgres',
'USER': 'postgres',
'HOST': 'db',
'PORT': '5432',
}
}
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.sitemaps',
'django_user_agents',
'django.contrib.staticfiles',
'django_extensions',
'expdj.apps.main',
'expdj.apps.turk',
'expdj.apps.experiments',
'expdj.apps.users',
'crispy_forms',
'polymorphic',
'guardian',
'dbbackup',
'djcelery',
'rest_framework',
'rest_framework.authtoken',
)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'social.backends.facebook.FacebookOAuth2',
'social.backends.google.GoogleOAuth2',
'guardian.backends.ObjectPermissionBackend'
)
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'social.pipeline.social_auth.associate_by_email', # <--- enable this one
'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details'
)
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django_user_agents.middleware.UserAgentMiddleware',
)
ROOT_URLCONF = 'expdj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
WSGI_APPLICATION = 'expdj.wsgi.application'
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
SITE_ID = 1
ANONYMOUS_USER_ID = -1 # django-guardian
# Static files (CSS, JavaScript, Images)
MEDIA_ROOT = './static'
MEDIA_URL = '/static/'
STATIC_ROOT = './assets'
STATIC_URL = '/assets/'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
SENDFILE_BACKEND = 'sendfile.backends.development'
PRIVATE_MEDIA_REDIRECT_HEADER = 'X-Accel-Redirect'
CRISPY_TEMPLATE_PACK = 'bootstrap3'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
# Celery config
BROKER_URL = 'redis://redis:6379/0'
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_DEFAULT_QUEUE = 'default'
CELERY_QUEUES = (
Queue('default', Exchange('default'), routing_key='default'),
)
CELERY_IMPORTS = ('expdj.apps.turk.tasks', )
# here is how to run a task regularly
# CELERYBEAT_SCHEDULE = {
# 'task name': {
# 'task': 'task_name',
# 'schedule': timedelta(days=1)
# },
# }
CELERY_TIMEZONE = 'Europe/Berlin'
# REST FRAMEWORK
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticatedOrReadOnly',
],
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 10,
}
CSRF_COOKIE_SECURE = False
SESSION_COOKIE_SECURE = False
# SSL ENABLED
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
EXP_REPO = os.path.join(BASE_DIR, 'expdj/experiment_repo')
# Bogus secret key.
try:
from secrets import *
except ImportError:
from bogus_secrets import *
# Local settings
try:
from local_settings import *
except ImportError:
pass
| mit |
apache/incubator-mxnet | python/mxnet/numpy/random.py | 4 | 40839 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Namespace for ops used in imperative programming."""
from ..ndarray import numpy as _mx_nd_np
from ..random import seed
__all__ = ["randint", "uniform", "normal", "choice", "rand", "multinomial", "multivariate_normal",
"logistic", "gumbel", "f",
"laplace",
"shuffle", "randn", "gamma", "beta", "chisquare", "exponential", "lognormal",
"weibull", "pareto", "power", "rayleigh",
"seed"]
def randint(low, high=None, size=None, dtype=None, ctx=None, out=None):
r"""Return random integers from `low` (inclusive) to `high` (exclusive).
Return random integers from the "discrete uniform" distribution of
the specified dtype in the "half-open" interval [`low`, `high`). If
`high` is None (the default), then results are from [0, `low`).
Parameters
----------
low : int
Lowest (signed) integer to be drawn from the distribution (unless
``high=None``, in which case this parameter is one above the
*highest* such integer).
high : int, optional
If provided, one above the largest (signed) integer to be drawn
from the distribution (see above for behavior if ``high=None``).
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
dtype : dtype, optional
Desired dtype of the result. All dtypes are determined by their
name, i.e., 'int64', 'int', etc, so byteorder is not available
and a specific precision may have different C types depending
on the platform. The default value is 'np.int'.
ctx : Context, optional
Device context of output. Default is current context.
out : ndarray, optional
The output ndarray (default is `None`).
Returns
-------
out : ndarray of ints
`size`-shaped array of random integers from the appropriate
distribution, or a single such random int if `size` not provided.
Examples
--------
>>> np.random.randint(2, size=10)
array([1, 0, 0, 0, 1, 1, 0, 0, 1, 0])
>>> np.random.randint(1, size=10)
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
Generate a 2 x 4 array of ints between 0 and 4, inclusive:
>>> np.random.randint(5, size=(2, 4))
array([[4, 0, 2, 1],
[3, 2, 2, 0]])
"""
return _mx_nd_np.random.randint(low, high, size, dtype, ctx, out)
def uniform(low=0.0, high=1.0, size=None, dtype=None, ctx=None, out=None):
r"""Draw samples from a uniform distribution.
Samples are uniformly distributed over the half-open interval
``[low, high)`` (includes low, but excludes high). In other words,
any value within the given interval is equally likely to be drawn
by `uniform`.
Parameters
----------
low : float, ndarray, optional
Lower boundary of the output interval. All values generated will be
greater than or equal to low. The default value is 0.
high : float, ndarray, optional
Upper boundary of the output interval. All values generated will be
less than high. The default value is 1.0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a scalar tensor containing a single value is returned if
``low`` and ``high`` are both scalars. Otherwise,
``np.broadcast(low, high).size`` samples are drawn.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
ctx : Context, optional
Device context of output. Default is current context.
Returns
-------
out : ndarray
Drawn samples from the parameterized uniform distribution.
See Also
--------
randint : Discrete uniform distribution, yielding integers.
rand : Convenience function that accepts dimensions as input, e.g.,
``rand(2,2)`` would generate a 2-by-2 array of floats,
uniformly distributed over ``[0, 1)``.
Notes
-----
The probability density function of the uniform distribution is
.. math:: p(x) = \frac{1}{b - a}
anywhere within the interval ``[a, b)``, and zero elsewhere.
When ``high`` == ``low``, values of ``low`` will be returned.
If ``high`` < ``low``, the results are officially undefined
and may eventually raise an error, i.e. do not rely on this
function to behave when passed arguments satisfying that
inequality condition.
"""
return _mx_nd_np.random.uniform(low, high, size=size, ctx=ctx, dtype=dtype, out=out)
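# Illustrative usage (a sketch, not part of the original module):
#
#   from mxnet import np, npx
#   npx.set_np()
#   samples = np.random.uniform(low=0, high=10, size=(2, 3))  # 2x3 floats in [0, 10)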
def normal(loc=0.0, scale=1.0, size=None, dtype=None, ctx=None, out=None):
r"""Draw random samples from a normal (Gaussian) distribution.
Samples are distributed according to a normal distribution parametrized
by *loc* (mean) and *scale* (standard deviation).
Parameters
----------
loc : float, optional
Mean (centre) of the distribution.
scale : float, optional
Standard deviation (spread or "width") of the distribution.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., `(m, n, k)`, then `m * n * k`
samples are drawn. If size is `None` (default), a scalar tensor containing
a single value is returned if loc and scale are both scalars. Otherwise,
        ``np.broadcast(loc, scale).size`` samples are drawn.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
ctx : Context, optional
Device context of output, default is current context.
out : ``ndarray``, optional
Store output to an existing ``ndarray``.
Returns
-------
out : ndarray
Drawn samples from the parameterized `normal distribution` [1]_.
Notes
-----
The probability density for the Gaussian distribution is
.. math:: p(x) = \frac{1}{\sqrt{ 2 \pi \sigma^2 }}
e^{ - \frac{ (x - \mu)^2 } {2 \sigma^2} },
where :math:`\mu` is the mean and :math:`\sigma` the standard
deviation. The square of the standard deviation, :math:`\sigma^2`,
is called the variance.
The function has its peak at the mean, and its "spread" increases with
the standard deviation (the function reaches 0.607 times its maximum at
:math:`x + \sigma` and :math:`x - \sigma` [2]_). This implies that
`numpy.random.normal` is more likely to return samples lying close to
the mean, rather than those far away.
References
----------
.. [1] Wikipedia, "Normal distribution",
https://en.wikipedia.org/wiki/Normal_distribution
.. [2] P. R. Peebles Jr., "Central Limit Theorem" in "Probability,
Random Variables and Random Signal Principles", 4th ed., 2001,
pp. 51, 125.
Examples
--------
>>> mu, sigma = 0, 0.1 # mean and standard deviation
>>> s = np.random.normal(mu, sigma, 1000)
Verify the mean and the variance:
>>> np.abs(mu - np.mean(s)) < 0.01
array(True)
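An illustrative (randomized) check of the spread, assuming ``np.std`` is
available in this namespace:
>>> np.abs(sigma - np.std(s)) < 0.01
array(True)  # random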
"""
return _mx_nd_np.random.normal(loc, scale, size, dtype, ctx, out)
def lognormal(mean=0.0, sigma=1.0, size=None, dtype=None, ctx=None, out=None):
r"""Draw samples from a log-normal distribution.
Draw samples from a `log-normal distribution` [1]_ with specified mean,
standard deviation, and array shape. Note that the mean and standard
deviation are not the values for the distribution itself, but of the
underlying normal distribution it is derived from.
Parameters
----------
mean : float or array_like of floats, optional
Mean value of the underlying normal distribution. Default is 0.
sigma : float or array_like of floats, optional
Standard deviation of the underlying normal distribution. Must be
non-negative. Default is 1.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``mean`` and ``sigma`` are both scalars.
Otherwise, ``np.broadcast(mean, sigma).size`` samples are drawn.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context.
out : ``ndarray``, optional
Store output to an existing ``ndarray``.
Returns
-------
out : ndarray or scalar
Drawn samples from the parameterized log-normal distribution.
Notes
-----
A variable `x` has a log-normal distribution if `log(x)` is normally
distributed. The `probability density function for the log-normal
distribution` [2]_ is:
.. math:: p(x) = \frac{1}{\sigma x \sqrt{2\pi}}
e^{-\frac{(\ln(x)-\mu)^2}{2\sigma^2}}
where :math:`\mu` is the mean and :math:`\sigma` is the standard
deviation of the normally distributed logarithm of the variable.
A log-normal distribution results if a random variable is the *product*
of a large number of independent, identically-distributed variables in
the same way that a normal distribution results if the variable is the
*sum* of a large number of independent, identically-distributed
variables.
References
----------
.. [1] Limpert, E., Stahel, W. A., and Abbt, M., "Log-normal
Distributions across the Sciences: Keys and Clues,"
BioScience, Vol. 51, No. 5, May, 2001.
https://stat.ethz.ch/~stahel/lognormal/bioscience.pdf
.. [2] Reiss, R.D. and Thomas, M., "Statistical Analysis of Extreme
Values," Basel: Birkhauser Verlag, 2001, pp. 31-32.
Examples
--------
Draw samples from the distribution:
>>> mu, sigma = 3., 1. # mean and standard deviation
>>> s = np.random.lognormal(mu, sigma, 1000)
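An illustrative (randomized) sanity check: the log of the samples should be
approximately normal with the chosen ``mu`` and ``sigma`` (``np.log`` is
assumed to be available in this namespace):
>>> np.abs(mu - np.log(s).mean()) < 0.2
array(True)  # random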
"""
return _mx_nd_np.random.lognormal(mean, sigma, size, dtype, ctx, out)
def logistic(loc=0.0, scale=1.0, size=None, ctx=None, out=None):
r"""Draw samples from a logistic distribution.
Samples are drawn from a logistic distribution with specified
parameters, loc (location or mean, also median), and scale (>0).
Parameters
----------
loc : float or array_like of floats, optional
Parameter of the distribution. Default is 0.
scale : float or array_like of floats, optional
Parameter of the distribution. Must be non-negative.
Default is 1.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``loc`` and ``scale`` are both scalars.
Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.
ctx : Context, optional
Device context of output, default is current context.
out : ``ndarray``, optional
Store output to an existing ``ndarray``.
Returns
-------
out : ndarray or scalar
Drawn samples from the parameterized logistic distribution.
Examples
--------
Draw samples from the distribution:
>>> loc, scale = 10, 1
>>> s = np.random.logistic(loc, scale, 10000)
>>> import matplotlib.pyplot as plt
>>> count, bins, ignored = plt.hist(s, bins=50)
# plot against distribution
>>> def logist(x, loc, scale):
... return np.exp((loc-x)/scale)/(scale*(1+np.exp((loc-x)/scale))**2)
>>> lgst_val = logist(bins, loc, scale)
>>> plt.plot(bins, lgst_val * count.max() / lgst_val.max())
>>> plt.show()
"""
return _mx_nd_np.random.logistic(loc, scale, size, ctx, out)
def gumbel(loc=0.0, scale=1.0, size=None, ctx=None, out=None):
r"""Draw samples from a Gumbel distribution.
Draw samples from a Gumbel distribution with specified location and
scale.
Parameters
----------
loc : float or array_like of floats, optional
The location of the mode of the distribution. Default is 0.
scale : float or array_like of floats, optional
The scale parameter of the distribution. Default is 1. Must be non-
negative.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``loc`` and ``scale`` are both scalars.
Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.
ctx : Context, optional
Device context of output, default is current context.
out : ``ndarray``, optional
Store output to an existing ``ndarray``.
Returns
-------
out : ndarray or scalar
Drawn samples from the parameterized Gumbel distribution.
Examples
--------
Draw samples from the distribution:
>>> mu, beta = 0, 0.1 # location and scale
>>> s = np.random.gumbel(mu, beta, 1000)
Display the histogram of the samples, along with
the probability density function:
>>> import matplotlib.pyplot as plt
>>> count, bins, ignored = plt.hist(s, 30, density=True)
>>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)
... * np.exp( -np.exp( -(bins - mu) /beta) ),
... linewidth=2, color='r')
>>> plt.show()
Show how an extreme value distribution can arise from a Gaussian process
and compare to a Gaussian:
>>> means = []
>>> maxima = []
>>> for i in range(0,1000) :
... a = np.random.normal(mu, beta, 1000)
... means.append(a.mean())
... maxima.append(a.max())
>>> count, bins, ignored = plt.hist(maxima, 30, density=True)
>>> beta = np.std(maxima) * np.sqrt(6) / np.pi
>>> mu = np.mean(maxima) - 0.57721*beta
>>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)
... * np.exp(-np.exp(-(bins - mu)/beta)),
... linewidth=2, color='r')
>>> plt.plot(bins, 1/(beta * np.sqrt(2 * np.pi))
... * np.exp(-(bins - mu)**2 / (2 * beta**2)),
... linewidth=2, color='g')
>>> plt.show()
"""
return _mx_nd_np.random.gumbel(loc, scale, size, ctx, out)
def multinomial(n, pvals, size=None, **kwargs):
r"""
Draw samples from a multinomial distribution.
The multinomial distribution is a multivariate generalisation of the binomial distribution.
Take an experiment with one of ``p`` possible outcomes. An example of such an experiment is throwing a die,
where the outcome can be 1 through 6. Each sample drawn from the distribution represents ``n`` such experiments.
Its values, ``X_i = [X_0, X_1, ..., X_p]``, represent the number of times the outcome was ``i``.
Parameters
----------
n : int
Number of experiments.
pvals : sequence of floats, length p
Probabilities of each of the p different outcomes. These should sum to 1.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples
are drawn. Default is None, in which case a single value is returned.
Returns
-------
out : ndarray
The drawn samples, of shape size, if that was provided. If not, the shape is ``(N,)``.
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional value drawn from the distribution.
Examples
--------
Throw a die 1000 times, and 1000 times again:
>>> np.random.multinomial(1000, [1/6.]*6, size=2)
array([[164, 161, 179, 158, 150, 188],
[178, 162, 177, 143, 163, 177]])
A loaded die is more likely to land on number 6:
>>> np.random.multinomial(100, [1/7.]*5 + [2/7.])
array([19, 14, 12, 11, 21, 23])
>>> np.random.multinomial(100, [1.0 / 3, 2.0 / 3])
array([32, 68])
"""
return _mx_nd_np.random.multinomial(n, pvals, size, **kwargs)
# pylint: disable=unused-argument
def multivariate_normal(mean, cov, size=None, check_valid=None, tol=None):
"""
multivariate_normal(mean, cov, size=None, check_valid=None, tol=None)
Draw random samples from a multivariate normal distribution.
The multivariate normal, multinormal or Gaussian distribution is a
generalization of the one-dimensional normal distribution to higher
dimensions. Such a distribution is specified by its mean and
covariance matrix. These parameters are analogous to the mean
(average or "center") and variance (standard deviation, or "width,"
squared) of the one-dimensional normal distribution.
This operator is a little different from the one in official NumPy.
The official NumPy operator only accepts 1-D ndarray as mean and 2-D ndarray as cov,
whereas the operator in MXNet np supports batch operation and auto-broadcasting.
Both `mean` and `cov` may have any number of leading dimensions, which correspond
to a batch shape. They are not necessarily assumed to have the same batch shape,
just ones which can be broadcasted.
Parameters
----------
mean : K-D ndarray, of shape (..., N)
Mean of the N-dimensional distribution.
cov : (K+1)-D ndarray, of shape (..., N, N)
Covariance matrix of the distribution. The last two dimensions must be symmetric and
positive-semidefinite for proper sampling.
size : int or tuple of ints, optional
Given a shape of, for example, ``(m,n,k)``,
``m*n*k`` identically distributed batches of samples are
generated and packed in an `m`-by-`n`-by-`k` arrangement.
If no shape is specified, a single batch of (`N`-D) samples is returned.
check_valid : { 'warn', 'raise', 'ignore' }, optional
Behavior when the covariance matrix is not positive semidefinite.
(Not supported)
tol : float, optional
Tolerance when checking the singular values in covariance matrix.
cov is cast to double before the check.
(Not supported)
Returns
-------
out : ndarray
The input shape of `mean` and `cov` should satisfy the requirements of broadcasting.
If the parameter `size` is not provided,
the output shape is ``np.broadcast(mean.shape, cov.shape[:-1])``.
Otherwise, the output shape is ``size + np.broadcast(mean.shape, cov.shape[:-1])``
Examples
--------
>>> mean = np.array([1, 2])
>>> cov = np.array([[1, 0], [0, 1]])
>>> x = np.random.multivariate_normal(mean, cov, (3, 3))
>>> x.shape
(3, 3, 2)
The following is often true, given that each component of ``x - mean`` has
unit standard deviation:
>>> list((x[0,0,:] - mean) < 0.6)
[True, True] # random
# Performs autobroadcasting when the batch shape of
# `mean` and `cov` is different but compatible.
>>> mean = np.zeros((3,2)) # shape (3, 2)
>>> cov = np.array([[1, 0], [0, 100]]) # shape (2, 2)
>>> x = np.random.multivariate_normal(mean, cov)
>>> x
array([[-1.6115597 , -8.726251 ],
[ 2.2425299 , 2.8104177 ],
[ 0.36229908, -8.386591 ]])
"""
return _mx_nd_np.random.multivariate_normal(mean, cov, size=size, check_valid=None, tol=None)
def choice(a, size=None, replace=True, p=None, ctx=None, out=None):
r"""Generates a random sample from a given 1-D array
Parameters
----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a were np.arange(a)
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
replace : boolean, optional
Whether the sample is with or without replacement
p : 1-D array-like, optional
The probabilities associated with each entry in a.
If not given the sample assumes a uniform distribution over all
entries in a.
ctx : Context, optional
Device context of output. Default is current context.
Returns
-------
samples : ndarray
The generated random samples
Examples
--------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3)
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0])
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False)
array([3,1,0])
>>> #This is equivalent to np.random.permutation(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
array([2, 3, 0])
"""
return _mx_nd_np.random.choice(a, size, replace, p, ctx, out)
def rayleigh(scale=1.0, size=None, ctx=None, out=None):
r"""Draw samples from a Rayleigh distribution.
The :math:`\chi` and Weibull distributions are generalizations of the
Rayleigh.
Parameters
----------
scale : float, optional
Scale, also equals the mode. Must be non-negative. Default is 1.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``scale`` is a scalar. Otherwise,
``np.array(scale).size`` samples are drawn.
ctx : Context, optional
Device context of output, default is current context.
out : ``ndarray``, optional
Store output to an existing ``ndarray``.
Returns
-------
out : ndarray or scalar
Drawn samples from the parameterized Rayleigh distribution.
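Examples
--------
A minimal, illustrative sketch (values are random, so only the deterministic
output shape is shown):
>>> s = np.random.rayleigh(scale=2.0, size=(2, 3))
>>> s.shape
(2, 3)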
"""
return _mx_nd_np.random.rayleigh(scale, size, ctx, out)
def rand(*size, **kwargs):
r"""Random values in a given shape.
Create an array of the given shape and populate it with random
samples from a uniform distribution over [0, 1).
Parameters
----------
d0, d1, ..., dn : int, optional
The dimensions of the returned array. All dimensions must be positive.
If no argument is given, a single Python float is returned.
Returns
-------
out : ndarray
Random values.
Examples
--------
>>> np.random.rand(3,2)
array([[ 0.14022471, 0.96360618], #random
[ 0.37601032, 0.25528411], #random
[ 0.49313049, 0.94909878]]) #random
"""
output_shape = ()
for s in size:
output_shape += (s,)
return _mx_nd_np.random.uniform(0, 1, size=output_shape, **kwargs)
def exponential(scale=1.0, size=None, ctx=None, out=None):
r"""Draw samples from an exponential distribution.
Parameters
----------
scale : float or array_like of floats
The scale parameter, :math:`\beta = 1/\lambda`. Must be
non-negative.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``scale`` is a scalar. Otherwise,
``np.array(scale).size`` samples are drawn.
ctx : Context, optional
Device context of output, default is current context.
out : ``ndarray``, optional
Store output to an existing ``ndarray``.
Returns
-------
out : ndarray or scalar
Drawn samples from the parameterized exponential distribution.
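Examples
--------
A minimal, illustrative sketch (values are random, so only the deterministic
output shape is shown):
>>> s = np.random.exponential(scale=2.0, size=(2, 3))
>>> s.shape
(2, 3)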
"""
return _mx_nd_np.random.exponential(scale, size=size, ctx=ctx, out=out)
def weibull(a, size=None, ctx=None, out=None):
r"""Draw samples from a 1-parameter Weibull distribution with given parameter a
via inversion.
Parameters
----------
a : float or array_like of floats
Shape of the distribution. Must be non-negative.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``a`` is a scalar. Otherwise,
``np.array(a).size`` samples are drawn.
Returns
-------
out : ndarray or scalar
Drawn samples from the 1-parameter Weibull distribution.
Examples
--------
>>> np.random.weibull(a=5)
array(0.9553641)
>>> np.random.weibull(a=5, size=[2,3])
array([[1.0466299 , 1.1320982 , 0.98415005],
[1.1430776 , 0.9532727 , 1.1344457 ]])
>>> np.random.weibull(a=np.array([2,3]))
array([0.98843634, 1.0125613 ])
The Weibull distribution is one of a class of Generalized Extreme
Value (GEV) distributions. This class includes the Gumbel and Frechet
distributions.
The probability density for the Weibull distribution is
.. math:: f(x) = \frac{a}{\lambda}\left(\frac{x}{\lambda}\right)^{a-1}e^{-(x/\lambda)^a},
where :math:`a` is the shape and :math:`\lambda` the scale. The generated 1-parameter Weibull
sample has the scale parameter :math:`\lambda = 1`.
The Weibull distribution is commonly used in reliability engineering to
model time to failure, in modeling particle sizes, in information retrieval
to model dwell time on pages, in quantitative finance to model risk etc.
"""
return _mx_nd_np.random.weibull(a, size=size, ctx=ctx, out=out)
def pareto(a, size=None, ctx=None, out=None):
r"""Draw samples from a Pareto II or Lomax distribution with specified shape a.
Parameters
----------
a : float or array_like of floats
Shape of the distribution. Must be > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``a`` is a scalar. Otherwise,
``np.array(a).size`` samples are drawn.
Returns
-------
out : ndarray or scalar
Drawn samples from the Pareto distribution.
Examples
--------
>>> np.random.pareto(a=5)
array(0.12749612)
>>> mx.numpy.random.pareto(a=5, size=[2,3])
array([[0.06933999, 0.0344373 , 0.10654891],
[0.0311172 , 0.12911797, 0.03370714]])
>>> np.random.pareto(a=np.array([2,3]))
array([0.26636696, 0.15685666])
The probability density for the Pareto distribution is
.. math:: f(x) = \frac{a m^a}{x^{a+1}},
where :math:`a` is the shape and :math:`m` the scale. Here :math:`m` is assumed to be 1.
The Pareto distribution is a power-law distribution; Pareto originally introduced it
to describe the distribution of wealth in an economy.
"""
return _mx_nd_np.random.pareto(a, size=size, ctx=ctx, out=out)
def power(a, size=None, ctx=None, out=None):
r"""Draw samples in [0, 1] from a power distribution with given parameter a.
Parameters
----------
a : float or array_like of floats
Shape of the distribution. Must be > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``a`` is a scalar. Otherwise,
``np.array(a).size`` samples are drawn.
Returns
-------
out : ndarray or scalar
Drawn samples from the power distribution.
Examples
--------
>>> np.random.power(a=5)
array(0.8602478)
>>> np.random.power(a=5, size=[2,3])
array([[0.988391 , 0.5153122 , 0.9383134 ],
[0.9078098 , 0.87819266, 0.730635]])
>>> np.random.power(a=np.array([2,3]))
array([0.7499419 , 0.88894516])
The probability density function is
.. math:: f(x; a) = a x^{a-1}, \quad 0 \le x \le 1, \quad a > 0.
The power distribution is just the inverse of the Pareto distribution and
a special case of the Beta distribution.
"""
return _mx_nd_np.random.power(a, size=size, ctx=ctx, out=out)
def shuffle(x):
"""
Modify a sequence in-place by shuffling its contents.
This function only shuffles the array along the first axis of a
multi-dimensional array. The order of sub-arrays is changed but
their contents remain the same.
Parameters
----------
x: ndarray
The array or list to be shuffled.
Examples
--------
>>> arr = np.arange(10)
>>> np.random.shuffle(arr)
>>> arr
array([5., 1., 0., 6., 7., 3., 9., 8., 4., 2.]) # random
Multi-dimensional arrays are only shuffled along the first axis:
>>> arr = np.arange(9).reshape((3, 3))
>>> np.random.shuffle(arr)
>>> arr
array([[6., 7., 8.], # random
[3., 4., 5.],
[0., 1., 2.]])
"""
_mx_nd_np.random.shuffle(x)
def gamma(shape, scale=1.0, size=None, dtype=None, ctx=None, out=None):
"""Draw samples from a Gamma distribution.
Samples are drawn from a Gamma distribution with specified parameters,
`shape` (sometimes designated "k") and `scale` (sometimes designated
"theta"), where both parameters are > 0.
The Gamma distribution is often used to model the times to failure of
electronic components, and arises naturally in processes for which the
waiting times between Poisson distributed events are relevant.
Parameters
----------
shape : float or array_like of floats
The shape of the gamma distribution. Should be greater than zero.
scale : float or array_like of floats, optional
The scale of the gamma distribution. Should be greater than zero.
Default is equal to 1.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``shape`` and ``scale`` are both scalars.
Otherwise, ``np.broadcast(shape, scale).size`` samples are drawn.
ctx : Context, optional
Device context of output. Default is current context.
Returns
-------
out : ndarray or scalar
Drawn samples from the parameterized gamma distribution.
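Examples
--------
A minimal, illustrative sketch (values are random, so only the deterministic
output shape is shown):
>>> s = np.random.gamma(shape=2.0, scale=2.0, size=(2, 3))
>>> s.shape
(2, 3)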
"""
return _mx_nd_np.random.gamma(shape, scale, size, dtype, ctx, out)
def beta(a, b, size=None, dtype=None, ctx=None):
r"""Draw samples from a Beta distribution.
The Beta distribution is a special case of the Dirichlet distribution,
and is related to the Gamma distribution. It has the probability
distribution function
.. math:: f(x; a,b) = \frac{1}{B(\alpha, \beta)} x^{\alpha - 1}
(1 - x)^{\beta - 1},
where the normalisation, B, is the beta function,
.. math:: B(\alpha, \beta) = \int_0^1 t^{\alpha - 1}
(1 - t)^{\beta - 1} dt.
It is often seen in Bayesian inference and order statistics.
Parameters
----------
a : float or array_like of floats
Alpha, positive (>0).
b : float or array_like of floats
Beta, positive (>0).
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``a`` and ``b`` are both scalars.
Otherwise, ``np.broadcast(a, b).size`` samples are drawn.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'.
Dtype 'float32' or 'float64' is strongly recommended,
since lower precision might lead to out-of-range issues.
ctx : Context, optional
Device context of output. Default is current context.
Notes
-----
To use this operator with scalars as input, please run
``npx.set_np()`` first.
Returns
-------
out : ndarray or scalar
Drawn samples from the parameterized beta distribution.
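Examples
--------
A minimal, illustrative sketch (values are random; ``npx.set_np()`` is assumed
to have been called so that scalar parameters are accepted):
>>> s = np.random.beta(a=2.0, b=5.0, size=(2, 3))
>>> s.shape
(2, 3)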
"""
return _mx_nd_np.random.beta(a, b, size=size, dtype=dtype, ctx=ctx)
def f(dfnum, dfden, size=None, ctx=None):
r"""Draw samples from an F distribution.
Samples are drawn from an F distribution with specified parameters,
`dfnum` (degrees of freedom in numerator) and `dfden` (degrees of
freedom in denominator), where both parameters must be greater than
zero.
The random variate of the F distribution (also known as the
Fisher distribution) is a continuous probability distribution
that arises in ANOVA tests, and is the ratio of two chi-square
variates.
Parameters
----------
dfnum : float or ndarray of floats
Degrees of freedom in numerator, must be > 0.
dfden : float or ndarray of float
Degrees of freedom in denominator, must be > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``dfnum`` and ``dfden`` are both scalars.
Otherwise, ``np.broadcast(dfnum, dfden).size`` samples are drawn.
ctx : Context, optional
Device context of output. Default is current context.
Returns
-------
out : ndarray or scalar
Drawn samples from the parameterized Fisher distribution.
Examples
--------
An example from Glantz[1], pp 47-40:
Two groups, children of diabetics (25 people) and children from people
without diabetes (25 controls). Fasting blood glucose was measured,
case group had a mean value of 86.1, controls had a mean value of
82.2. Standard deviations were 2.09 and 2.49 respectively. Are these
data consistent with the null hypothesis that the parents diabetic
status does not affect their children's blood glucose levels?
Calculating the F statistic from the data gives a value of 36.01.
Draw samples from the distribution:
>>> dfnum = 1. # between group degrees of freedom
>>> dfden = 48. # within groups degrees of freedom
>>> s = np.random.f(dfnum, dfden, 1000)
The lower bound for the top 1% of the samples is:
>>> np.sort(s)[-10]
7.61988120985 # random
So there is about a 1% chance that the F statistic will exceed 7.62;
the measured value of 36.01 is far larger, so the null hypothesis is
rejected at the 1% level.
"""
return _mx_nd_np.random.f(dfnum, dfden, size=size, ctx=ctx)
def chisquare(df, size=None, dtype=None, ctx=None):
r"""Draw samples from a chi-square distribution.
When `df` independent random variables, each with standard normal
distributions (mean 0, variance 1), are squared and summed, the
resulting distribution is chi-square (see Notes). This distribution
is often used in hypothesis testing.
Parameters
----------
df : float or ndarray of floats
Number of degrees of freedom, must be > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``df`` is a scalar. Otherwise,
``np.array(df).size`` samples are drawn.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'.
ctx : Context, optional
Device context of output. Default is current context.
Returns
-------
out : ndarray or scalar
Drawn samples from the parameterized `chi-square distribution` [1]_.
Raises
------
ValueError
When `df` <= 0 or when an inappropriate `size`
is given.
Notes
-----
The variable obtained by summing the squares of `df` independent,
standard normally distributed random variables:
.. math:: Q = \sum_{i=1}^{\mathtt{df}} X^2_i
is chi-square distributed, denoted
.. math:: Q \sim \chi^2_k.
The probability density function of the chi-squared distribution is
.. math:: p(x) = \frac{(1/2)^{k/2}}{\Gamma(k/2)}
x^{k/2 - 1} e^{-x/2},
where :math:`\Gamma` is the gamma function,
.. math:: \Gamma(x) = \int_0^{\infty} t^{x - 1} e^{-t} dt.
References
----------
.. [1] NIST "Engineering Statistics Handbook"
https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
Examples
--------
>>> np.random.chisquare(2,4)
array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272]) # random
"""
return _mx_nd_np.random.chisquare(df, size=size, dtype=dtype, ctx=ctx)
def randn(*size, **kwargs):
r"""Return a sample (or samples) from the "standard normal" distribution.
If positive, int_like or int-convertible arguments are provided,
`randn` generates an array of shape ``(d0, d1, ..., dn)``, filled
with random floats sampled from a univariate "normal" (Gaussian)
distribution of mean 0 and variance 1 (if any of the :math:`d_i` are
floats, they are first converted to integers by truncation). A single
float randomly sampled from the distribution is returned if no
argument is provided.
This is a convenience function. If you want an interface that takes a
tuple as the first argument, use `numpy.random.standard_normal` instead.
Parameters
----------
d0, d1, ..., dn : int, optional
The dimensions of the returned array. All dimensions must be positive.
If no argument is given, a single Python float is returned.
Returns
-------
Z : ndarray
A ``(d0, d1, ..., dn)``-shaped array of floating-point samples from
the standard normal distribution, or a single such float if
no parameters were supplied.
Notes
-----
For random samples from :math:`N(\mu, \sigma^2)`, use:
``sigma * np.random.randn(...) + mu``
Examples
--------
>>> np.random.randn()
2.1923875335537315 #random
Two-by-four array of samples from N(3, 6.25):
>>> 2.5 * np.random.randn(2, 4) + 3
array([[-4.49401501, 4.00950034, -1.81814867, 7.29718677], #random
[ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) #random
"""
output_shape = ()
for s in size:
output_shape += (s,)
return _mx_nd_np.random.normal(0, 1, size=output_shape, **kwargs)
def laplace(loc=0.0, scale=1.0, size=None, dtype=None, ctx=None, out=None):
r"""Draw random samples from a Laplace distribution.
Samples are distributed according to a Laplace distribution parametrized
by *loc* (mean) and *scale* (the exponential decay).
Parameters
----------
loc : float, optional
The position of the distribution peak. Default is 0.
scale : float, optional
The exponential decay. Default is 1.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k``
samples are drawn. Default is None, in which case a single value is returned.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context.
out : ``ndarray``, optional
Store output to an existing ``ndarray``.
Returns
-------
out : ndarray
Drawn samples from the parameterized Laplace distribution.
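Examples
--------
A minimal, illustrative sketch (values are random, so only the deterministic
output shape is shown):
>>> s = np.random.laplace(loc=0.0, scale=1.0, size=(2, 3))
>>> s.shape
(2, 3)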
"""
return _mx_nd_np.random.laplace(loc, scale, size, dtype, ctx, out)
| apache-2.0 |
jniediek/mne-python | examples/visualization/plot_evoked_topomap.py | 13 | 1606 | """
========================================
Plotting topographic maps of evoked data
========================================
Load evoked data and plot topomaps for selected time points.
"""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
# Tal Linzen <linzen@nyu.edu>
# Denis A. Engeman <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from mne.datasets import sample
from mne import read_evokeds
print(__doc__)
path = sample.data_path()
fname = path + '/MEG/sample/sample_audvis-ave.fif'
# load evoked and subtract baseline
condition = 'Left Auditory'
evoked = read_evokeds(fname, condition=condition, baseline=(None, 0))
# set time instants in seconds (from 50 to 150ms in a step of 10ms)
times = np.arange(0.05, 0.15, 0.01)
# If times is set to None, only 10 regularly spaced topographies will be shown
# plot magnetometer data as topomaps
evoked.plot_topomap(times, ch_type='mag')
# compute a 50 ms bin to stabilize topographies
evoked.plot_topomap(times, ch_type='mag', average=0.05)
# plot gradiometer data (plots the RMS for each pair of gradiometers)
evoked.plot_topomap(times, ch_type='grad')
# plot magnetometer data as an animation
evoked.animate_topomap(ch_type='mag', times=times, frame_rate=10)
# plot magnetometer data as topomap at 1 time point : 100 ms
# and add channel labels and title
evoked.plot_topomap(0.1, ch_type='mag', show_names=True, colorbar=False,
size=6, res=128, title='Auditory response')
plt.subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.88)
| bsd-3-clause |