repo_name | path | copies | size | content | license
---|---|---|---|---|---|
theoryno3/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
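# (lambda_ is a precision, i.e. an inverse variance, hence scale = 1/sqrt(lambda_) below)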
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
timqian/sms-tools | lectures/8-Sound-transformations/plots-code/sineModelFreqScale-orchestra.py | 21 | 2666 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/transformations/'))
import sineModel as SM
import stft as STFT
import utilFunctions as UF
import sineTransformations as SMT
(fs, x) = UF.wavread('../../../sounds/orchestra.wav')
w = np.hamming(801)
N = 2048
t = -90
minSineDur = .005
maxnSines = 150
freqDevOffset = 20
freqDevSlope = 0.02
Ns = 512
H = Ns/4
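# analysis/synthesis setup above: 801-sample Hamming window, 2048-point FFT,
# -90 dB peak threshold, 512-sample synthesis FFT and a hop of Ns/4 = 128 samples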
mX, pX = STFT.stftAnal(x, fs, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
freqScaling = np.array([0, .8, 1, 1.2])
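# freqScaling is a scaling envelope given as (normalized time, factor) pairs, here
# ramping the sine frequencies from x0.8 at the start to x1.2 at the end
# (interpretation assumed from SMT.sineFreqScaling)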
ytfreq = SMT.sineFreqScaling(tfreq, freqScaling)
y = SM.sineModelSynth(ytfreq, tmag, np.array([]), Ns, H, fs)
mY, pY = STFT.stftAnal(y, fs, w, N, H)
UF.wavwrite(y,fs, 'sineModelFreqScale-orchestra.wav')
maxplotfreq = 4000.0
plt.figure(1, figsize=(9.5, 7))
plt.subplot(4,1,1)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.axis([0,x.size/float(fs),min(x),max(x)])
plt.title('x (orchestra.wav)')
plt.subplot(4,1,2)
numFrames = int(tfreq[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1)
plt.autoscale(tight=True)
plt.title('sine frequencies')
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)
plt.subplot(4,1,3)
numFrames = int(ytfreq[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
tracks = ytfreq*np.less(ytfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1)
plt.autoscale(tight=True)
plt.title('freq-scaled sine frequencies')
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mY[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mY[:,:maxplotbin+1]))
plt.autoscale(tight=True)
plt.subplot(4,1,4)
plt.plot(np.arange(y.size)/float(fs), y, 'b')
plt.axis([0,y.size/float(fs),min(y),max(y)])
plt.title('y')
plt.tight_layout()
plt.savefig('sineModelFreqScale-orchestra.png')
plt.show()
| agpl-3.0 |
ssh0/growing-string | triangular_lattice/diecutting/result_count_on_edge.py | 1 | 9360 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# written by Shotaro Fujimoto
# 2016-12-16
import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d.axes3d import Axes3D
import matplotlib.cm as cm
import numpy as np
import set_data_path
class Visualizer(object):
def __init__(self, subjects):
self.data_path_list = set_data_path.data_path
if len(subjects) != 0:
for subject in subjects:
getattr(self, 'result_' + subject)()
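# e.g. Visualizer(['S']) dispatches to self.result_S(), which in turn loops over
# every result file listed in set_data_path.data_path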
def load_data(self, _path):
data = np.load(_path)
beta = data['beta']
try:
size_dist_ave = data['size_dist_ave']
if len(size_dist_ave) == 0:
raise KeyError
return self.load_data_averaged(_path)
except KeyError:
pass
num_of_strings = data['num_of_strings']
frames = data['frames']
Ls = data['Ls'].astype(np.float)
# Ls = (3 * Ls * (Ls + 1) + 1)
size_dist = data['size_dist']
N0 = np.array([l[1] for l in size_dist], dtype=np.float) / num_of_strings
n0 = N0[1:]
S = np.array([np.sum(l) for l in size_dist], dtype=np.float) / num_of_strings
n1 = (S[1:] - n0) * 2.
N = []
for l in size_dist:
dot = np.dot(np.arange(len(l)), np.array(l).T)
N.append(dot)
# N = np.array([np.dot(np.arange(len(l)), np.array(l).T) for l in size_dist])
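# 3*L*(L+1) + 1 used below is the centered hexagonal number: the total number of
# sites in a hexagonal cutting region of linear size L on the triangular lattice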
N_all = 3. * Ls * (Ls + 1.) + 1
N = np.array(N, dtype=np.float) / num_of_strings
N_minus = N_all - N
N_minus_rate = N_minus / N_all
n_minus = N_minus[1:] - N_minus[:-1]
n1_ave = n1 / np.sum(n1)
n2 = (6 * Ls[1:]) - (n0 + n1 + n_minus)
self.beta = beta
self.num_of_strings = num_of_strings
self.frames = frames
self.Ls = Ls
self.N = N
self.N_minus = N_minus
self.N_minus_rate = N_minus_rate
self.S = S
self.n0 = n0
self.n1 = n1
self.n2 = n2
self.n_minus = n_minus
self.n1_ave = n1_ave
def load_data_averaged(self, _path):
data = np.load(_path)
beta = data['beta']
num_of_strings = data['num_of_strings']
frames = data['frames']
Ls = data['Ls'].astype(np.float)
# Ls = (3 * Ls * (Ls + 1) + 1)
# size_dist = data['size_dist']
size_dist_ave = data['size_dist_ave']
N0 = np.array([l[1] for l in size_dist_ave], dtype=np.float)
n0 = N0[1:]
S = np.array([np.sum(l) for l in size_dist_ave], dtype=np.float)
n1 = (S[1:] - n0) * 2.
N = []
for l in size_dist_ave:
dot = np.dot(np.arange(len(l)), np.array(l).T)
N.append(dot)
# N = np.array([np.dot(np.arange(len(l)), np.array(l).T) for l in size_dist_ave])
N_all = 3. * Ls * (Ls + 1.) + 1
N = np.array(N, dtype=np.float)
N_minus = N_all - N
N_minus_rate = N_minus / N_all
n_minus = N_minus[1:] - N_minus[:-1]
n1_ave = n1 / np.sum(n1)
n2 = (6 * Ls[1:]) - (n0 + n1 + n_minus)
self.beta = beta
self.num_of_strings = num_of_strings
self.frames = frames
self.Ls = Ls
self.N = N
self.N_all = N_all
self.N_minus = N_minus
self.N_minus_rate = N_minus_rate
self.S = S
self.n_all = 6 * Ls[1:]
self.n0 = n0
self.n1 = n1
self.n2 = n2
self.n_minus = n_minus
self.n1_ave = n1_ave
def result_N(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.N[1:], '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
ax.set_title('Occupied points in the cutting region' +
' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$N$')
plt.show()
def result_N_minus_rate(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.N_minus_rate[1:], '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
ax.set_title('The rate of unoccupied sites among all N' +
' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$N_{-1} / N_{\mathrm{all}}$')
plt.show()
def result_n0(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.n0, '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
ax.set_title('Averaged number of the sites which are each the only member of \
a subcluster on the cutting edges.' +
' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$n_{0}$')
plt.show()
def result_n1(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.n1, '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
ax.set_title('Averaged number of the sites which are connected to an \
existing subcluster on the cutting edges.' +
' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$n_{1}$')
plt.show()
def result_n2(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.n2, '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
ax.set_title('Averaged number of the sites on the cutting edges which \
are connected to two neighbors.' +
' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$n_{2}$')
plt.show()
def result_n_minus(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.n_minus, '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
ax.set_title('Averaged number of the sites which are not occupied on \
the cutting edges.' +
' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$n_{-1}$')
plt.show()
def result_S(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.S[1:] / np.sum(self.S[1:]), '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
ax.set_ylim([0, ax.get_ylim()[1]])
ax.set_title('Averaged number of the subclusters in the cut region.'
+ ' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$S$')
plt.show()
def result_S_rate(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
# ax.plot(self.Ls[1:], self.S[1:] / np.sum(self.S[1:]), '.',
# ax.plot(self.Ls[1:], self.S[1:] / self.n_all, '.',
ax.plot(self.Ls[1:], self.S[1:] / self.N[1:], '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
ax.set_ylim([0, ax.get_ylim()[1]])
ax.set_title('Averaged number of the subclusters in the cut region'
+ ' (normalized)'
+ ' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$S$')
plt.show()
if __name__ == '__main__':
# subject: 'N', 'N_minus_rate', 'n0', 'n1', 'n2', 'n_minus', 'S'
main = Visualizer(
[
# 'N',
# 'N_minus_rate',
# 'n0',
# 'n1',
# 'n2',
# 'n_minus',
'S',
# 'S_rate'
]
)
| mit |
zhester/hzpy | examples/parseriff.py | 1 | 2368 | #!/usr/bin/env python
"""
Example RIFF (WAV contents) Data Parser
Sample data is written to a CSV file for analysis.
If matplotlib and numpy are available, signal plots (DFTs) are generated.
"""
import math
import os
import struct
import wave
try:
import matplotlib.pyplot as plot
import numpy
import numpy.fft as fft
except ImportError:
numeric_packages = False
else:
numeric_packages = True
#=============================================================================
def frame2mag( frame ):
( i, q ) = struct.unpack( '<BB', frame )
return math.sqrt( ( i ** 2 ) + ( q ** 2 ) )
#=============================================================================
def main( argv ):
""" Script execution entry point """
# check usage
if len( argv ) < 2:
print 'You must specify at least an input file.'
return 0
# start and length
start = 0
length = 1024
if len( argv ) > 2:
start = int( argv[ 2 ] )
if len( argv ) > 3:
length = int( argv[ 3 ] )
# open file using wave module
wfile = wave.open( argv[ 1 ], 'rb' )
# print file info
print 'Channels: %d\nSample width: %d\nFrame rate: %d\nFrames: %d' % (
wfile.getnchannels(),
wfile.getsampwidth(),
wfile.getframerate(),
wfile.getnframes()
)
# check for starting offset
if start > 0:
junk = wfile.readframes( start )
# read frames
frames = wfile.readframes( length )
samples = []
for i in range( length ):
index = i * 2
samples.append( frame2mag( frames[ index : ( index + 2 ) ] ) )
# close wave file
wfile.close()
# plot
if numeric_packages == True:
fft_data = fft.fft( samples[ : 1024 ] )
mags = numpy.absolute( fft_data )
mags_db = [ 20 * numpy.log10( mag ) for mag in mags ]
plot.figure( 1 )
plot.plot( samples )
plot.figure( 2 )
plot.plot( mags_db )
plot.show()
# output
oname = argv[ 1 ].replace( '.wav', '.csv' )
ofile = open( oname, 'wb' )
for sample in samples:
ofile.write( '%d\n' % sample )
ofile.close()
# Return success.
return 0
#=============================================================================
if __name__ == "__main__":
import sys
sys.exit( main( sys.argv ) )
| bsd-2-clause |
ThomasSweijen/TPF | doc/sphinx/conf.py | 1 | 28022 | # -*- coding: utf-8 -*-
#
# Yade documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 16 21:49:34 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# relevant posts to sphinx ML
# http://groups.google.com/group/sphinx-dev/browse_thread/thread/b4fbc8d31d230fc4
# http://groups.google.com/group/sphinx-dev/browse_thread/thread/118598245d5f479b
#####################
## custom yade roles
#####################
##
## http://docutils.sourceforge.net/docs/howto/rst-roles.html
import sys, os, re
from docutils import nodes
from sphinx import addnodes
from sphinx.roles import XRefRole
import docutils
#
# needed for creating hyperlink targets.
# it should be cleaned up and unified for both LaTeX and HTML via
# the pending_xref node which gets resolved to real link target
# by sphinx automatically once all docs have been processed.
#
# xrefs: http://groups.google.com/group/sphinx-dev/browse_thread/thread/d719d19307654548
#
#
import __builtin__
if 'latex' in sys.argv: __builtin__.writer='latex'
elif 'html' in sys.argv: __builtin__.writer='html'
elif 'epub' in sys.argv: __builtin__.writer='epub'
else: raise RuntimeError("Must have either 'latex' or 'html' on the command line (hack for reference styles)")
def yaderef_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :yref:`` role, by making hyperlink to yade.wrapper.*. It supports :yref:`Link text<link target>` syntax, like usual hyperlinking roles."
id=rawtext.split(':',2)[2][1:-1]
txt=id; explicitText=False
m=re.match('(.*)\s*<(.*)>\s*',id)
if m:
explicitText=True
txt,id=m.group(1),m.group(2)
id=id.replace('::','.')
#node=nodes.reference(rawtext,docutils.utils.unescape(txt),refuri='http://beta.arcig.cz/~eudoxos/yade/doxygen/?search=%s'%id,**options)
#node=nodes.reference(rawtext,docutils.utils.unescape(txt),refuri='yade.wrapper.html#yade.wrapper.%s'%id,**options)
return [mkYrefNode(id,txt,rawtext,role,explicitText,lineno,options)],[]
def yadesrc_role(role,rawtext,lineno,inliner,options={},content=[]):
"Handle the :ysrc:`` role, making hyperlink to git repository webpage with that path. Supports :ysrc:`Link text<file/name>` syntax, like usual hyperlinking roles. If target ends with ``/``, it is assumed to be a directory."
id=rawtext.split(':',2)[2][1:-1]
txt=id
m=re.match('(.*)\s*<(.*)>\s*',id)
if m:
txt,id=m.group(1),m.group(2)
return [nodes.reference(rawtext,docutils.utils.unescape(txt),refuri='https://github.com/yade/trunk/blob/master/%s'%id)],[] ### **options should be passed to nodes.reference as well
# map modules to their html (rst) filenames. Used for sub-modules, where e.g. SpherePack is yade._packSphere.SpherePack, but is documented from yade.pack.rst
moduleMap={
'yade._packPredicates':'yade.pack',
'yade._packSpheres':'yade.pack',
'yade._packObb':'yade.pack'
}
class YadeXRefRole(XRefRole):
#def process_link
def process_link(self, env, refnode, has_explicit_title, title, target):
print 'TARGET:','yade.wrapper.'+target
return '[['+title+']]','yade.wrapper.'+target
def mkYrefNode(target,text,rawtext,role,explicitText,lineno,options={}):
"""Create hyperlink to yade target. Targets starting with literal 'yade.' are absolute, but the leading 'yade.' will be stripped from the link text. Absolute tergets are supposed to live in page named yade.[module].html, anchored at #yade.[module2].[rest of target], where [module2] is identical to [module], unless mapped over by moduleMap.
Other targets are supposed to live in yade.wrapper (such as c++ classes)."""
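# Illustrative example of the mapping above: the target 'yade._packSpheres.SpherePack'
# resolves to module2 'yade.pack' via moduleMap, giving the HTML uri
# 'yade.pack.html#yade._packSpheres.SpherePack' (or '%yade.pack#yade._packSpheres.SpherePack'
# for the latex writer).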
writer=__builtin__.writer # to make sure not shadowed by a local var
import string
if target.startswith('yade.'):
module='.'.join(target.split('.')[0:2])
module2=(module if module not in moduleMap.keys() else moduleMap[module])
if target==module: target='' # to reference the module itself
uri=('%%%s#%s'%(module2,target) if writer=='latex' else '%s.html#%s'%(module2,target))
if not explicitText and module!=module2:
text=module2+'.'+'.'.join(target.split('.')[2:])
text=string.replace(text,'yade.','',1)
elif target.startswith('external:'):
exttarget=target.split(':',1)[1]
if not explicitText: text=exttarget
target=exttarget if '.' in exttarget else 'module-'+exttarget
uri=(('%%external#%s'%target) if writer=='latex' else 'external.html#%s'%target)
else:
uri=(('%%yade.wrapper#yade.wrapper.%s'%target) if writer=='latex' else 'yade.wrapper.html#yade.wrapper.%s'%target)
#print writer,uri
if 0:
refnode=addnodes.pending_xref(rawtext,reftype=role,refexplicit=explicitText,reftarget=target)
#refnode.line=lineno
#refnode+=nodes.literal(rawtext,text,classes=['ref',role])
return [refnode],[]
#ret.rawtext,reftype=role,
else:
return nodes.reference(rawtext,docutils.utils.unescape(text),refuri=uri,**options)
#return [refnode],[]
def ydefault_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :ydefault:`something` role. fixSignature handles it now in the member signature itself, this merely expands to nothing."
return [],[]
def yattrtype_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :yattrtype:`something` role. fixSignature handles it now in the member signature itself, this merely expands to nothing."
return [],[]
# FIXME: should return readable representation of bits of the number (yade.wrapper.AttrFlags enum)
def yattrflags_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :yattrflags:`something` role. fixSignature handles it now in the member signature itself."
return [],[]
from docutils.parsers.rst import roles
def yaderef_role_2(type,rawtext,text,lineno,inliner,options={},content=[]): return YadeXRefRole()('yref',rawtext,text,lineno,inliner,options,content)
roles.register_canonical_role('yref', yaderef_role)
roles.register_canonical_role('ysrc', yadesrc_role)
roles.register_canonical_role('ydefault', ydefault_role)
roles.register_canonical_role('yattrtype', yattrtype_role)
roles.register_canonical_role('yattrflags', yattrflags_role)
## http://sphinx.pocoo.org/config.html#confval-rst_epilog
rst_epilog = """
.. |yupdate| replace:: *(auto-updated)*
.. |ycomp| replace:: *(auto-computed)*
.. |ystatic| replace:: *(static)*
"""
import collections
def customExclude(app, what, name, obj, skip, options):
if name=='clone':
if 'Serializable.clone' in str(obj): return False
return True
# avoid crashing on a non-iterable __doc__ in some qt objects
if hasattr(obj,'__doc__') and obj.__doc__ and not isinstance(obj.__doc__, collections.Iterable): return True
if hasattr(obj,'__doc__') and obj.__doc__ and ('|ydeprecated|' in obj.__doc__ or '|yhidden|' in obj.__doc__): return True
#if re.match(r'\b(__init__|__reduce__|__repr__|__str__)\b',name): return True
if name.startswith('_'):
if name=='__init__':
# skip boost classes with parameterless ctor (arg1=implicit self)
if obj.__doc__=="\n__init__( (object)arg1) -> None": return True
# skip undocumented ctors
if not obj.__doc__: return True
# skip default ctor for serializable, taking dict of attrs
if obj.__doc__=='\n__init__( (object)arg1) -> None\n\nobject __init__(tuple args, dict kwds)': return True
#for i,l in enumerate(obj.__doc__.split('\n')): print name,i,l,'##'
return False
return True
return False
def isBoostFunc(what,obj):
return what=='function' and obj.__repr__().startswith('<Boost.Python.function object at 0x')
def isBoostMethod(what,obj):
"I don't know how to distinguish boost and non-boost methods..."
return what=='method' and obj.__repr__().startswith('<unbound method ');
def replaceLaTeX(s):
# replace single non-escaped dollars $...$ by :math:`...`
# then \$ by single $
s=re.sub(r'(?<!\\)\$([^\$]+)(?<!\\)\$',r'\ :math:`\1`\ ',s)
return re.sub(r'\\\$',r'$',s)
def fixSrc(app,docname,source):
source[0]=replaceLaTeX(source[0])
def fixDocstring(app,what,name,obj,options,lines):
# remove empty default roles, which is not properly interpreted by docutils parser
for i in range(0,len(lines)):
lines[i]=lines[i].replace(':ydefault:``','')
lines[i]=lines[i].replace(':yattrtype:``','')
lines[i]=lines[i].replace(':yattrflags:``','')
#lines[i]=re.sub(':``',':` `',lines[i])
# remove signature of boost::python function docstring, which is the first line of the docstring
if isBoostFunc(what,obj):
l2=boostFuncSignature(name,obj)[1]
# we must replace lines one by one (in-place) :-|
# knowing that l2 is always shorter than lines (l2 is docstring with the signature stripped off)
for i in range(0,len(lines)):
lines[i]=l2[i] if i<len(l2) else ''
elif isBoostMethod(what,obj):
l2=boostFuncSignature(name,obj)[1]
for i in range(0,len(lines)):
lines[i]=l2[i] if i<len(l2) else ''
# LaTeX: replace $...$ by :math:`...`
# must be done after calling boostFuncSignature which uses original docstring
for i in range(0,len(lines)): lines[i]=replaceLaTeX(lines[i])
def boostFuncSignature(name,obj,removeSelf=False):
"""Scan docstring of obj, returning tuple of properly formatted boost python signature
(first line of the docstring) and the rest of docstring (as list of lines).
The rest of docstring is stripped of 4 leading spaces which are automatically
added by boost.
removeSelf will attempt to remove the first argument from the signature.
"""
doc=obj.__doc__
if doc==None: # not a boost method
return None,None
nname=name.split('.')[-1]
docc=doc.split('\n')
if len(docc)<2: return None,docc
doc1=docc[1]
# functions with weird docstring, likely not documented by boost
if not re.match('^'+nname+r'(.*)->.*$',doc1):
return None,docc
if doc1.endswith(':'): doc1=doc1[:-1]
strippedDoc=doc.split('\n')[2:]
# check if all lines are padded
allLinesHave4LeadingSpaces=True
for l in strippedDoc:
if l.startswith(' '): continue
allLinesHave4LeadingSpaces=False; break
# remove the padding if so
if allLinesHave4LeadingSpaces: strippedDoc=[l[4:] for l in strippedDoc]
for i in range(len(strippedDoc)):
# fix signatures inside docstring (one function with multiple signatures)
strippedDoc[i],n=re.subn(r'([a-zA-Z_][a-zA-Z0-9_]*\() \(object\)arg1(, |)',r'\1',strippedDoc[i].replace('->','→'))
# inspect docstring after mangling
if 'getViscoelasticFromSpheresInteraction' in name and False:
print name
print strippedDoc
print '======================'
for l in strippedDoc: print l
print '======================'
sig=doc1.split('(',1)[1]
if removeSelf:
# remove up to the first comma; if no comma present, then the method takes no arguments
# if [ precedes the comma, add it to the result (ugly!)
try:
ss=sig.split(',',1)
if ss[0].endswith('['): sig='['+ss[1]
else: sig=ss[1]
except IndexError:
# grab the return value
try:
sig=') -> '+sig.split('->')[-1]
#if 'Serializable' in name: print 1000*'#',name
except IndexError:
sig=')'
return '('+sig,strippedDoc
def fixSignature(app, what, name, obj, options, signature, return_annotation):
#print what,name,obj,signature#,dir(obj)
if what=='attribute':
doc=unicode(obj.__doc__)
ret=''
m=re.match('.*:ydefault:`(.*?)`.*',doc)
if m:
typ=''
#try:
# clss='.'.join(name.split('.')[:-1])
# instance=eval(clss+'()')
# typ='; '+getattr(instance,name.split('.')[-1]).__class__.__name__
# if typ=='; NoneType': typ=''
#except TypeError: ##no registered converted
# typ=''
dfl=m.group(1)
m2=re.match(r'\s*\(\s*\(\s*void\s*\)\s*\"(.*)\"\s*,\s*(.*)\s*\)\s*',dfl)
if m2: dfl="%s, %s"%(m2.group(2),m2.group(1))
if dfl!='': ret+=' (='+dfl+'%s)'%typ
else: ret+=' (=uninitialized%s)'%typ
#m=re.match('.*\[(.{,8})\].*',doc)
#m=re.match('.*:yunit:`(.?*)`.*',doc)
#if m:
# units=m.group(1)
# print '@@@@@@@@@@@@@@@@@@@@@',name,units
# ret+=' ['+units+']'
return ret,None
elif what=='class':
ret=[]
if len(obj.__bases__)>0:
base=obj.__bases__[0]
while base.__module__!='Boost.Python':
ret+=[base.__name__]
if len(base.__bases__)>0: base=base.__bases__[0]
else: break
if len(ret):
return ' (inherits '+u' → '.join(ret)+')',None
else: return None,None
elif isBoostFunc(what,obj):
sig=boostFuncSignature(name,obj)[0] or ' (wrapped c++ function)'
return sig,None
elif isBoostMethod(what,obj):
sig=boostFuncSignature(name,obj,removeSelf=True)[0]
return sig,None
#else: print what,name,obj.__repr__()
#return None,None
from sphinx import addnodes
def parse_ystaticattr(env,attr,attrnode):
m=re.match(r'([a-zA-Z0-9_]+)\.(.*)\(=(.*)\)',attr)
if not m:
print 100*'@'+' Static attribute %s not matched'%attr
attrnode+=addnodes.desc_name(attr,attr)
klass,name,default=m.groups()
#attrnode+=addnodes.desc_type('static','static')
attrnode+=addnodes.desc_name(name,name)
plist=addnodes.desc_parameterlist()
if default=='': default='unspecified'
plist+=addnodes.desc_parameter('='+default,'='+default)
attrnode+=plist
attrnode+=addnodes.desc_annotation(' [static]',' [static]')
return klass+'.'+name
#############################
## set tab size
###################
## http://groups.google.com/group/sphinx-dev/browse_thread/thread/35b8071ffe9a8feb
def setup(app):
from sphinx.highlighting import lexers
from pygments.lexers.compiled import CppLexer
lexers['cpp'] = CppLexer(tabsize=3)
lexers['c++'] = CppLexer(tabsize=3)
from pygments.lexers.agile import PythonLexer
lexers['python'] = PythonLexer(tabsize=3)
app.connect('source-read',fixSrc)
app.connect('autodoc-skip-member',customExclude)
app.connect('autodoc-process-signature',fixSignature)
app.connect('autodoc-process-docstring',fixDocstring)
app.add_description_unit('ystaticattr',None,objname='static attribute',indextemplate='pair: %s; static method',parse_node=parse_ystaticattr)
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#
# HACK: change ipython console regexp from ipython_console_highlighting.py
import re
sys.path.append(os.path.abspath('.'))
import yade.config
if 1:
if yade.runtime.ipython_version<12:
import ipython_directive as id
else:
if 12<=yade.runtime.ipython_version<13:
import ipython_directive012 as id
elif 13<=yade.runtime.ipython_version<200:
import ipython_directive013 as id
else:
import ipython_directive200 as id
#The next four lines are for compatibility with IPython 0.13.1
ipython_rgxin =re.compile(r'(?:In |Yade )\[(\d+)\]:\s?(.*)\s*')
ipython_rgxout=re.compile(r'(?:Out| -> )\[(\d+)\]:\s?(.*)\s*')
ipython_promptin ='Yade [%d]:'
ipython_promptout=' -> [%d]: '
ipython_cont_spaces=' '
#For IPython <=0.12, the following lines are used
id.rgxin =re.compile(r'(?:In |Yade )\[(\d+)\]:\s?(.*)\s*')
id.rgxout=re.compile(r'(?:Out| -> )\[(\d+)\]:\s?(.*)\s*')
id.rgxcont=re.compile(r'(?: +)\.\.+:\s?(.*)\s*')
id.fmtin ='Yade [%d]:'
id.fmtout =' -> [%d]: ' # for some reason, out and cont must have the trailing space
id.fmtcont=' .\D.: '
id.rc_override=dict(prompt_in1="Yade [\#]:",prompt_in2=" .\D.:",prompt_out=r" -> [\#]: ")
if yade.runtime.ipython_version<12:
id.reconfig_shell()
import ipython_console_highlighting as ich
ich.IPythonConsoleLexer.input_prompt = re.compile("(Yade \[[0-9]+\]: )")
ich.IPythonConsoleLexer.output_prompt = re.compile("(( -> |Out)|\[[0-9]+\]: )")
ich.IPythonConsoleLexer.continue_prompt = re.compile("\s+\.\.\.+:")
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.graphviz',
'sphinx.ext.viewcode',
'sphinx.ext.inheritance_diagram',
'matplotlib.sphinxext.plot_directive',
'matplotlib.sphinxext.only_directives',
#'matplotlib.sphinxext.mathmpl',
'ipython_console_highlighting',
'youtube',
'sphinx.ext.todo',
]
if yade.runtime.ipython_version<12:
extensions.append('ipython_directive')
else:
if 12<=yade.runtime.ipython_version<13:
extensions.append('ipython_directive012')
elif 13<=yade.runtime.ipython_version<200:
extensions.append('ipython_directive013')
else:
extensions.append('ipython_directive200')
# the sidebar extension
if False:
if writer=='html':
extensions+=['sphinx.ext.sidebar']
sidebar_all=True
sidebar_relling=True
#sidebar_abbrev=True
sidebar_tocdepth=3
## http://trac.sagemath.org/sage_trac/attachment/ticket/7549/trac_7549-doc_inheritance_underscore.patch
# GraphViz includes dot, neato, twopi, circo, fdp.
graphviz_dot = 'dot'
inheritance_graph_attrs = { 'rankdir' : 'BT' }
inheritance_node_attrs = { 'height' : 0.5, 'fontsize' : 12, 'shape' : 'oval' }
inheritance_edge_attrs = {}
my_latex_preamble=r'''
\usepackage{euler} % must be loaded before fontspec for the whole doc (below); this must be kept for pngmath, however
\usepackage{hyperref}
\usepackage{amsmath}
\usepackage{amsbsy}
%\usepackage{mathabx}
\usepackage{underscore}
\usepackage[all]{xy}
% Metadata of the pdf output
\hypersetup{pdftitle={Yade Documentation}}
\hypersetup{pdfauthor={V. Smilauer, E. Catalano, B. Chareyre, S. Dorofeenko, J. Duriez, A. Gladky, J. Kozicki, C. Modenese, L. Scholtes, L. Sibille, J. Stransky, K. Thoeni}}
% symbols
\let\mat\boldsymbol % matrix
\let\vec\boldsymbol % vector
\let\tens\boldsymbol % tensor
\def\normalized#1{\widehat{#1}}
\def\locframe#1{\widetilde{#1}}
% timestep
\def\Dt{\Delta t}
\def\Dtcr{\Dt_{\rm cr}}
% algorithm complexity
\def\bigO#1{\ensuremath{\mathcal{O}(#1)}}
% variants for greek symbols
\let\epsilon\varepsilon
\let\theta\vartheta
\let\phi\varphi
% shorthands
\let\sig\sigma
\let\eps\epsilon
% variables at different points of time
\def\prev#1{#1^-}
\def\pprev#1{#1^\ominus}
\def\curr#1{#1^{\circ}}
\def\nnext#1{#1^\oplus}
\def\next#1{#1^+}
% shorthands for geometry
\def\currn{\curr{\vec{n}}}
\def\currC{\curr{\vec{C}}}
\def\uT{\vec{u}_T}
\def\curruT{\curr{\vec{u}}_T}
\def\prevuT{\prev{\vec{u}}_T}
\def\currn{\curr{\vec{n}}}
\def\prevn{\prev{\vec{n}}}
% motion
\def\pprevvel{\pprev{\dot{\vec{u}}}}
\def\nnextvel{\nnext{\dot{\vec{u}}}}
\def\curraccel{\curr{\ddot{\vec{u}}}}
\def\prevpos{\prev{\vec{u}}}
\def\currpos{\curr{\vec{u}}}
\def\nextpos{\next{\vec{u}}}
\def\curraaccel{\curr{\dot{\vec{\omega}}}}
\def\pprevangvel{\pprev{\vec{\omega}}}
\def\nnextangvel{\nnext{\vec{\omega}}}
\def\loccurr#1{\curr{\locframe{#1}}}
\def\numCPU{n_{\rm cpu}}
\DeclareMathOperator{\Align}{Align}
\DeclareMathOperator{\sign}{sgn}
% sorting algorithms
\def\isleq#1{\currelem{#1}\ar@/^/[ll]^{\leq}}
\def\isnleq#1{\currelem{#1}\ar@/^/[ll]^{\not\leq}}
\def\currelem#1{\fbox{$#1$}}
\def\sortSep{||}
\def\sortInv{\hbox{\phantom{||}}}
\def\sortlines#1{\xymatrix@=3pt{#1}}
\def\crossBound{||\mkern-18mu<}
'''
pngmath_latex_preamble=r'\usepackage[active]{preview}'+my_latex_preamble
pngmath_use_preview=True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index-toctree'
# General information about the project.
project = u'Yade'
copyright = u'2009, Václav Šmilauer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = yade.config.version
# The full version, including alpha/beta/rc tags.
release = yade.config.revision
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['yade.']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'stickysidebar':'true','collapsiblesidebar':'true','rightsidebar':'false'}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'fig/yade-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'fig/yade-favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static-html']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
html_index='index.html'
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = { 'index':'index.html'}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Yadedoc'
# -- Options for LaTeX output --------------------------------------------------
my_maketitle=r'''
\begin{titlepage}
\begin{flushright}
\hrule{}
% Upper part of the page
\begin{flushright}
\includegraphics[width=0.15\textwidth]{yade-logo.png}\par
\end{flushright}
\vspace{20 mm}
\text{\sffamily\bfseries\Huge Yade Documentation}\\
\vspace{5 mm}
\vspace{70 mm}
\begin{sffamily}\bfseries\Large
V\'{a}clav \v{S}milauer, Emanuele Catalano, Bruno Chareyre, Sergei Dorofeenko, Jerome Duriez, Anton Gladky, Janek Kozicki, Chiara Modenese, Luc Scholt\`{e}s, Luc Sibille, Jan Str\'{a}nsk\'{y}, Klaus Thoeni
\end{sffamily}
\vspace{20 mm}
\hrule{}
\vfill
% Bottom of the page
\textit{\Large Release '''\
+yade.config.revision\
+r''', \today}
\end{flushright}
\end{titlepage}
\text{\sffamily\bfseries\LARGE Authors}\\
\\
\text{\sffamily\bfseries\Large V\'{a}clav \v{S}milauer}\\
\text{\sffamily\Large Freelance consultant (http://woodem.eu)}\\
\\
\text{\sffamily\bfseries\Large Emanuele Catalano}\\
\text{\sffamily\Large Grenoble INP, UJF, CNRS, lab. 3SR}\\
\\
\text{\sffamily\bfseries\Large Bruno Chareyre}\\
\text{\sffamily\Large Grenoble INP, UJF, CNRS, lab. 3SR}\\
\\
\text{\sffamily\bfseries\Large Sergei Dorofeenko}\\
\text{\sffamily\Large IPCP RAS, Chernogolovka}\\
\\
\text{\sffamily\bfseries\Large Jerome Duriez}\\
\text{\sffamily\Large Grenoble INP, UJF, CNRS, lab. 3SR}\\
\\
\text{\sffamily\bfseries\Large Anton Gladky}\\
\text{\sffamily\Large TU Bergakademie Freiberg}\\
\\
\text{\sffamily\bfseries\Large Janek Kozicki}\\
\text{\sffamily\Large Gdansk University of Technology - lab. 3SR Grenoble University }\\
\\
\text{\sffamily\bfseries\Large Chiara Modenese}\\
\text{\sffamily\Large University of Oxford}\\
\\
\text{\sffamily\bfseries\Large Luc Scholt\`{e}s}\\
\text{\sffamily\Large Grenoble INP, UJF, CNRS, lab. 3SR}\\
\\
\text{\sffamily\bfseries\Large Luc Sibille}\\
\text{\sffamily\Large University of Nantes, lab. GeM}\\
\\
\text{\sffamily\bfseries\Large Jan Str\'{a}nsk\'{y}}\\
\text{\sffamily\Large CVUT Prague}\\
\\
\text{\sffamily\bfseries\Large Klaus Thoeni}
\text{\sffamily\Large The University of Newcastle (Australia)}\\
\text{\sffamily\bfseries\large Citing this document}\\
In order to let users cite Yade consistently in publications, we provide a list of bibliographic references for the different parts of the documentation. This way of acknowledging Yade is also a way to make developments and documentation of Yade more attractive for researchers, who are evaluated on the basis of citations of their work by others. We therefore kindly ask users to cite Yade as accurately as possible in their papers, as explained in http://yade-dem/doc/citing.html.
'''
latex_elements=dict(
papersize='a4paper',
fontpkg=r'''
\usepackage{euler}
\usepackage{fontspec,xunicode,xltxtra}
%\setmainfont[BoldFont={LMRoman10 Bold}]{CMU Concrete} %% CMU Concrete must be installed by hand as otf
''',
utf8extra='',
fncychap='',
preamble=my_latex_preamble,
footer='',
inputenc='',
fontenc='',
maketitle=my_maketitle,
)
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index-toctree', 'Yade.tex', u'Yade Documentation',
u'Václav Šmilauer', 'manual'),
('index-toctree_manuals', 'YadeManuals.tex', u'Yade Tutorial and Manuals',
u'Václav Šmilauer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = 'fig/yade-logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| gpl-2.0 |
xray/xray | xarray/core/options.py | 1 | 5201 | import warnings
DISPLAY_WIDTH = "display_width"
ARITHMETIC_JOIN = "arithmetic_join"
ENABLE_CFTIMEINDEX = "enable_cftimeindex"
FILE_CACHE_MAXSIZE = "file_cache_maxsize"
WARN_FOR_UNCLOSED_FILES = "warn_for_unclosed_files"
CMAP_SEQUENTIAL = "cmap_sequential"
CMAP_DIVERGENT = "cmap_divergent"
KEEP_ATTRS = "keep_attrs"
DISPLAY_STYLE = "display_style"
OPTIONS = {
DISPLAY_WIDTH: 80,
ARITHMETIC_JOIN: "inner",
ENABLE_CFTIMEINDEX: True,
FILE_CACHE_MAXSIZE: 128,
WARN_FOR_UNCLOSED_FILES: False,
CMAP_SEQUENTIAL: "viridis",
CMAP_DIVERGENT: "RdBu_r",
KEEP_ATTRS: "default",
DISPLAY_STYLE: "html",
}
_JOIN_OPTIONS = frozenset(["inner", "outer", "left", "right", "exact"])
_DISPLAY_OPTIONS = frozenset(["text", "html"])
def _positive_integer(value):
return isinstance(value, int) and value > 0
_VALIDATORS = {
DISPLAY_WIDTH: _positive_integer,
ARITHMETIC_JOIN: _JOIN_OPTIONS.__contains__,
ENABLE_CFTIMEINDEX: lambda value: isinstance(value, bool),
FILE_CACHE_MAXSIZE: _positive_integer,
WARN_FOR_UNCLOSED_FILES: lambda value: isinstance(value, bool),
KEEP_ATTRS: lambda choice: choice in [True, False, "default"],
DISPLAY_STYLE: _DISPLAY_OPTIONS.__contains__,
}
def _set_file_cache_maxsize(value):
from ..backends.file_manager import FILE_CACHE
FILE_CACHE.maxsize = value
def _warn_on_setting_enable_cftimeindex(enable_cftimeindex):
warnings.warn(
"The enable_cftimeindex option is now a no-op "
"and will be removed in a future version of xarray.",
FutureWarning,
)
_SETTERS = {
FILE_CACHE_MAXSIZE: _set_file_cache_maxsize,
ENABLE_CFTIMEINDEX: _warn_on_setting_enable_cftimeindex,
}
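# Example (illustrative): xr.set_options(file_cache_maxsize=256) is routed through
# _SETTERS, resizing the global backends FILE_CACHE before OPTIONS itself is updated.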
def _get_keep_attrs(default):
global_choice = OPTIONS["keep_attrs"]
if global_choice == "default":
return default
elif global_choice in [True, False]:
return global_choice
else:
raise ValueError(
"The global option keep_attrs must be one of" " True, False or 'default'."
)
class set_options:
"""Set options for xarray in a controlled context.
Currently supported options:
- ``display_width``: maximum display width for ``repr`` on xarray objects.
Default: ``80``.
- ``arithmetic_join``: DataArray/Dataset alignment in binary operations.
Default: ``'inner'``.
- ``file_cache_maxsize``: maximum number of open files to hold in xarray's
global least-recently-used cache. This should be smaller than your
system's per-process file descriptor limit, e.g., ``ulimit -n`` on Linux.
Default: 128.
- ``warn_for_unclosed_files``: whether or not to issue a warning when
unclosed files are deallocated (default False). This is mostly useful
for debugging.
- ``cmap_sequential``: colormap to use for nondivergent data plots.
Default: ``viridis``. If string, must be matplotlib built-in colormap.
Can also be a Colormap object (e.g. mpl.cm.magma)
- ``cmap_divergent``: colormap to use for divergent data plots.
Default: ``RdBu_r``. If string, must be matplotlib built-in colormap.
Can also be a Colormap object (e.g. mpl.cm.magma)
- ``keep_attrs``: rule for whether to keep attributes on xarray
Datasets/dataarrays after operations. Either ``True`` to always keep
attrs, ``False`` to always discard them, or ``'default'`` to use original
logic that attrs should only be kept in unambiguous circumstances.
Default: ``'default'``.
- ``display_style``: display style to use in jupyter for xarray objects.
Default: ``'text'``. Other options are ``'html'``.
You can use ``set_options`` either as a context manager:
>>> ds = xr.Dataset({"x": np.arange(1000)})
>>> with xr.set_options(display_width=40):
... print(ds)
<xarray.Dataset>
Dimensions: (x: 1000)
Coordinates:
* x (x) int64 0 1 2 3 4 5 6 ...
Data variables:
*empty*
Or to set global options:
>>> xr.set_options(display_width=80)
"""
def __init__(self, **kwargs):
self.old = {}
for k, v in kwargs.items():
if k not in OPTIONS:
raise ValueError(
"argument name %r is not in the set of valid options %r"
% (k, set(OPTIONS))
)
if k in _VALIDATORS and not _VALIDATORS[k](v):
if k == ARITHMETIC_JOIN:
expected = f"Expected one of {_JOIN_OPTIONS!r}"
elif k == DISPLAY_STYLE:
expected = f"Expected one of {_DISPLAY_OPTIONS!r}"
else:
expected = ""
raise ValueError(
f"option {k!r} given an invalid value: {v!r}. " + expected
)
self.old[k] = OPTIONS[k]
self._apply_update(kwargs)
def _apply_update(self, options_dict):
for k, v in options_dict.items():
if k in _SETTERS:
_SETTERS[k](v)
OPTIONS.update(options_dict)
def __enter__(self):
return
def __exit__(self, type, value, traceback):
self._apply_update(self.old)
| apache-2.0 |
jvkersch/hsmmlearn | docs/conf.py | 1 | 9948 | # -*- coding: utf-8 -*-
#
# hsmmlearn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 1 17:33:24 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# Avoid using C libraries on RTD
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = ['numpy', 'scipy', 'scipy.stats', 'matplotlib',
'matplotlib.pyplot', 'hsmmlearn.base']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'hsmmlearn'
copyright = u'2016, Joris Vankerschaver'
author = u'Joris Vankerschaver'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from
# docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify
# it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'hsmmlearndoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'hsmmlearn.tex', u'hsmmlearn Documentation',
u'Joris Vankerschaver', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'hsmmlearn', u'hsmmlearn Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'hsmmlearn', u'hsmmlearn Documentation',
author, 'hsmmlearn', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gpl-3.0 |
jmrozanec/white-bkg-classification | scripts/preprocessing.py | 1 | 1441 | #https://github.com/tflearn/tflearn/issues/180
from __future__ import division, print_function, absolute_import
import tflearn
from tflearn.data_utils import shuffle, to_categorical
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization, batch_normalization
from tflearn.layers.estimator import regression
from tflearn.data_utils import image_preloader
import skimage
from skimage import data
from skimage import filters
import os
from skimage import io
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
reds="../images/pictures/red/"
greens="../images/pictures/green/"
redshist="../images/histograms/red/"
greenshist="../images/histograms/green/"
directory=reds
histdirectory=redshist
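# The loop below walks the source directory: for every .jpg it computes a
# 255-bin intensity histogram and saves that histogram as a line plot into the
# histogram directory under the same file name.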
for filename in os.listdir(directory):
if filename.endswith(".jpg"):
img = io.imread(os.path.join(directory, filename))
hist, bin_edges = np.histogram(img, bins=255)
bin_centers = 0.5*(bin_edges[:-1] + bin_edges[1:])
binary_img = img > 0.8
plt.figure(figsize=(1,1))
fig, ax = plt.subplots(nrows=1, ncols=1) #http://stackoverflow.com/questions/9622163/save-plot-to-image-file-instead-of-displaying-it-using-matplotlib-so-it-can-be
plt.plot(bin_centers, hist, lw=2)
fig.savefig(os.path.join(histdirectory, filename), bbox_inches='tight')
plt.close()
else:
continue
| apache-2.0 |
dimonaks/siman | siman/functions.py | 1 | 29689 |
from __future__ import division, unicode_literals, absolute_import
import os, tempfile, copy, math, itertools, sys
import datetime, traceback # needed by calc_ac() for history logging
import numpy as np
from operator import itemgetter
from itertools import product
try:
import scipy
except:
print('functions.py: no scipy, smoother() will not work()')
from siman import header
from siman.header import print_and_log, printlog, runBash, eV_A_to_J_m
from siman.small_functions import is_list_like, is_string_like, gunzip_file, makedir, grep_file, setting_sshpass
def unique_elements(seq, idfun=None):
# return only unique_elements order preserving
if idfun is None:
def idfun(x): return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
# in old Python versions:
# if seen.has_key(marker)
# but in new ones:
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
def smoother(x, n, mul = 1, align = 1):
"""
mul - additionally multiplies values
#align - find first non-zero point and return it to zero
#n - smooth value,
if algo = 'gaus' than it is sigma
use something like 0.8
if algo = 'my'
n of 10-15 is good
"""
algo = 'gaus'
# algo = 'my'
if algo == 'my':
x_smooth = []
L = len(x)
store = np.zeros((n,1),float)
for u in range(L-n):
for v in range(n):
store[v] = x[u+v]
av = float(sum(store)) / n
x_smooth.append(av*mul)
for u in range(L-n,L):
for v in range(L-u-1):
store[v] = x[u+v]
av = float(sum(store)) / n
x_smooth.append(av*mul)
elif algo == 'gaus':
x_smooth =x
# x_smooth = scipy.ndimage.filters.median_filter(x,size =4)
# print('sigma is ', n)
x_smooth = scipy.ndimage.filters.gaussian_filter1d(x_smooth, n, order =0)
# x_smooth = scipy.ndimage.interpolation.spline_filter1d(x, 4)
else:
x_smooth = x
if align:
# print(x_smooth[0])
x_smooth[0] = 0
# sys.exit()
return np.asarray(x_smooth)
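def smoother_demo():
    # Illustrative sketch only (synthetic signal, not part of the original siman
    # API): Gaussian smoothing of a noisy sine with sigma = 0.8.
    noisy = np.sin(np.linspace(0, 10, 500)) + 0.1 * np.random.randn(500)
    return smoother(noisy, 0.8)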
def run_on_server(command, addr = None):
printlog('Running', command, 'on server ...')
command = command.replace('\\', '/') # make sure is POSIX
# sys.exit()
# print(header.sshpass)
# sys.exit()
if addr is None:
addr = header.cluster_address
if header.ssh_object:
# printlog('Using paramiko ...', imp = 'y')
# if 'ne' in header.warnings:
# sys.exit()
out = header.ssh_object.run(command, noerror = True, printout = 'ne' in header.warnings)
elif header.sshpass and header.sshpass == 'proxy':
com = 'ssh -tt sdv sshpass -f '+ header.path2pass +' ssh '+addr+' "'+command+'"'
# print(com)
# sys.exit()
out = runBash(com)
# print(out)
out = out.split('Connection to')[0] # remove last message Connection to ipaddress closed
# sys.exit()
elif header.sshpass:
com = 'sshpass -f '+header.path2pass+' ssh '+addr+' "'+command+'"'
# print(com)
# sys.exit()
out = runBash(com)
# sys.exit()
else:
bash_comm = 'ssh '+addr+' "'+command+'"'
# print(bash_comm)
# sys.exit()
out = runBash(bash_comm)
out = out.split('#')[-1].strip()
printlog(out)
# print(out)
# sys.exit()
return out
def push_to_server(files = None, to = None, addr = None):
"""
if header.ssh_object then use paramiko
to (str) - path to remote folder !
"""
if not is_list_like(files):
files = [files]
to = to.replace('\\', '/') # make sure is POSIX
files_str = ' '.join(np.array(files ))
command = ' mkdir -p {:}'.format( to )
# print('asfsadfdsf', to)
printlog('push_to_server():', command, run_on_server(command, addr))
# sys.exit()
printlog('push_to_server(): uploading files ', files, 'to', addr, to)
if header.ssh_object:
for file in files:
# print(file, to)
header.ssh_object.put(file, to+'/'+os.path.basename(file) )
out = ''
elif header.sshpass and header.sshpass == 'proxy':
com = 'tar cf - '+ files_str + ' | ssh sdv "sshpass -f ~/.ssh/p ssh '+addr+' \\"cd '+header.cluster_home+' && tar xvf -\\"" '
# print(com)
# sys.exit()
out = runBash(com)
# print(out)
# sys.exit()
elif header.sshpass:
# if '@' not in addr:
# printlog('Error! Please provide address in the form user@address')
# l = addr.split('@')
# print(l)
# user = l[0]
# ad = l[1]
# com = 'rsync --rsh='+"'sshpass -f /home/aksenov/.ssh/p ssh' " +' -uaz '+files_str+ ' '+addr+':'+to
com = 'rsync --rsh='+"'sshpass -f "+header.path2pass+" ssh' " +' -uaz '+files_str+ ' '+addr+':'+to
# print(com)
# sys.exit()
out = runBash(com)
else:
out = runBash('rsync -uaz '+files_str+ ' '+addr+':'+to)
printlog(out)
return out
def file_exists_on_server(file, addr):
file = file.replace('\\', '/') # make sure is POSIX
printlog('Checking existence of file', file, 'on server', addr )
exist = run_on_server(' ls '+file, addr)
# if header.ssh_object:
# exist = header.ssh_object.fexists(file)
# else:
# exist = runBash('ssh '+addr+' ls '+file)
if 'No such file' in exist:
exist = ''
else:
exist = 'file exists'
if exist:
res = True
else:
res = False
printlog('File exist? ', res)
return res
def get_from_server(files = None, to = None, to_file = None, addr = None, trygz = True):
"""
Download files using either paramiko (higher priority) or rsync;
For paramiko header.ssh_object should be defined
files (list of str) - files on cluster to download
to (str) - path to local folder !
to_file (str) - path to local file (if name should be changed); in this case len(files) should be 1
The gz file is also checked
RETURN
result of download
TODO:
now for each file new connection is opened,
copy them in one connection
"""
# print(addr)
# sys.exit()
def download(file, to_file):
# print(header.sshpass)
if header.ssh_object:
exist = file_exists_on_server(file, addr)
# try:
if exist:
printlog('Using paramiko: ssh_object.get(): from to ', file, to_file)
header.ssh_object.get(file, to_file )
out = ''
# except FileNotFoundError:
else:
out = 'error, file not found'
elif header.sshpass and header.sshpass == 'proxy':
# com = 'ssh sdv "sshpass -f ~/.ssh/p ssh ' + addr + ' \\"tar zcf - '+ file +'\\"" | tar zxf - '+to_file # does not work?
com = 'ssh sdv "sshpass -f ~/.ssh/p ssh ' + addr + ' \\"tar cf - '+ file +'\\"" > '+to_file
# print('sshpass',com)
# sys.exit()
out = runBash(com)
elif header.sshpass:
#com = 'rsync --rsh='+"'sshpass -f /home/aksenov/.ssh/p ssh' " +' -uaz '+addr+':'+file+ ' '+to_file
com = 'rsync --rsh='+"'sshpass -f "+header.path2pass+" ssh' " +' -uaz '+addr+':'+file+ ' '+to_file
out = runBash(com)
# print(addr)
# sys.exit()
else:
# print(addr,file,to_file)
out = runBash('rsync -uaz '+addr+':'+file+ ' '+to_file)
if 'error' in out:
res = out
else:
res = 'OK'
out = ''
printlog('Download result is ', res)
return out
if '*' in files:
printlog('get_from_server(): get by template')
files = run_on_server('ls '+files, addr).splitlines()
# print(files)
# sys.exit()
printlog('get_from_server(): I download', files)
elif not is_list_like(files):
files = [files]
files = [file.replace('\\', '/') for file in files] #make sure the path is POSIX
files_str = ', '.join(np.array(files ))
printlog('Trying to download', files_str, 'from server', imp = 'n')
for file in files:
if not to and not to_file: #use temporary file
with tempfile.NamedTemporaryFile() as f:
to_file_l = f.name #system independent filename
elif not to_file: #obtain filename
to_file_l = os.path.join(to, os.path.basename(file) )
else:
to_file_l = to_file
makedir(to_file_l)
out = download(file, to_file_l)
if out and trygz:
printlog('File', file, 'does not exist, trying gz', imp = 'n')
# run_on_server
files = run_on_server(' ls '+file+'*', addr)
file = files.split()[-1]
# print(file)
nz = file.count('gz')
ext = '.gz'*nz
# file+='.gz'
to_file_l+=ext
if file:
out = download(file, to_file_l)
printlog(' gz found with multiplicity', ext, imp = 'n')
for i in range(nz):
printlog('unzipping', to_file_l)
gunzip_file(to_file_l)
to_file_l = to_file_l[:-3]
else:
printlog(' No gz either!', imp = 'n')
# if '5247' in file:
# sys.exit()
return out
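# Illustrative call patterns for the transfer helpers above; the address and
# paths are hypothetical and only show the expected argument shapes:
#   run_on_server('ls calc/run1', addr='user@cluster')
#   push_to_server(['INCAR', 'POSCAR'], to='~/calc/run1', addr='user@cluster')
#   get_from_server('calc/run1/OUTCAR', to='./run1', addr='user@cluster')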
def salary_inflation():
"""Calculate salary growth in Russia taking into account inflation"""
inflation2000_2014 = [
5.34,
6.45,
6.58,
6.10,
8.78,
8.80,
13.28,
11.87,
9.00 ,
10.91,
11.74,
11.99,
15.06,
18.8,
20.1]
init_salary = 1500 # in jan 2000; other sources 2000 - very important
for i, l in enumerate( reversed(inflation2000_2014) ):
init_salary = (1+l/100)*init_salary
print( init_salary, i+2000)
salary2014 = 30000
increase = salary2014/init_salary
print( increase)
# salary_inflation()
def element_name_inv(el):
el_dict = header.el_dict
nu_dict = header.nu_dict
# print type(el), el, type(str('sdf') )
if is_string_like(el):
try:
elinv = el_dict[el]
except:
print_and_log("Error! Unknown element: " +str(el))
raise RuntimeError
else:
el = int(el)
try:
elinv = nu_dict[el]
except:
print_and_log("Error! Unknown element: "+str(el))
raise RuntimeError
return elinv # inversed notion of element
invert = element_name_inv
def return_atoms_to_cell(st):
st = st.return_atoms_to_cell()
return st
def calc_ac(a1, c1, a2, c2, a_b = 0.1, c_b = 0.1, type = "two_atoms"):
"""
Calculate values of hexagonal lattice parameters for cell with two different atoms.
    The assumptions used are:
    1. The provided lattice constants are for cells large enough that the excess volume (dV) of the impurity does not depend on the cell size.
    2. The two atoms do not interact with each other, which allows one to use dV(CO) = dV(C) + dV(O)
Two regimes:
two_atoms - calculate cell sizes if additional atom was added
double_cell - if cell was doubled; only first cell and second_cell are needed
Input:
a1, c1 - lattice constants of cell with first impurity atom (first cell)
a2, c2 - lattice constants of cell with second impurity atom (second cell)
    a_b, c_b - lattice constants of cell with pure hexagonal metal
Output:
a, c - lattice constants of cell with two atoms
"""
hstring = ("%s #on %s"% (traceback.extract_stack(None, 2)[0][3], datetime.date.today() ) )
if hstring != header.history[-1]: header.history.append( hstring )
A = (a1**2 * c1) + (a2**2 * c2) - (a_b**2 * c_b)
B = 0.5 * (c1/a1 + c2/a2)
C = ( (a1**2 * c1) + (a2**2 * c2) ) * 0.5 #sum of cell volumes divided by 2 since during the construction of new cell we will use multiplication by 2
# print "A,B=",A,B
a = (A/B)**(1./3)
c = a * B
a = round(a,5)
c = round(c,5)
print_and_log( "a, c, c/a for cell with pure hcp ", a_b, c_b, round(c_b/a_b,4), imp ='y' )
print_and_log( "a, c, c/a for cell with first atom ", a1, c1, round(c1/a1,4), imp ='y' )
print_and_log( "a, c, c/a for cell with second atom ", a2, c2, round(c2/a2,4), imp ='y' )
#for double cell
a3 = (C/B)**(1./3)
c3 = a3 * B
a3 = round(a3,5)
c3 = round(c3,5)
if type == "two_atoms":
print_and_log( "a, c, c/a for cell with two atoms ", a, c, round(c/a,4), "# the same cell but with two atoms\n", imp ='y')
elif type == "double_cell":
print_and_log( "a, c, c/a for new cell ", a3, c3, round(c3/a3,4), "# for cell with V = V(first_cell) + V(second cell), but only for the case if V(second cell) == V(first_cell)", imp ='y')
return a, c
def read_charge_den_vasp():
"""
Read CHG vasp file and return ChargeDen object
"""
class ChargeDen():
"""docstring for ChargeDen"""
def __init__(self, ):
# self.arg = arg
pass
def rotation_matrix(axis,theta):
axis = axis/math.sqrt(np.dot(axis,axis))
a = math.cos(theta/2)
b,c,d = -axis*math.sin(theta/2)
return np.array([[a*a+b*b-c*c-d*d, 2*(b*c-a*d), 2*(b*d+a*c)],
[2*(b*c+a*d), a*a+c*c-b*b-d*d, 2*(c*d-a*b)],
[2*(b*d-a*c), 2*(c*d+a*b), a*a+d*d-b*b-c*c]])
def rotate():
v = np.array([3,5,0])
axis = np.array([4,4,1])
theta = 1.2
print(np.dot(rotation_matrix(axis,theta),v))
# [ 2.74911638 4.77180932 1.91629719]
def rotation_matrix_from_vectors(vec1, vec2):
""" Find the rotation matrix that aligns vec1 to vec2
:param vec1: A 3d "source" vector
:param vec2: A 3d "destination" vector
:return mat: A transform matrix (3x3) which when applied to vec1, aligns it with vec2.
"""
a, b = (vec1 / np.linalg.norm(vec1)).reshape(3), (vec2 / np.linalg.norm(vec2)).reshape(3)
v = np.cross(a, b)
c = np.dot(a, b)
s = np.linalg.norm(v)
kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
rotation_matrix = np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2))
return rotation_matrix
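def rotation_matrix_from_vectors_demo():
    # Illustrative check only (made-up vectors, not part of the original module):
    # the returned matrix applied to vec1 should point along vec2.
    vec1 = np.array([1., 0., 0.])
    vec2 = np.array([0., 1., 1.])
    mat = rotation_matrix_from_vectors(vec1, vec2)
    aligned = np.dot(mat, vec1)
    return aligned / np.linalg.norm(aligned) # equals vec2 / |vec2|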
def plot_charge_den():
"""Test function; Was not used"""
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
from matplotlib import cm
fig = plt.figure()
ax = fig.gca(projection='3d')
X, Y, Z = axes3d.get_test_data(0.05)
# print X
# print Y
# print Z
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3)
# cset = ax.contourf(X, Y, Z, zdir='z', offset=-100, cmap=cm.coolwarm)
# cset = ax.contourf(X, Y, Z, zdir='x', offset=-40, cmap=cm.coolwarm)
# cset = ax.contourf(X, Y, Z, zdir='y', offset=40, cmap=cm.coolwarm)
ax.set_xlabel('X')
ax.set_xlim(-40, 40)
ax.set_ylabel('Y')
ax.set_ylim(-40, 40)
ax.set_zlabel('Z')
ax.set_zlim(-100, 100)
plt.show()
return
def plot_interaction(calclist, calc):
"""
    For calculation of the interaction parameter alpha;
    Keep in mind that this parameter is obtained under the approximation of a regular solution
"""
e_seg = []
dX = []
for id in calclist:
Xgb = calc[id].Xgb
X = calc[id].X
dX.append(Xgb/1 - X)
e_seg.append(calc[id].e_seg)
# print calc[id].e_seg
# print calc[id].X
#print dX
coeffs1 = np.polyfit(dX, e_seg, 1)
fit_func1 = np.poly1d(coeffs1)
print( "list of seg energies: ", e_seg )
print( "list of dX : ", dX )
print( "Fitting using linear function:" )
print( fit_func1 )
print( "E_seg0 = {0:0.0f} meV, standart enthalpy of segregation".format(fit_func1[0]) )
print( "alpha = {0:0.0f} meV, interaction coefficient".format(-fit_func1[1]/2) )
return
def calculate_voronoi(self, state = 'end'):
# By default two quantities per atom are calculated by this compute.
# The first is the volume of the Voronoi cell around each atom.
# Any point in an atom's Voronoi cell is closer to that atom than any other.
# The second is the number of faces of the Voronoi cell, which
# is also the number of nearest neighbors of the atom in the middle of the cell.
# state - init or end; if init then saved in self.init.vorovol; if end than saved in self.vorovol
write_lammps(self, state, filepath = 'voronoi_analysis/structure.lammps') #write structure for lammps
runBash("rm voronoi_analysis/dump.voro; /home/aksenov/installed/lammps-1Feb14/src/lmp_serial < voronoi_analysis/voronoi.in > voronoi_analysis/log")
if state == 'end':
self.vorovol = []
self.vorofaces = []
vorovol = self.vorovol
vorofaces = self.vorofaces
elif state == 'init':
self.init.vorovol = []
self.init.vorofaces = []
vorovol = self.init.vorovol
vorofaces = self.init.vorofaces
vsum=0
wlist = []
with open('voronoi_analysis/dump.voro','r') as volfile: #analyze dump.voro
for line in volfile:
if 'ITEM: ATOMS ' in line:
break
for line in volfile:
ll = line.split()
if int(ll[1]) > 1:
wlist.append( [ll[0], ll[5], ll[6], ll[2]] )
# print 'Volume of atom ',ll[0],'is', ll[5]
vsum= vsum+float(ll[5])
print_and_log( 'Check total volume ', vsum, self.end.vol)
wlist.sort(key = itemgetter(0)) #sort according to the position of atoms
print_and_log( "atom #, voronoi vol, voronoi faces, x coordinate: ", )
print_and_log( wlist)
for w in wlist:
vorovol.append(float(w[1]))
vorofaces.append(int(w[2]))
# print 'Voro vol ',self.end.vorovol
# print 'Voro faces',self.end.vorofaces
# print len(wlist)
if hasattr(self, 'vorovol'):
voro = ''
if len(vorovol) == 2: #C and O
voro = " {0:5.2f} & {1:2d} & {2:5.2f} & {3:2d} ".format(vorovol[0], vorofaces[0], vorovol[1], vorofaces[1] ).center(25)
else:
voro = " {0:5.2f} & {1:2d} ".format(vorovol[0], vorofaces[0] ).center(25)
voro+='&'
else:
voro = ""
print_and_log( "Voronoi volume = ", voro, imp = 'y')
return voro
def log_history(hstring):
try:
if hstring != header.history[-1]: header.history.append( hstring )
except:
header.history.append( hstring )
return
def gb_energy_volume(gb,bulk):
if (gb.end.rprimd[1] != bulk.end.rprimd[1]).any() or (gb.end.rprimd[2] != bulk.end.rprimd[2]).any():
print_and_log("Warning! You are trying to calculate gb_energy from cells with different lateral sizes:"+str(gb.end.rprimd)+" "+str(bulk.end.rprimd)+"\n")
#print bulk.vol
V_1at = bulk.vol / bulk.natom #* to_ang**3
E_1at = bulk.energy_sigma0 / bulk.natom
A = np.linalg.norm( np.cross(gb.end.rprimd[1], gb.end.rprimd[2]) ) #surface area of gb
#print A
gb.v_gb = ( gb.vol - V_1at * gb.natom) / A / 2. * 1000
gb.e_gb = ( gb.energy_sigma0 - E_1at * gb.natom) / A / 2. * eV_A_to_J_m * 1000
gb.e_gb_init = ( gb.list_e_sigma0[0] - E_1at * gb.natom) / A / 2. * eV_A_to_J_m * 1000
gb.bulk_extpress = bulk.extpress
#print "Calc %s; e_gb_init = %.3f J/m^2; e_gb = %.3f J/m; v_gb = %.3f angstrom "%(gb.name, gb.e_gb_init, gb.e_gb, gb.v_gb )
outst = "%15s&%7.0f&%7.0f"%(gb.name, gb.e_gb, gb.v_gb)
return outst
def headers():
j = (7,12,14,7,8,9,9,5,5,20,5,20,8,12,20,8,5,8,8)
d="&"
header_for_bands= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\
+d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\
+"Warn!"+d+"nband"+d+"Added, \%"+"\\\\"
header_for_ecut= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\
+d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\
+"Warn!"+d+"Ecut,eV"+"\\\\"
header_for_npar= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\
+d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\
+"Warn!"+d+"NPAR".center(j[16])+d+"LPLANE".center(j[17])+"\\\\"
header_for_kpoints= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\
+d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\
+"Warn!"+d+"k-mesh".center(j[8])+d+"k-spacings".center(j[9])+d+"nkpt".center(j[10])+"\\\\"
header_for_tsmear= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\
+d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\
+"Warn!"+d+"k-mesh".center(j[8])+d+"tsmear, meV".center(j[13])+d+"Smearing error, meV/atom".center(j[14])+"\\\\"
header_for_stress= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\
+d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\
+"Warn!"+d+"Stress, intr u.*1000".center(j[11])+d+"Pressure, MPa".center(j[12])
#print "\\hline"
return header_for_kpoints
def read_vectors(token, number_of_vectors, list_of_words, type_func = None, lists = False):
"""Returns the list of numpy vectors for the last match"""
# lists - return list of lists instead list of vectors
if type_func is None:
type_func = lambda a : float(a)
number_of_matches = list_of_words.count( token )
if number_of_matches == 0:
#print_and_log("Warning token '"+token+"' was not found! return empty\n")
return [None]
if number_of_matches > 1:
print_and_log("Warning token '"+token+"' was found more than one times\n")
raise RuntimeError
index = list_of_words.index(token, number_of_matches - 1 ) #Return the index of the last match
#print list_of_words[index]
list_of_vectors = []
list_of_lists = []
vector = np.zeros((3))
for i in range(number_of_vectors):
vector[0] = type_func(list_of_words[index + 1])
vector[1] = type_func(list_of_words[index + 2])
vector[2] = type_func(list_of_words[index + 3])
list3 = []
for j in 1,2,3:
list3.append(type_func(list_of_words[index + j]) )
index+=3
list_of_vectors.append(vector.copy())
list_of_lists.append(list3)
if lists:
out = list_of_lists
else:
out = list_of_vectors
return out
def read_string(token, length, string):
    sh = len(token)+1
    i = string.find(token)
    # print('length', i, i+length)
    # sys.exit()
    if i == -1:
        return ''
    else:
        return string[i+sh:i+sh+length]
def read_list(token, number_of_elements, ttype, list_of_words):
"""Input is token to find, number of elements to read, type of elements and list of words,
where to search
Returns the list of elements for the last match"""
number_of_matches = list_of_words.count( token )
#if number_of_elements == 0: raise RuntimeError
if number_of_matches > 1:
print_and_log("Warning token '"+token+"' was found more than one times\n")
raise RuntimeError
if number_of_matches == 0 or number_of_elements == 0:
#print_and_log("Warning token '"+token+"' was not found or asked number of elements is zero! set to [None]\n")
#if ttype == str:
# return ['']*number_of_elements
#else:
# return [0]*number_of_elements
return [None]
try:
index = list_of_words.index(token, number_of_matches - 1 ) #Return the index of the last match
except ValueError:
print_and_log("Warning!, token "+token+" was not found. I return [None]!\n")
return [None]
index+=1 #the position of token value
list_of_elements = []
#define function dependig on type:
if ttype == int :
def convert(a):
return int(a)
elif ttype == float:
def convert(a):
# print a
return float(a)
elif ttype == str :
def convert(a):
return str(a)
#print list_of_words[index], type(list_of_words[index])
if list_of_words[index] == "None" :
def convert(a):
return [None]
#Make convertion
for i in range(number_of_elements):
if 'None' in list_of_words[index]:
list_of_elements.append(None)
else:
list_of_elements.append( convert( list_of_words[index] ) )
index+=1
return list_of_elements
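# Illustrative usage of the parsing helpers above with a made-up token list:
#   wl = 'natom 3 acell 1.0 1.0 1.0'.split()
#   read_list('natom', 1, int, wl) # -> [3]
#   read_vectors('acell', 1, wl) # -> [array([ 1., 1., 1.])]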
def words(fileobj):
"""Generator of words. However does not allow to use methods of list for returned"""
for line in fileobj:
for word in line.split():
yield word
def server_cp(copy_file, to, gz = True, scratch = False, new_filename = None):
if scratch:
if not header.PATH2ARCHIVE:
printlog('Warning! PATH2ARCHIVE is empty! Please put path archive in ~/simanrc.py or ./project_conf.py ')
copy_file = header.PATH2ARCHIVE + '/' + copy_file
else:
copy_file = header.project_path_cluster + '/' + copy_file
filename = os.path.basename(copy_file)
if new_filename is None:
new_filename = filename
if gz:
command = 'cp '+copy_file + ' ' + to +'/'+new_filename + '.gz ; gunzip -f '+ to+ '/'+new_filename+'.gz'
else:
command = 'cp '+copy_file + ' ' + to +'/'+new_filename
printlog('Running on server', command, imp = '')
if file_exists_on_server(copy_file, header.cluster_address):
out = run_on_server(command, addr = header.cluster_address)
printlog('Output of run_on_server', out, imp = '')
else:
out = 'error, file does not exist on server: '+copy_file
return out
def wrapper_cp_on_server(file, to, new_filename = None):
"""
    tries the scratch and gz variants iteratively
"""
copy_to = to
copy_file = file
filename = os.path.basename(file)
if new_filename:
app = 'with new name '+new_filename
else:
app = ''
for s, gz in product([0,1], ['', '.gz']):
printlog('scratch, gz:', s, gz)
out = server_cp(copy_file+gz, to = to, gz = gz, scratch = s, new_filename = new_filename)
if out == '':
            printlog('File', filename, 'was successfully copied to', to, app, imp = 'y')
break
# else:
else:
printlog('Warning! File was not copied, probably it does not exist. Try using header.warnings = "neyY" for more details', imp = 'y')
return
def update_incar(parameter = None, value = None, u_ramp_step = None, write = True, f = None, run = False, st = None):
"""Modifications of INCAR. Take attention that *parameter* will be changed to new *value*
if it only already exist in INCAR. *u_ramp_step*-current step to determine u,
*write*-sometimes just the return value is needed.
Returns U value corresponding to *u_ramp_step*.
"""
self = st
u_step = None
if parameter == 'LDAUU':
#Update only non-zero elements of LDAUU with value
set_LDAUU_list = self.set.vasp_params['LDAUU']
new_LDAUU_list = copy.deepcopy(set_LDAUU_list)
# print set_LDAUU_list
u_step = 0.0
for i, u in enumerate(set_LDAUU_list):
if u == 0:
continue
u_step = np.linspace(0, u, self.set.u_ramping_nstep)[u_ramp_step]
u_step = np.round(u_step, 1)
# new_LDAUU_list[i] = value
new_LDAUU_list[i] = u_step
new_LDAUU = 'LDAUU = '+' '.join(['{:}']*len(new_LDAUU_list)).format(*new_LDAUU_list)
command = "sed -i.bak '/LDAUU/c\\" + new_LDAUU + "' INCAR\n"
#print('u_step',u_step)
#sys.exit()
elif parameter == 'MAGMOM':
new_incar_string = parameter + ' = ' + ' '.join(['{:}']*len(value)).format(*value)
command = "sed -i.bak '/"+parameter+"/c\\" + new_incar_string + "' INCAR\n"
# elif parameter in ['IMAGES', 'ISPIN']:
else:
new_incar_string = parameter + ' = ' + str(value)
command = "sed -i.bak '/"+parameter+"/c\\" + new_incar_string + "' INCAR\n"
if write and f:
f.write(command)
if run:
runBash(command)
return u_step #for last element
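# Illustrative sketch of the command update_incar() builds for a scalar
# parameter (hypothetical values): with parameter='ENCUT' and value=400 it is
#   sed -i.bak '/ENCUT/c\ENCUT = 400' INCAR
# which is appended to the batch script f when write=True or executed locally
# when run=True.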
def check_output(filename, check_string, load):
"""
    Check if the file exists and is finished, by searching for check_string
"""
if filename and os.path.exists(filename):
out = grep_file(check_string, filename, reverse = True)
printlog('The grep result of',filename, 'is:', out)
# sys.exit()
if check_string in out or 'un' in load:
state = '4. Finished'
else:
state = '5. Broken outcar'
else:
state = '5. no OUTCAR'
return state
| gpl-2.0 |
yuvrajsingh86/DeepLearning_Udacity | weight-initialization/helper.py | 153 | 3649 | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def hist_dist(title, distribution_tensor, hist_range=(-4, 4)):
"""
Display histogram of a TF distribution
"""
with tf.Session() as sess:
values = sess.run(distribution_tensor)
plt.title(title)
    plt.hist(values, np.linspace(*hist_range, num=len(values)//2))  # integer bin count for np.linspace
plt.show()
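# Illustrative call (assumed, not from the original notebook): visualize the
# spread of a normally-initialized weight tensor.
#   hist_dist('Random Normal (stddev=1)', tf.random_normal([1000]))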
def _get_loss_acc(dataset, weights):
"""
Get losses and validation accuracy of example neural network
"""
batch_size = 128
epochs = 2
learning_rate = 0.001
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
learn_rate = tf.placeholder(tf.float32)
biases = [
tf.Variable(tf.zeros([256])),
tf.Variable(tf.zeros([128])),
tf.Variable(tf.zeros([dataset.train.labels.shape[1]]))
]
# Layers
layer_1 = tf.nn.relu(tf.matmul(features, weights[0]) + biases[0])
layer_2 = tf.nn.relu(tf.matmul(layer_1, weights[1]) + biases[1])
logits = tf.matmul(layer_2, weights[2]) + biases[2]
# Training loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# Optimizer
optimizer = tf.train.AdamOptimizer(learn_rate).minimize(loss)
# Accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Measurements use for graphing loss
loss_batch = []
with tf.Session() as session:
session.run(tf.global_variables_initializer())
batch_count = int((dataset.train.num_examples / batch_size))
# The training cycle
for epoch_i in range(epochs):
for batch_i in range(batch_count):
batch_features, batch_labels = dataset.train.next_batch(batch_size)
# Run optimizer and get loss
session.run(
optimizer,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
l = session.run(
loss,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
loss_batch.append(l)
valid_acc = session.run(
accuracy,
feed_dict={features: dataset.validation.images, labels: dataset.validation.labels, learn_rate: 1.0})
# Hack to Reset batches
dataset.train._index_in_epoch = 0
dataset.train._epochs_completed = 0
return loss_batch, valid_acc
def compare_init_weights(
dataset,
title,
weight_init_list,
plot_n_batches=100):
"""
Plot loss and print stats of weights using an example neural network
"""
colors = ['r', 'b', 'g', 'c', 'y', 'k']
label_accs = []
label_loss = []
assert len(weight_init_list) <= len(colors), 'Too many inital weights to plot'
for i, (weights, label) in enumerate(weight_init_list):
loss, val_acc = _get_loss_acc(dataset, weights)
plt.plot(loss[:plot_n_batches], colors[i], label=label)
label_accs.append((label, val_acc))
label_loss.append((label, loss[-1]))
plt.title(title)
plt.xlabel('Batches')
plt.ylabel('Loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
print('After 858 Batches (2 Epochs):')
print('Validation Accuracy')
for label, val_acc in label_accs:
print(' {:7.3f}% -- {}'.format(val_acc*100, label))
print('Loss')
for label, loss in label_loss:
print(' {:7.3f} -- {}'.format(loss, label))
| mit |
ravenshooter/BA_Analysis | Preprocess.py | 1 | 5604 |
import numpy
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import scipy
import mdp
import csv
from thread import start_new_thread
import DataSet
from DataAnalysis import plot
from Main import getProjectPath
def readFileToNumpy(fileName):
reader=csv.reader(open(fileName,"rb"),delimiter=',')
x=list(reader)
return numpy.array(x[1:]).astype('float')
def separateInputData(fileData,removeErrors=True):
if removeErrors:
error_inds = fileData[:,-1]==False
fileData = fileData[error_inds]
fused = numpy.atleast_2d(fileData[:,1:4])
gyro = numpy.atleast_2d(fileData[:,4:7])
acc = numpy.atleast_2d(fileData[:,7:10])
targets = numpy.atleast_2d(fileData[:,10:])
return fused, gyro, acc, targets
def transformToDelta(vals):
newVals = numpy.zeros((len(vals),len(vals[0])))
for i in range(1,len(vals)):
newVals[i-1] = vals[i]-vals[i-1]
return newVals
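# removeLOverflow(): undoes +/-pi wrap-around in the fused orientation signal;
# whenever consecutive samples jump by more than pi, the sign of all remaining
# samples in that column is flipped.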
def removeLOverflow(fused):
for j in range(0,3):
for i in range(1,len(fused)):
if numpy.abs(fused[i-1,j] - fused[i,j]) > numpy.pi:
fused[i:,j] = fused[i:,j] * -1
return fused
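# applyActivationFilter(): overall activation level per sample, computed as the
# moving average (window of +/- width samples) of the summed absolute values
# over all input channels.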
def applyActivationFilter(inputData, width):
actLevel = numpy.sum(numpy.abs(inputData),1)
target = numpy.zeros((len(inputData),1))
for i in range(width,len(inputData-width)):
target[i] = numpy.mean(actLevel[i-width:i+width])
return target
def centerAndNormalize(inputData):
means = numpy.mean(inputData, 0)
centered = inputData - means
vars = numpy.std(centered, 0)
normalized = centered/vars
return normalized, means, vars
def getTrainingBeginAndEndIndex(targetSig):
beginInd = 0
endInd = len(targetSig)
for i in range(0,len(targetSig)):
if targetSig[i] == 1:
beginInd= i-1;
break
for i in range(0,len(targetSig)):
if targetSig[len(targetSig)-1-i] == 1:
endInd= len(targetSig)-i;
break
return beginInd,endInd
def formatDataSet(data):
print data.shape
newStart = input("Start:")
newEnd = input("End:")
newData = data[newStart:newEnd,:]
return newData
def formatTargetFilter(data):
treshold = input('Treshold:')
targetFunction = applyFormatTargetFilter(data, treshold)
plt.figure()
plt.plot(data[:,9])
plt.plot(data[:,10])
plt.plot(targetFunction)
return targetFunction
def applyFormatTargetFilter(data, treshold):
targetFunction = (data[:,10] > treshold).astype(float)
return numpy.atleast_2d(targetFunction).T
def removeArea(data):
cutOutStart = input("Start:")
cutOutEnd = input("End:")
newDataStart = data[:cutOutStart,:]
newDataEnd = data[cutOutEnd:,:]
return numpy.concatenate((newDataStart,newDataEnd))
def plotData(data):
plt.figure()
plt.clf()
plt.subplot(411)
plt.title('Fused')
plt.plot(data[:,0:3])
plt.plot(data[:,9])
plt.plot(data[:,10])
plt.subplot(412)
plt.title('Gyro')
plt.plot(data[:,3:6])
plt.plot(data[:,9])
plt.plot(data[:,10])
plt.subplot(413)
plt.title('Acc')
plt.plot(data[:,6:9])
plt.plot(data[:,9])
plt.plot(data[:,10])
plt.subplot(414)
plt.title('Targets')
plt.plot(data[:,9])
plt.plot(data[:,10])
plt.show()
def writeToCSV(data,fileName):
numpy.savetxt(getProjectPath()+"\\dataSets\\"+fileName+".csv", data, delimiter=";")
def safeToDataSet(fileName, data, means, stds, gestures, targetTreshold):
ds = DataSet.DataSet(data[:,0:3],data[:,3:6],data[:,6:9],numpy.append(data[:,9:], applyFormatTargetFilter(data, targetTreshold), 1), \
means, stds, gestures)
ds.writeToFile(fileName)
def load(nr):
global i
plt.close('all')
i = readFile("nadja\\nadja_"+str(nr)+".csv")
plotData(i)
def safe(inputData,aaa,nr):
writeToCSV(numpy.concatenate((inputData,numpy.atleast_2d(aaa).T),1),"nadja_fitted_"+str(nr))
def readFile(fileName):
return readFileToNumpy(getProjectPath()+'dataSets\\'+fileName)
if __name__ == '__main__':
#def main():
inputFileName = ["2016-03-14-10-30-47-nike_fullSet_0.csv"]
fileData = numpy.zeros((1,31))
for fileName in inputFileName:
newData = readFileToNumpy(getProjectPath()+'dataSets\\'+fileName)
print newData.shape
fileData = numpy.append(fileData,newData,0)
fused, gyro, acc, targets = separateInputData(fileData)
#fused = removeLOverflow(fused)
#fused = transformToDelta(fused)
_, f_means, f_stds = centerAndNormalize(fused)
_, g_means, g_stds = centerAndNormalize(gyro)
_, a_means, a_stds = centerAndNormalize(acc)
means = numpy.concatenate((f_means,g_means,a_means),0)
stds = numpy.concatenate((f_stds,g_stds,a_stds),0)
gestures = numpy.max(targets,0)
dataSets = []
gestureSets = []
for i in range(0,len(targets[0])):
start, end = getTrainingBeginAndEndIndex(targets[:,i])
t_fused = fused[start:end,:]
t_gyro = gyro[start:end,:]
t_acc = acc[start:end,:]
t_target =numpy.atleast_2d(targets[start:end,i]).T
t_accFilter = applyActivationFilter(numpy.concatenate((t_fused,t_gyro,t_acc),1),6)
a = numpy.concatenate((t_fused,t_gyro,t_acc,t_target,t_accFilter),1)
dataSets.append(a)
gestureSets.append(numpy.max(targets[start:end,:],0))
| mit |
junwoo091400/MyCODES | Projects/FootPad_Logger/logged_data_analyzer_LSTM/RNN_LSTM.py | 1 | 2131 |
from __future__ import print_function, division
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import ipdb
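# RNN_LSTM() builds a truncated-backprop LSTM graph: the cell and hidden states
# are fed in through placeholders (wrapped in an LSTMStateTuple) so the final
# state of one truncated window can be passed back in as the initial state of
# the next window during training.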
def RNN_LSTM(batch_size_in = 5, total_len_in = 30000, pad_len_in = 5, backprop_len_in = 50, state_size_in = 10, num_class_in = 32):
# total_len_in = (backprop_len_in) * (num_batches)
# Get inputs.
batch_size = batch_size_in
total_series_length = total_len_in
pad_length = pad_len_in
truncated_backprop_length = backprop_len_in
state_size = state_size_in
num_classes = num_class_in
num_batches = total_series_length // truncated_backprop_length
#Model generate
batchX_placeholder = tf.placeholder(tf.float32, [batch_size, truncated_backprop_length, pad_length])
batchY_placeholder = tf.placeholder(tf.int32, [batch_size, truncated_backprop_length])
cell_state = tf.placeholder(tf.float32, [batch_size, state_size])
hidden_state = tf.placeholder(tf.float32, [batch_size, state_size])
init_state = tf.nn.rnn_cell.LSTMStateTuple(cell_state, hidden_state)
# LSTM -> classes.
W2 = tf.Variable(np.random.rand(state_size, num_classes),dtype=tf.float32)
b2 = tf.Variable(np.zeros((1, num_classes)), dtype=tf.float32)
# Unpack columns
inputs_series = tf.unstack(batchX_placeholder, axis=1)
labels_series = tf.unstack(batchY_placeholder, axis=1) # Becomes [truncated_len, batch_size]
# Forward passes
cell = tf.contrib.rnn.BasicLSTMCell(state_size, state_is_tuple=True)
states_series, current_state = tf.contrib.rnn.static_rnn(cell, inputs_series, init_state)#Input 'init_state' + 'inputs_series' + 'cell'
logits_series = [tf.matmul(state, W2) + b2 for state in states_series] #Broadcasted addition
predictions_series = [tf.nn.softmax(logits) for logits in logits_series]
losses = [tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels) for logits, labels in zip(logits_series,labels_series)]
total_loss = tf.reduce_mean(losses)
train_step = tf.train.AdagradOptimizer(0.3).minimize(total_loss)
return (batchX_placeholder, batchY_placeholder, cell_state, hidden_state, current_state, predictions_series, W2, b2, cell, train_step, total_loss) | gpl-3.0 |
AIML/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each components
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
MartinDelzant/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
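# Sorting rows and columns by their cluster labels groups each bicluster into a
# contiguous block, which makes the recovered checkerboard structure visible.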
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
eistre91/ThinkStats2 | code/timeseries.py | 66 | 18035 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import statsmodels.formula.api as smf
import statsmodels.tsa.stattools as smtsa
import matplotlib.pyplot as pyplot
import thinkplot
import thinkstats2
FORMATS = ['png']
def ReadData():
"""Reads data about cannabis transactions.
http://zmjones.com/static/data/mj-clean.csv
returns: DataFrame
"""
transactions = pandas.read_csv('mj-clean.csv', parse_dates=[5])
return transactions
def tmean(series):
"""Computes a trimmed mean.
series: Series
returns: float
"""
t = series.values
n = len(t)
if n <= 3:
return t.mean()
trim = max(1, n/10)
return np.mean(sorted(t)[trim:n-trim])
def GroupByDay(transactions, func=np.mean):
"""Groups transactions by day and compute the daily mean ppg.
transactions: DataFrame of transactions
returns: DataFrame of daily prices
"""
groups = transactions[['date', 'ppg']].groupby('date')
daily = groups.aggregate(func)
daily['date'] = daily.index
start = daily.date[0]
one_year = np.timedelta64(1, 'Y')
daily['years'] = (daily.date - start) / one_year
return daily
def GroupByQualityAndDay(transactions):
"""Divides transactions by quality and computes mean daily price.
transaction: DataFrame of transactions
returns: map from quality to time series of ppg
"""
groups = transactions.groupby('quality')
dailies = {}
for name, group in groups:
dailies[name] = GroupByDay(group)
return dailies
def PlotDailies(dailies):
"""Makes a plot with daily prices for different qualities.
dailies: map from name to DataFrame
"""
thinkplot.PrePlot(rows=3)
for i, (name, daily) in enumerate(dailies.items()):
thinkplot.SubPlot(i+1)
title = 'price per gram ($)' if i == 0 else ''
thinkplot.Config(ylim=[0, 20], title=title)
thinkplot.Scatter(daily.ppg, s=10, label=name)
if i == 2:
pyplot.xticks(rotation=30)
else:
thinkplot.Config(xticks=[])
thinkplot.Save(root='timeseries1',
formats=FORMATS)
def RunLinearModel(daily):
"""Runs a linear model of prices versus years.
daily: DataFrame of daily prices
returns: model, results
"""
model = smf.ols('ppg ~ years', data=daily)
results = model.fit()
return model, results
def PlotFittedValues(model, results, label=''):
"""Plots original data and fitted values.
model: StatsModel model object
results: StatsModel results object
"""
years = model.exog[:, 1]
values = model.endog
thinkplot.Scatter(years, values, s=15, label=label)
thinkplot.Plot(years, results.fittedvalues, label='model')
def PlotResiduals(model, results):
"""Plots the residuals of a model.
model: StatsModel model object
results: StatsModel results object
"""
years = model.exog[:, 1]
thinkplot.Plot(years, results.resid, linewidth=0.5, alpha=0.5)
def PlotResidualPercentiles(model, results, index=1, num_bins=20):
"""Plots percentiles of the residuals.
model: StatsModel model object
results: StatsModel results object
index: which exogenous variable to use
num_bins: how many bins to divide the x-axis into
"""
exog = model.exog[:, index]
resid = results.resid.values
df = pandas.DataFrame(dict(exog=exog, resid=resid))
bins = np.linspace(np.min(exog), np.max(exog), num_bins)
indices = np.digitize(exog, bins)
groups = df.groupby(indices)
means = [group.exog.mean() for _, group in groups][1:-1]
cdfs = [thinkstats2.Cdf(group.resid) for _, group in groups][1:-1]
thinkplot.PrePlot(3)
for percent in [75, 50, 25]:
percentiles = [cdf.Percentile(percent) for cdf in cdfs]
label = '%dth' % percent
thinkplot.Plot(means, percentiles, label=label)
def SimulateResults(daily, iters=101, func=RunLinearModel):
"""Run simulations based on resampling residuals.
daily: DataFrame of daily prices
iters: number of simulations
func: function that fits a model to the data
returns: list of result objects
"""
_, results = func(daily)
fake = daily.copy()
result_seq = []
for _ in range(iters):
fake.ppg = results.fittedvalues + thinkstats2.Resample(results.resid)
_, fake_results = func(fake)
result_seq.append(fake_results)
return result_seq
def SimulateIntervals(daily, iters=101, func=RunLinearModel):
"""Run simulations based on different subsets of the data.
daily: DataFrame of daily prices
iters: number of simulations
func: function that fits a model to the data
returns: list of result objects
"""
result_seq = []
starts = np.linspace(0, len(daily), iters).astype(int)
for start in starts[:-2]:
subset = daily[start:]
_, results = func(subset)
fake = subset.copy()
for _ in range(iters):
fake.ppg = (results.fittedvalues +
thinkstats2.Resample(results.resid))
_, fake_results = func(fake)
result_seq.append(fake_results)
return result_seq
def GeneratePredictions(result_seq, years, add_resid=False):
"""Generates an array of predicted values from a list of model results.
When add_resid is False, predictions represent sampling error only.
When add_resid is True, they also include residual error (which is
more relevant to prediction).
result_seq: list of model results
years: sequence of times (in years) to make predictions for
add_resid: boolean, whether to add in resampled residuals
returns: sequence of predictions
"""
n = len(years)
d = dict(Intercept=np.ones(n), years=years, years2=years**2)
predict_df = pandas.DataFrame(d)
predict_seq = []
for fake_results in result_seq:
predict = fake_results.predict(predict_df)
if add_resid:
predict += thinkstats2.Resample(fake_results.resid, n)
predict_seq.append(predict)
return predict_seq
def GenerateSimplePrediction(results, years):
"""Generates a simple prediction.
results: results object
years: sequence of times (in years) to make predictions for
returns: sequence of predicted values
"""
n = len(years)
inter = np.ones(n)
d = dict(Intercept=inter, years=years, years2=years**2)
predict_df = pandas.DataFrame(d)
predict = results.predict(predict_df)
return predict
def PlotPredictions(daily, years, iters=101, percent=90, func=RunLinearModel):
"""Plots predictions.
daily: DataFrame of daily prices
years: sequence of times (in years) to make predictions for
iters: number of simulations
percent: what percentile range to show
func: function that fits a model to the data
"""
result_seq = SimulateResults(daily, iters=iters, func=func)
p = (100 - percent) / 2
percents = p, 100-p
predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.3, color='gray')
predict_seq = GeneratePredictions(result_seq, years, add_resid=False)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.5, color='gray')
def PlotIntervals(daily, years, iters=101, percent=90, func=RunLinearModel):
"""Plots predictions based on different intervals.
daily: DataFrame of daily prices
years: sequence of times (in years) to make predictions for
iters: number of simulations
percent: what percentile range to show
func: function that fits a model to the data
"""
result_seq = SimulateIntervals(daily, iters=iters, func=func)
p = (100 - percent) / 2
percents = p, 100-p
predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.2, color='gray')
def Correlate(dailies):
"""Compute the correlation matrix between prices for difference qualities.
dailies: map from quality to time series of ppg
returns: correlation matrix
"""
df = pandas.DataFrame()
for name, daily in dailies.items():
df[name] = daily.ppg
return df.corr()
def CorrelateResid(dailies):
"""Compute the correlation matrix between residuals.
dailies: map from quality to time series of ppg
returns: correlation matrix
"""
df = pandas.DataFrame()
for name, daily in dailies.items():
_, results = RunLinearModel(daily)
df[name] = results.resid
return df.corr()
def TestCorrelateResid(dailies, iters=101):
"""Tests observed correlations.
dailies: map from quality to time series of ppg
iters: number of simulations
"""
t = []
names = ['high', 'medium', 'low']
for name in names:
daily = dailies[name]
t.append(SimulateResults(daily, iters=iters))
corr = CorrelateResid(dailies)
arrays = []
for result_seq in zip(*t):
df = pandas.DataFrame()
for name, results in zip(names, result_seq):
df[name] = results.resid
opp_sign = corr * df.corr() < 0
arrays.append((opp_sign.astype(int)))
print(np.sum(arrays))
def RunModels(dailies):
"""Runs linear regression for each group in dailies.
dailies: map from group name to DataFrame
"""
rows = []
for daily in dailies.values():
_, results = RunLinearModel(daily)
intercept, slope = results.params
p1, p2 = results.pvalues
r2 = results.rsquared
s = r'%0.3f (%0.2g) & %0.3f (%0.2g) & %0.3f \\'
row = s % (intercept, p1, slope, p2, r2)
rows.append(row)
# print results in a LaTeX table
print(r'\begin{tabular}{|c|c|c|}')
print(r'\hline')
print(r'intercept & slope & $R^2$ \\ \hline')
for row in rows:
print(row)
print(r'\hline')
print(r'\end{tabular}')
def FillMissing(daily, span=30):
"""Fills missing values with an exponentially weighted moving average.
Resulting DataFrame has new columns 'ewma' and 'resid'.
daily: DataFrame of daily prices
span: window size (sort of) passed to ewma
returns: new DataFrame of daily prices
"""
dates = pandas.date_range(daily.index.min(), daily.index.max())
reindexed = daily.reindex(dates)
ewma = pandas.ewma(reindexed.ppg, span=span)
resid = (reindexed.ppg - ewma).dropna()
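    # Imputed values are the EWMA plus resampled residuals, so filled-in days
    # keep the same noise level as the observed days.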
fake_data = ewma + thinkstats2.Resample(resid, len(reindexed))
reindexed.ppg.fillna(fake_data, inplace=True)
reindexed['ewma'] = ewma
reindexed['resid'] = reindexed.ppg - ewma
return reindexed
def AddWeeklySeasonality(daily):
"""Adds a weekly pattern.
daily: DataFrame of daily prices
returns: new DataFrame of daily prices
"""
frisat = (daily.index.dayofweek==4) | (daily.index.dayofweek==5)
fake = daily.copy()
fake.ppg[frisat] += np.random.uniform(0, 2, frisat.sum())
return fake
def PrintSerialCorrelations(dailies):
"""Prints a table of correlations with different lags.
dailies: map from category name to DataFrame of daily prices
"""
filled_dailies = {}
for name, daily in dailies.items():
filled_dailies[name] = FillMissing(daily, span=30)
# print serial correlations for raw price data
for name, filled in filled_dailies.items():
corr = thinkstats2.SerialCorr(filled.ppg, lag=1)
print(name, corr)
rows = []
for lag in [1, 7, 30, 365]:
row = [str(lag)]
for name, filled in filled_dailies.items():
corr = thinkstats2.SerialCorr(filled.resid, lag)
row.append('%.2g' % corr)
rows.append(row)
print(r'\begin{tabular}{|c|c|c|c|}')
print(r'\hline')
print(r'lag & high & medium & low \\ \hline')
for row in rows:
print(' & '.join(row) + r' \\')
print(r'\hline')
print(r'\end{tabular}')
filled = filled_dailies['high']
acf = smtsa.acf(filled.resid, nlags=365, unbiased=True)
print('%0.3f, %0.3f, %0.3f, %0.3f, %0.3f' %
(acf[0], acf[1], acf[7], acf[30], acf[365]))
def SimulateAutocorrelation(daily, iters=1001, nlags=40):
"""Resample residuals, compute autocorrelation, and plot percentiles.
daily: DataFrame
iters: number of simulations to run
nlags: maximum lags to compute autocorrelation
"""
# run simulations
t = []
for _ in range(iters):
filled = FillMissing(daily, span=30)
resid = thinkstats2.Resample(filled.resid)
acf = smtsa.acf(resid, nlags=nlags, unbiased=True)[1:]
t.append(np.abs(acf))
high = thinkstats2.PercentileRows(t, [97.5])[0]
low = -high
lags = range(1, nlags+1)
thinkplot.FillBetween(lags, low, high, alpha=0.2, color='gray')
def PlotAutoCorrelation(dailies, nlags=40, add_weekly=False):
"""Plots autocorrelation functions.
dailies: map from category name to DataFrame of daily prices
nlags: number of lags to compute
add_weekly: boolean, whether to add a simulated weekly pattern
"""
thinkplot.PrePlot(3)
daily = dailies['high']
SimulateAutocorrelation(daily)
for name, daily in dailies.items():
if add_weekly:
daily = AddWeeklySeasonality(daily)
filled = FillMissing(daily, span=30)
acf = smtsa.acf(filled.resid, nlags=nlags, unbiased=True)
lags = np.arange(len(acf))
thinkplot.Plot(lags[1:], acf[1:], label=name)
def MakeAcfPlot(dailies):
"""Makes a figure showing autocorrelation functions.
dailies: map from category name to DataFrame of daily prices
"""
axis = [0, 41, -0.2, 0.2]
thinkplot.PrePlot(cols=2)
PlotAutoCorrelation(dailies, add_weekly=False)
thinkplot.Config(axis=axis,
loc='lower right',
ylabel='correlation',
xlabel='lag (day)')
thinkplot.SubPlot(2)
PlotAutoCorrelation(dailies, add_weekly=True)
thinkplot.Save(root='timeseries9',
axis=axis,
loc='lower right',
xlabel='lag (days)',
formats=FORMATS)
def PlotRollingMean(daily, name):
"""Plots rolling mean and EWMA.
daily: DataFrame of daily prices
"""
dates = pandas.date_range(daily.index.min(), daily.index.max())
reindexed = daily.reindex(dates)
thinkplot.PrePlot(cols=2)
thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.1, label=name)
roll_mean = pandas.rolling_mean(reindexed.ppg, 30)
thinkplot.Plot(roll_mean, label='rolling mean')
pyplot.xticks(rotation=30)
thinkplot.Config(ylabel='price per gram ($)')
thinkplot.SubPlot(2)
thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.1, label=name)
ewma = pandas.ewma(reindexed.ppg, span=30)
thinkplot.Plot(ewma, label='EWMA')
pyplot.xticks(rotation=30)
thinkplot.Save(root='timeseries10',
formats=FORMATS)
def PlotFilled(daily, name):
"""Plots the EWMA and filled data.
daily: DataFrame of daily prices
"""
filled = FillMissing(daily, span=30)
thinkplot.Scatter(filled.ppg, s=15, alpha=0.3, label=name)
thinkplot.Plot(filled.ewma, label='EWMA', alpha=0.4)
pyplot.xticks(rotation=30)
thinkplot.Save(root='timeseries8',
ylabel='price per gram ($)',
formats=FORMATS)
def PlotLinearModel(daily, name):
"""Plots a linear fit to a sequence of prices, and the residuals.
daily: DataFrame of daily prices
name: string
"""
model, results = RunLinearModel(daily)
PlotFittedValues(model, results, label=name)
thinkplot.Save(root='timeseries2',
title='fitted values',
xlabel='years',
xlim=[-0.1, 3.8],
ylabel='price per gram ($)',
formats=FORMATS)
PlotResidualPercentiles(model, results)
thinkplot.Save(root='timeseries3',
title='residuals',
xlabel='years',
ylabel='price per gram ($)',
formats=FORMATS)
#years = np.linspace(0, 5, 101)
#predict = GenerateSimplePrediction(results, years)
def main(name):
thinkstats2.RandomSeed(18)
transactions = ReadData()
dailies = GroupByQualityAndDay(transactions)
PlotDailies(dailies)
RunModels(dailies)
PrintSerialCorrelations(dailies)
MakeAcfPlot(dailies)
name = 'high'
daily = dailies[name]
PlotLinearModel(daily, name)
PlotRollingMean(daily, name)
PlotFilled(daily, name)
years = np.linspace(0, 5, 101)
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
PlotPredictions(daily, years)
xlim = years[0]-0.1, years[-1]+0.1
thinkplot.Save(root='timeseries4',
title='predictions',
xlabel='years',
xlim=xlim,
ylabel='price per gram ($)',
formats=FORMATS)
name = 'medium'
daily = dailies[name]
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
PlotIntervals(daily, years)
PlotPredictions(daily, years)
xlim = years[0]-0.1, years[-1]+0.1
thinkplot.Save(root='timeseries5',
title='predictions',
xlabel='years',
xlim=xlim,
ylabel='price per gram ($)',
formats=FORMATS)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
hrjn/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
boada/planckClusters | MOSAICpipe/bpz-1.99.3/bpz.py | 1 | 52171 | """
bpz: Bayesian Photo-Z estimation
Reference: Benitez 2000, ApJ, 536, p.571
Usage:
python bpz.py catalog.cat
Needs a catalog.columns file which describes the contents of catalog.cat
"""
from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import map
from builtins import input
from builtins import range
from past.utils import old_div
from useful import *
rolex = watch()
rolex.set()
#from Numeric import *
from numpy import *
from bpz_tools import *
from string import *
import os, glob, sys
import time
import pickle
import shelve
from coetools import pause, params_cl
class Printer():
"""Print things to stdout on one line dynamically"""
def __init__(self, data):
sys.stdout.write("\r\x1b[K" + data.__str__())
sys.stdout.flush()
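# For illustration: Printer("42.0% of 100 completed.") overwrites the current
# stdout line in place; calling it repeatedly, as done in the main galaxy loop
# further below, renders a simple single-line progress indicator.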
def seglist(vals, mask=None):
"""Split vals into lists based on mask > 0"""
    if mask is None:
mask = greater(vals, 0)
lists = []
i = 0
lastgood = False
list1 = []
for i in range(len(vals)):
if mask[i] == False:
if lastgood:
lists.append(list1)
list1 = []
lastgood = False
if mask[i]:
list1.append(vals[i])
lastgood = True
if lastgood:
lists.append(list1)
return lists
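# For illustration: with vals=[0.5, 0.7, 0.0, 0.9] and the default mask
# greater(vals, 0), seglist returns [[0.5, 0.7], [0.9]], i.e. the contiguous
# runs of values whose mask entries are True.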
# Initialization and definitions#
#Current directory
homedir = os.getcwd()
#Parameter definition
pars = params()
pars.d = {
'SPECTRA': 'CWWSB4.list', # template list
#'PRIOR': 'hdfn_SB', # prior name
'PRIOR': 'hdfn_gen', # prior name
'NTYPES':
None, # Number of Elliptical, Spiral, and Starburst/Irregular templates Default: 1,2,n-3
'DZ': 0.01, # redshift resolution
'ZMIN': 0.01, # minimum redshift
'ZMAX': 10., # maximum redshift
'MAG': 'yes', # Data in magnitudes?
'MIN_MAGERR': 0.001, # minimum magnitude uncertainty --DC
'ODDS': 0.95, # Odds threshold: affects confidence limits definition
'INTERP':
0, # Number of interpolated templates between each of the original ones
'EXCLUDE': 'none', # Filters to be excluded from the estimation
'NEW_AB': 'no', # If yes, generate new AB files even if they already exist
'CHECK':
'yes', # Perform some checks, compare observed colors with templates, etc.
'VERBOSE': 'yes', # Print estimated redshifts to the standard output
'PROBS':
'no', # Save all the galaxy probability distributions (it will create a very large file)
'PROBS2':
'no', # Save all the galaxy probability distributions P(z,t) (but not priors) -- Compact
'PROBS_LITE': 'yes', # Save only the final probability distribution
'GET_Z': 'yes', # Actually obtain photo-z
'ONLY_TYPE': 'no', # Use spectroscopic redshifts instead of photo-z
'MADAU': 'yes', #Apply Madau correction to spectra
'Z_THR': 0, #Integrate probability for z>z_thr
'COLOR': 'no', #Use colors instead of fluxes
'PLOTS': 'no', #Don't produce plots
'INTERACTIVE': 'yes', #Don't query the user
'PHOTO_ERRORS':
'no', #Define the confidence interval using only the photometric errors
'MIN_RMS':
0.05, #"Intrinsic" photo-z rms in dz /(1+z) (Change to 0.05 for templates from Benitez et al. 2004
'N_PEAKS': 1,
'MERGE_PEAKS': 'no',
'CONVOLVE_P': 'yes',
'P_MIN': 1e-2,
'SED_DIR': sed_dir,
'AB_DIR': ab_dir,
'FILTER_DIR': fil_dir,
'DELTA_M_0': 0.,
'ZP_OFFSETS': 0.,
'ZC': None,
'FC': None,
"ADD_SPEC_PROB": None,
"ADD_CONTINUOUS_PROB": None,
"NMAX": None # Useful for testing
}
if pars.d['PLOTS'] == 'no': plots = 0
else: plots = 1
if plots:
# If pylab installed show plots
plots = 'pylab'
try:
import matplotlib
matplotlib.use('TkAgg')
from pylab import *
# from coeplot2a import *
plot([1])
title('KILL THIS WINDOW!')
show()
ioff()
except:
try:
from biggles import *
plots = 'biggles'
except:
plots = 0
#Define the default values of the parameters
pars.d['INPUT'] = sys.argv[1] # catalog with the photometry
obs_file = pars.d['INPUT']
root = os.path.splitext(pars.d['INPUT'])[0]
pars.d[
'COLUMNS'] = root + '.columns' # column information for the input catalog
pars.d['OUTPUT'] = root + '.bpz' # output
nargs = len(sys.argv)
ipar = 2
if nargs > 2: #Check for parameter file and update parameters
if sys.argv[2] == '-P':
pars.fromfile(sys.argv[3])
ipar = 4
# Update the parameters using command line additions
#pars.fromcommandline(sys.argv[ipar:])
#for key in pars.d:
# print key, pars.d[key]
#pause()
pars.d.update(
params_cl()) # allows for flag only (no value after), e.g., -CHECK
def updateblank(var, ext):
global pars
if pars.d[var] in [None, 'yes']:
pars.d[var] = root + '.' + ext
updateblank('CHECK', 'flux_comparison')
updateblank('PROBS_LITE', 'probs')
updateblank('PROBS', 'full_probs')
updateblank('PROBS2', 'chisq')
#if pars.d['CHECK'] in [None, 'yes']:
# pars.d['CHECK'] = root+'.flux_comparison'
#This allows to change the auxiliary directories used by BPZ
if pars.d['SED_DIR'] != sed_dir:
print("Changing sed_dir to ", pars.d['SED_DIR'])
sed_dir = pars.d['SED_DIR']
if sed_dir[-1] != '/': sed_dir += '/'
if pars.d['AB_DIR'] != ab_dir:
print("Changing ab_dir to ", pars.d['AB_DIR'])
ab_dir = pars.d['AB_DIR']
if ab_dir[-1] != '/': ab_dir += '/'
if pars.d['FILTER_DIR'] != fil_dir:
print("Changing fil_dir to ", pars.d['FILTER_DIR'])
fil_dir = pars.d['FILTER_DIR']
if fil_dir[-1] != '/': fil_dir += '/'
#Better safe than sorry
if pars.d['OUTPUT'] == obs_file or pars.d['PROBS'] == obs_file or pars.d[
'PROBS2'] == obs_file or pars.d['PROBS_LITE'] == obs_file:
print("This would delete the input file!")
sys.exit()
if pars.d['OUTPUT'] == pars.d['COLUMNS'] or pars.d['PROBS_LITE'] == pars.d[
'COLUMNS'] or pars.d['PROBS'] == pars.d['COLUMNS']:
print("This would delete the .columns file!")
sys.exit()
#Assign the intrinsin rms
if pars.d['SPECTRA'] == 'CWWSB.list':
print('Setting the intrinsic rms to 0.067(1+z)')
pars.d['MIN_RMS'] = 0.067
pars.d['MIN_RMS'] = float(pars.d['MIN_RMS'])
pars.d['MIN_MAGERR'] = float(pars.d['MIN_MAGERR'])
if pars.d['INTERACTIVE'] == 'no': interactive = 0
else: interactive = 1
if pars.d['VERBOSE'] == 'yes':
print("Current parameters")
view_keys(pars.d)
pars.d['N_PEAKS'] = int(pars.d['N_PEAKS'])
if pars.d["ADD_SPEC_PROB"] != None:
specprob = 1
specfile = pars.d["ADD_SPEC_PROB"]
spec = get_2Darray(specfile)
ns = spec.shape[1]
if old_div(ns, 2) != (old_div(ns, 2.)):
print("Number of columns in SPEC_PROB is odd")
sys.exit()
z_spec = spec[:, :old_div(ns, 2)]
p_spec = spec[:, old_div(ns, 2):]
# Write output file header
header = "#ID "
    header += old_div(ns, 2) * " z_spec%i"
    header += old_div(ns, 2) * " p_spec%i"
header += "\n"
header = header % tuple(list(range(old_div(ns, 2))) + list(range(old_div(
ns, 2))))
specout = open(specfile.split()[0] + ".p_spec", "w")
specout.write(header)
else:
specprob = 0
pars.d['DELTA_M_0'] = float(pars.d['DELTA_M_0'])
#Some misc. initialization info useful for the .columns file
#nofilters=['M_0','OTHER','ID','Z_S','X','Y']
nofilters = ['M_0', 'OTHER', 'ID', 'Z_S']
#Numerical codes for nondetection, etc. in the photometric catalog
unobs = -99. #Objects not observed
undet = 99. #Objects not detected
#Define the z-grid
zmin = float(pars.d['ZMIN'])
zmax = float(pars.d['ZMAX'])
if zmin > zmax: raise ValueError('ZMIN must be smaller than ZMAX')
dz = float(pars.d['DZ'])
linear = 1
if linear:
z = arange(zmin, zmax + dz, dz)
else:
if zmax != 0.:
zi = zmin
z = []
while zi <= zmax:
z.append(zi)
zi = zi + dz * (1. + zi)
z = array(z)
else:
z = array([0.])
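# With the default parameters (ZMIN=0.01, ZMAX=10., DZ=0.01) the linear branch
# above yields a grid of roughly a thousand redshift points.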
#Now check the contents of the FILTERS, SED and AB directories
#Get the filters in stock
filters_db = []
filters_db = glob.glob(fil_dir + '*.res')
for i in range(len(filters_db)):
filters_db[i] = os.path.basename(filters_db[i])
filters_db[i] = filters_db[i][:-4]
#Get the SEDs in stock
sed_db = []
sed_db = glob.glob(sed_dir + '*.sed')
for i in range(len(sed_db)):
sed_db[i] = os.path.basename(sed_db[i])
sed_db[i] = sed_db[i][:-4]
#Get the ABflux files in stock
ab_db = []
ab_db = glob.glob(ab_dir + '*.AB')
for i in range(len(ab_db)):
ab_db[i] = os.path.basename(ab_db[i])
ab_db[i] = ab_db[i][:-3]
#Get a list with the filter names and check whether they are in stock
col_file = pars.d['COLUMNS']
filters = get_str(col_file, 0)
for cosa in nofilters:
if filters.count(cosa): filters.remove(cosa)
if pars.d['EXCLUDE'] != 'none':
if type(pars.d['EXCLUDE']) == type(' '):
pars.d['EXCLUDE'] = [pars.d['EXCLUDE']]
for cosa in pars.d['EXCLUDE']:
if filters.count(cosa): filters.remove(cosa)
for filter in filters:
if filter[-4:] == '.res': filter = filter[:-4]
if filter not in filters_db:
print('filter ', filter, 'not in database at', fil_dir, ':')
if ask('Print filters in database?'):
for line in filters_db:
print(line)
sys.exit()
#Get a list with the spectrum names and check whether they're in stock
#Look for the list in the home directory first,
#if it's not there, look in the SED directory
spectra_file = os.path.join(homedir, pars.d['SPECTRA'])
if not os.path.exists(spectra_file):
spectra_file = os.path.join(sed_dir, pars.d['SPECTRA'])
spectra = get_str(spectra_file, 0)
for i in range(len(spectra)):
if spectra[i][-4:] == '.sed': spectra[i] = spectra[i][:-4]
nf = len(filters)
nt = len(spectra)
nz = len(z)
#Get the model fluxes
f_mod = zeros((nz, nt, nf)) * 0.
abfiles = []
for it in range(nt):
for jf in range(nf):
if filters[jf][-4:] == '.res': filtro = filters[jf][:-4]
else: filtro = filters[jf]
#model = join([spectra[it], filtro, 'AB'], '.')
model = '.'.join([spectra[it], filtro, 'AB'])
model_path = os.path.join(ab_dir, model)
abfiles.append(model)
#Generate new ABflux files if not present
# or if new_ab flag on
if pars.d['NEW_AB'] == 'yes' or model[:-3] not in ab_db:
if spectra[it] not in sed_db:
print('SED ', spectra[it], 'not in database at', sed_dir)
# for line in sed_db:
# print line
sys.exit()
#print spectra[it],filters[jf]
print(' Generating ', model, '....')
ABflux(spectra[it], filtro, madau=pars.d['MADAU'])
#z_ab=arange(0.,zmax_ab,dz_ab) #zmax_ab and dz_ab are def. in bpz_tools
# abflux=f_z_sed(spectra[it],filters[jf], z_ab,units='nu',madau=pars.d['MADAU'])
# abflux=clip(abflux,0.,1e400)
# buffer=join(['#',spectra[it],filters[jf], 'AB','\n'])
#for i in range(len(z_ab)):
# buffer=buffer+join([`z_ab[i]`,`abflux[i]`,'\n'])
#open(model_path,'w').write(buffer)
#zo=z_ab
#f_mod_0=abflux
#else:
#Read the data
zo, f_mod_0 = get_data(model_path, (0, 1))
#Rebin the data to the required redshift resolution
f_mod[:, it, jf] = match_resol(zo, f_mod_0, z)
#if sometrue(less(f_mod[:,it,jf],0.)):
if less(f_mod[:, it, jf], 0.).any():
print('Warning: some values of the model AB fluxes are <0')
print('due to the interpolation ')
print('Clipping them to f>=0 values')
#To avoid rounding errors in the calculation of the likelihood
f_mod[:, it, jf] = clip(f_mod[:, it, jf], 0., 1e300)
#We forbid f_mod to take values in the (0,1e-100) interval
#f_mod[:,it,jf]=where(less(f_mod[:,it,jf],1e-100)*greater(f_mod[:,it,jf],0.),0.,f_mod[:,it,jf])
#Here goes the interpolacion between the colors
ninterp = int(pars.d['INTERP'])
ntypes = pars.d['NTYPES']
if ntypes == None:
nt0 = nt
else:
nt0 = list(ntypes)
for i, nt1 in enumerate(nt0):
print(i, nt1)
nt0[i] = int(nt1)
if (len(nt0) != 3) or (sum(nt0) != nt):
print()
        print('%d ellipticals + %d spirals + %d starbursts' % tuple(nt0))
print('does not add up to %d templates' % nt)
print('USAGE: -NTYPES nell,nsp,nsb')
print('nell = # of elliptical templates')
print('nsp = # of spiral templates')
print('nsb = # of starburst templates')
print(
'These must add up to the number of templates in the SPECTRA list')
print('Quitting BPZ.')
sys.exit()
if ninterp:
nti = nt + (nt - 1) * ninterp
buffer = zeros((nz, nti, nf)) * 1.
tipos = arange(0., float(nti), float(ninterp) + 1.)
xtipos = arange(float(nti))
for iz in arange(nz):
for jf in range(nf):
buffer[iz, :, jf] = match_resol(tipos, f_mod[iz, :, jf], xtipos)
nt = nti
f_mod = buffer
#for j in range(nf):
# plot=FramedPlot()
# for i in range(nt): plot.add(Curve(z,log(f_mod[:,i,j]+1e-40)))
# plot.show()
# ask('More?')
#Load all the parameters in the columns file to a dictionary
col_pars = params()
col_pars.fromfile(col_file)
# Read which filters are in which columns
flux_cols = []
eflux_cols = []
cals = []
zp_errors = []
zp_offsets = []
for filter in filters:
datos = col_pars.d[filter]
flux_cols.append(int(datos[0]) - 1)
eflux_cols.append(int(datos[1]) - 1)
cals.append(datos[2])
zp_errors.append(datos[3])
zp_offsets.append(datos[4])
zp_offsets = array(list(map(float, zp_offsets)))
if pars.d['ZP_OFFSETS']:
zp_offsets += array(list(map(float, pars.d['ZP_OFFSETS'])))
flux_cols = tuple(flux_cols)
eflux_cols = tuple(eflux_cols)
#READ the flux and errors from obs_file
f_obs = get_2Darray(obs_file, flux_cols)
ef_obs = get_2Darray(obs_file, eflux_cols)
#Convert them to arbitrary fluxes if they are in magnitudes
if pars.d['MAG'] == 'yes':
seen = greater(f_obs, 0.) * less(f_obs, undet)
no_seen = equal(f_obs, undet)
no_observed = equal(f_obs, unobs)
todo = seen + no_seen + no_observed
#The minimum photometric error is 0.01
#ef_obs=ef_obs+seen*equal(ef_obs,0.)*0.001
ef_obs = where(
greater_equal(ef_obs, 0.), clip(ef_obs, pars.d['MIN_MAGERR'], 1e10),
ef_obs)
if add.reduce(add.reduce(todo)) != todo.shape[0] * todo.shape[1]:
print('Objects with unexpected magnitudes!')
print("""Allowed values for magnitudes are
0<m<""" + repr(undet) + " m=" + repr(undet) + "(non detection), m=" + repr(
unobs) + "(not observed)")
for i in range(len(todo)):
if not alltrue(todo[i, :]):
print(i + 1, f_obs[i, :], ef_obs[i, :])
sys.exit()
#Detected objects
try:
f_obs = where(seen, 10.**(-.4 * f_obs), f_obs)
except OverflowError:
print(
'Some of the input magnitudes have values which are >700 or <-700')
print('Purge the input photometric catalog')
print('Minimum value', min(f_obs))
print('Maximum value', max(f_obs))
print('Indexes for minimum values', argmin(f_obs, 0.))
print('Indexes for maximum values', argmax(f_obs, 0.))
print('Bye.')
sys.exit()
try:
ef_obs = where(seen, (10.**(.4 * ef_obs) - 1.) * f_obs, ef_obs)
except OverflowError:
print(
'Some of the input magnitude errors have values which are >700 or <-700')
print('Purge the input photometric catalog')
print('Minimum value', min(ef_obs))
print('Maximum value', max(ef_obs))
print('Indexes for minimum values', argmin(ef_obs, 0.))
print('Indexes for maximum values', argmax(ef_obs, 0.))
print('Bye.')
sys.exit()
#print 'ef', ef_obs[0,:nf]
#print 'f', f_obs[1,:nf]
#print 'ef', ef_obs[1,:nf]
#Looked at, but not detected objects (mag=99.)
#We take the flux equal to zero, and the error in the flux equal to the 1-sigma detection error.
#If m=99, the corresponding error magnitude column is supposed to be dm=m_1sigma; to avoid errors
#with the sign we take the absolute value of dm
f_obs = where(no_seen, 0., f_obs)
ef_obs = where(no_seen, 10.**(-.4 * abs(ef_obs)), ef_obs)
#Objects not looked at (mag=-99.)
f_obs = where(no_observed, 0., f_obs)
ef_obs = where(no_observed, 0., ef_obs)
#Flux codes:
# If f>0 and ef>0 : normal objects
# If f==0 and ef>0 :object not detected
# If f==0 and ef==0: object not observed
#Everything else will crash the program
#Check that the observed error fluxes are reasonable
#if sometrue(less(ef_obs,0.)): raise 'Negative input flux errors'
if less(ef_obs, 0.).any():
raise ValueError('Negative input flux errors')
f_obs = where(less(f_obs, 0.), 0., f_obs) #Put non-detections to 0
ef_obs = where(
less(f_obs, 0.), maximum(1e-100, f_obs + ef_obs),
ef_obs) # Error equivalent to 1 sigma upper limit
#if sometrue(less(f_obs,0.)) : raise 'Negative input fluxes'
seen = greater(f_obs, 0.) * greater(ef_obs, 0.)
no_seen = equal(f_obs, 0.) * greater(ef_obs, 0.)
no_observed = equal(f_obs, 0.) * equal(ef_obs, 0.)
todo = seen + no_seen + no_observed
if add.reduce(add.reduce(todo)) != todo.shape[0] * todo.shape[1]:
print('Objects with unexpected fluxes/errors')
#Convert (internally) objects with zero flux and zero error(non observed)
#to objects with almost infinite (~1e108) error and still zero flux
#This will yield reasonable likelihoods (flat ones) for these objects
ef_obs = where(no_observed, 1e108, ef_obs)
#Include the zero point errors
zp_errors = array(list(map(float, zp_errors)))
zp_frac = e_mag2frac(zp_errors)
#zp_frac=10.**(.4*zp_errors)-1.
ef_obs = where(seen, sqrt(ef_obs * ef_obs + (zp_frac * f_obs)**2), ef_obs)
ef_obs = where(no_seen,
sqrt(ef_obs * ef_obs + (zp_frac * (old_div(ef_obs, 2.)))**2),
ef_obs)
#Add the zero-points offset
#The offsets are defined as m_new-m_old
zp_offsets = array(list(map(float, zp_offsets)))
zp_offsets = where(not_equal(zp_offsets, 0.), 10.**(-.4 * zp_offsets), 1.)
f_obs = f_obs * zp_offsets
ef_obs = ef_obs * zp_offsets
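# For illustration: a zero-point offset of +0.05 mag (m_new - m_old = 0.05)
# multiplies the fluxes by 10**(-0.4*0.05) ~ 0.955, i.e. it makes the
# photometry about 4.5% fainter in flux.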
#Convert fluxes to AB if needed
for i in range(f_obs.shape[1]):
if cals[i] == 'Vega':
const = mag2flux(VegatoAB(0., filters[i]))
f_obs[:, i] = f_obs[:, i] * const
ef_obs[:, i] = ef_obs[:, i] * const
elif cals[i] == 'AB':
continue
else:
print('AB or Vega?. Check ' + col_file + ' file')
sys.exit()
#Get m_0 (if present)
if 'M_0' in col_pars.d:
m_0_col = int(col_pars.d['M_0']) - 1
m_0 = get_data(obs_file, m_0_col)
m_0 += pars.d['DELTA_M_0']
#Get the objects ID (as a string)
if 'ID' in col_pars.d:
# print col_pars.d['ID']
id_col = int(col_pars.d['ID']) - 1
id = get_str(obs_file, id_col)
else:
id = list(map(str, list(range(1, len(f_obs[:, 0]) + 1))))
#Get spectroscopic redshifts (if present)
if 'Z_S' in col_pars.d:
z_s_col = int(col_pars.d['Z_S']) - 1
z_s = get_data(obs_file, z_s_col)
#Get the X,Y coordinates
if 'X' in col_pars.d:
datos = col_pars.d['X']
if len(datos) == 1: # OTHERWISE IT'S A FILTER!
x_col = int(col_pars.d['X']) - 1
x = get_data(obs_file, x_col)
if 'Y' in col_pars.d:
datos = col_pars.d['Y']
if len(datos) == 1: # OTHERWISE IT'S A FILTER!
y_col = int(datos) - 1
y = get_data(obs_file, y_col)
#If 'check' on, initialize some variables
check = pars.d['CHECK']
# This generates a file with m,z,T and observed/expected colors
#if check=='yes': pars.d['FLUX_COMPARISON']=root+'.flux_comparison'
checkSED = check != 'no'
ng = f_obs.shape[0]
if checkSED:
# PHOTOMETRIC CALIBRATION CHECK
#r=zeros((ng,nf),float)+1.
#dm=zeros((ng,nf),float)+1.
#w=r*0.
# Defaults: r=1, dm=1, w=0
frat = ones((ng, nf), float)
dmag = ones((ng, nf), float)
fw = zeros((ng, nf), float)
#Visualize the colors of the galaxies and the templates
#When there are spectroscopic redshifts available
if interactive and 'Z_S' in col_pars.d and plots and checkSED and ask(
'Plot colors vs spectroscopic redshifts?'):
color_m = zeros((nz, nt, nf - 1)) * 1.
if plots == 'pylab':
figure(1)
nrows = 2
ncols = old_div((nf - 1), nrows)
if (nf - 1) % nrows: ncols += 1
for i in range(nf - 1):
##plot=FramedPlot()
# Check for overflows
fmu = f_obs[:, i + 1]
fml = f_obs[:, i]
good = greater(fml, 1e-100) * greater(fmu, 1e-100)
zz, fmu, fml = multicompress(good, (z_s, fmu, fml))
colour = old_div(fmu, fml)
colour = clip(colour, 1e-5, 1e5)
colour = 2.5 * log10(colour)
if plots == 'pylab':
subplot(nrows, ncols, i + 1)
plot(zz, colour, "bo")
elif plots == 'biggles':
d = Points(zz, colour, color='blue')
plot.add(d)
for it in range(nt):
#Prevent overflows
fmu = f_mod[:, it, i + 1]
fml = f_mod[:, it, i]
good = greater(fml, 1e-100)
zz, fmu, fml = multicompress(good, (z, fmu, fml))
colour = old_div(fmu, fml)
colour = clip(colour, 1e-5, 1e5)
colour = 2.5 * log10(colour)
if plots == 'pylab':
plot(zz, colour, "r")
elif plots == 'biggles':
d = Curve(zz, colour, color='red')
plot.add(d)
if plots == 'pylab':
xlabel(r'$z$')
ylabel('%s - %s' % (filters[i], filters[i + 1]))
elif plots == 'biggles':
plot.xlabel = r'$z$'
plot.ylabel = '%s - %s' % (filters[i], filters[i + 1])
plot.save_as_eps('%s-%s.eps' % (filters[i], filters[i + 1]))
plot.show()
if plots == 'pylab':
show()
        inp = input('Hit Enter to continue.')
#Get other information which will go in the output file (as strings)
if 'OTHER' in col_pars.d:
if col_pars.d['OTHER'] != 'all':
other_cols = col_pars.d['OTHER']
if type(other_cols) == type((2, )):
other_cols = tuple(map(int, other_cols))
else:
other_cols = (int(other_cols), )
other_cols = [x - 1 for x in other_cols]
n_other = len(other_cols)
else:
n_other = get_2Darray(obs_file, cols='all', nrows=1).shape[1]
other_cols = list(range(n_other))
others = get_str(obs_file, other_cols)
if len(other_cols) > 1:
other = []
for j in range(len(others[0])):
lista = []
for i in range(len(others)):
lista.append(others[i][j])
other.append(join(lista))
else:
other = others
if pars.d['GET_Z'] == 'no': get_z = 0
else: get_z = 1
#Prepare the output file
out_name = pars.d['OUTPUT']
if get_z:
if os.path.exists(out_name):
os.system('cp %s %s.bak' % (out_name, out_name))
print("File %s exists. Copying it to %s.bak" % (out_name, out_name))
output = open(out_name, 'w')
if pars.d['PROBS_LITE'] == 'no': save_probs = 0
else: save_probs = 1
if pars.d['PROBS'] == 'no': save_full_probs = 0
else: save_full_probs = 1
if pars.d['PROBS2'] == 'no': save_probs2 = 0
else: save_probs2 = 1
#Include some header information
# File name and the date...
time_stamp = time.ctime(time.time())
if get_z: output.write('## File ' + out_name + ' ' + time_stamp + '\n')
#and also the parameters used to run bpz...
if get_z: output.write("""##
##Parameters used to run BPZ:
##
""")
claves = list(pars.d.keys())
claves.sort()
for key in claves:
if type(pars.d[key]) == type((1, )):
cosa = join(list(pars.d[key]), ',')
else:
cosa = str(pars.d[key])
if get_z: output.write('##' + key.upper() + '=' + cosa + '\n')
if save_full_probs:
#Shelve some info on the run
full_probs = shelve.open(pars.d['PROBS'])
full_probs['TIME'] = time_stamp
full_probs['PARS'] = pars.d
if save_probs:
probs = open(pars.d['PROBS_LITE'], 'w')
probs.write('# ID p_bayes(z) where z=arange(%.4f,%.4f,%.4f) \n' %
(zmin, zmax + dz, dz))
if save_probs2:
probs2 = open(pars.d['PROBS2'], 'w')
probs2.write(
'# id t z1 P(z1) P(z1+dz) P(z1+2*dz) ... where dz = %.4f\n' % dz)
#probs2.write('# ID\n')
#probs2.write('# t z1 P(z1) P(z1+dz) P(z1+2*dz) ... where dz = %.4f\n' % dz)
#Use a empirical prior?
tipo_prior = pars.d['PRIOR']
useprior = 0
if 'M_0' in col_pars.d:
has_mags = 1
else:
has_mags = 0
if has_mags and tipo_prior != 'none' and tipo_prior != 'flat':
useprior = 1
#Add cluster 'spikes' to the prior?
cluster_prior = 0.
if pars.d['ZC']:
cluster_prior = 1
if type(pars.d['ZC']) == type(""): zc = array([float(pars.d['ZC'])])
else: zc = array(list(map(float, pars.d['ZC'])))
if type(pars.d['FC']) == type(""): fc = array([float(pars.d['FC'])])
else: fc = array(list(map(float, pars.d['FC'])))
fcc = add.reduce(fc)
if fcc > 1.:
        print(fcc)
        raise ValueError('Too many galaxies in clusters! (sum of FC > 1)')
pi_c = zeros((nz, nt)) * 1.
#Go over the different cluster spikes
for i in range(len(zc)):
#We define the cluster within dz=0.01 limits
cluster_range = less_equal(abs(z - zc[i]), .01) * 1.
#Clip values to avoid overflow
exponente = clip(-(z - zc[i])**2 / 2. / (0.00333)**2, -700., 0.)
#Outside the cluster range g is 0
g = exp(exponente) * cluster_range
norm = add.reduce(g)
pi_c[:, 0] = pi_c[:, 0] + g / norm * fc[i]
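        # Each spike is a narrow Gaussian (sigma = 0.00333 in z) truncated to
        # |z - zc[i]| <= 0.01 and normalized so that it contributes a fraction
        # fc[i] of the total prior probability at the cluster redshift.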
#Go over the different types
print('We only apply the cluster prior to the early type galaxies')
for i in range(1, 3 + 2 * ninterp):
pi_c[:, i] = pi_c[:, i] + pi_c[:, 0]
#Output format
format = '%' + repr(maximum(5, len(id[0]))) + 's' #ID format
format = format + pars.d[
'N_PEAKS'] * ' %.3f %.3f %.3f %.3f %.5f' + ' %.3f %.3f %10.3f'
#Add header with variable names to the output file
sxhdr = """##
##Column information
##
# 1 ID"""
k = 1
if pars.d['N_PEAKS'] > 1:
for j in range(pars.d['N_PEAKS']):
sxhdr += """
# %i Z_B_%i
# %i Z_B_MIN_%i
# %i Z_B_MAX_%i
# %i T_B_%i
# %i ODDS_%i""" % (k + 1, j + 1, k + 2, j + 1, k + 3, j + 1, k + 4, j + 1,
k + 5, j + 1)
k += 5
else:
sxhdr += """
# %i Z_B
# %i Z_B_MIN
# %i Z_B_MAX
# %i T_B
# %i ODDS""" % (k + 1, k + 2, k + 3, k + 4, k + 5)
k += 5
sxhdr += """
# %i Z_ML
# %i T_ML
# %i CHI-SQUARED\n""" % (k + 1, k + 2, k + 3)
nh = k + 4
if 'Z_S' in col_pars.d:
sxhdr = sxhdr + '# %i Z_S\n' % nh
format = format + ' %.3f'
nh += 1
if has_mags:
format = format + ' %.3f'
sxhdr = sxhdr + '# %i M_0\n' % nh
nh += 1
if 'OTHER' in col_pars.d:
sxhdr = sxhdr + '# %i OTHER\n' % nh
format = format + ' %s'
nh += n_other
#print sxhdr
if get_z: output.write(sxhdr + '##\n')
odds_i = float(pars.d['ODDS'])
oi = inv_gauss_int(odds_i)
print(odds_i, oi)
#Proceed to redshift estimation
if checkSED: buffer_flux_comparison = ""
if pars.d['CONVOLVE_P'] == 'yes':
# Will Convolve with a dz=0.03 gaussian to make probabilities smoother
# This is necessary; if not there are too many close peaks
sigma_g = 0.03
x = arange(-3. * sigma_g, 3. * sigma_g + old_div(dz, 10.),
dz) # made symmetric --DC
gaus = exp(-(old_div(x, sigma_g))**2)
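    # The kernel is not normalized here; that is harmless because p_bayes is
    # renormalized after the convolution (see the add.reduce/division below).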
if pars.d["NMAX"] != None: ng = int(pars.d["NMAX"])
for ig in range(ng):
currentPercent = ig / ng * 100
status = "{:.3f}% of {} completed.".format(currentPercent, ng)
Printer(status)
    #Don't run BPZ on galaxies which have z_s > z_max
#if col_pars.d.has_key('Z_S'):
# if z_s[ig]<9.9 and z_s[ig]>zmax : continue
if not get_z: continue
if pars.d['COLOR'] == 'yes':
likelihood = p_c_z_t_color(f_obs[ig, :nf], ef_obs[ig, :nf],
f_mod[:nz, :nt, :nf])
else:
likelihood = p_c_z_t(f_obs[ig, :nf], ef_obs[ig, :nf],
f_mod[:nz, :nt, :nf])
if 0:
print(f_obs[ig, :nf])
print(ef_obs[ig, :nf])
iz_ml = likelihood.i_z_ml
t_ml = likelihood.i_t_ml
red_chi2 = old_div(likelihood.min_chi2, float(nf - 1.))
#p=likelihood.Bayes_likelihood
#likelihood.various_plots()
#print 'FULL BAYESAIN LIKELIHOOD'
p = likelihood.likelihood
if not ig:
print('ML * prior -- NOT QUITE BAYESIAN')
if pars.d[
'ONLY_TYPE'] == 'yes': #Use only the redshift information, no priors
p_i = zeros((nz, nt)) * 1.
j = searchsorted(z, z_s[ig])
#print j,nt,z_s[ig]
try:
p_i[j, :] = old_div(1., float(nt))
except IndexError:
pass
else:
if useprior:
if pars.d['PRIOR'] == 'lensing':
p_i = prior(z, m_0[ig], tipo_prior, nt0, ninterp, x[ig], y[ig])
else:
p_i = prior(z, m_0[ig], tipo_prior, nt0, ninterp)
else:
p_i = old_div(ones((nz, nt), float), float(nz * nt))
if cluster_prior: p_i = (1. - fcc) * p_i + pi_c
if save_full_probs:
full_probs[id[ig]] = [z, p_i[:nz, :nt], p[:nz, :nt], red_chi2]
#Multiply the prior by the likelihood to find the final probability
pb = p_i[:nz, :nt] * p[:nz, :nt]
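    # This is Bayes' theorem as in Benitez (2000): the posterior over (z,T)
    # is proportional to prior times likelihood,
    #     p(z, T | C, m_0)  ~  p(z, T | m_0) * p(C | z, T),
    # with the normalization applied later when p_bayes is summed over types
    # and divided by its total.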
#plo=FramedPlot()
#for i in range(p.shape[1]):
# plo.add(Curve(z,p_i[:nz,i]/sum(sum(p_i[:nz,:]))))
#for i in range(p.shape[1]):
# plo.add(Curve(z,p[:nz,i]/sum(sum(p[:nz,:])),color='red'))
#plo.add(Curve(z,pb[:nz,-1]/sum(pb[:nz,-1]),color='blue'))
#plo.show()
#ask('More?')
#Convolve with a gaussian of width \sigma(1+z) to take into
    #account the intrinsic scatter in the redshift estimation 0.06*(1+z)
#(to be done)
#Estimate the bayesian quantities
p_bayes = add.reduce(pb[:nz, :nt], -1)
#print p_bayes.shape
#print argmax(p_bayes)
#print p_bayes[300:310]
#Convolve with a gaussian
if pars.d['CONVOLVE_P'] == 'yes' and pars.d['ONLY_TYPE'] == 'no':
#print 'GAUSS CONV'
p_bayes = convolve(p_bayes, gaus, 1)
#print 'gaus', gaus
#print p_bayes.shape
#print argmax(p_bayes)
#print p_bayes[300:310]
# Eliminate all low level features in the prob. distribution
pmax = max(p_bayes)
p_bayes = where(
greater(p_bayes, pmax * float(pars.d['P_MIN'])), p_bayes, 0.)
norm = add.reduce(p_bayes)
p_bayes = old_div(p_bayes, norm)
if specprob:
p_spec[ig, :] = match_resol(z, p_bayes, z_spec[ig, :]) * p_spec[ig, :]
norma = add.reduce(p_spec[ig, :])
if norma == 0.: norma = 1.
p_spec[ig, :] /= norma
#vyjod=tuple([id[ig]]+list(z_spec[ig,:])+list(p_spec[ig,:])+[z_s[ig],
# int(float(other[ig]))])
vyjod = tuple([id[ig]] + list(z_spec[ig, :]) + list(p_spec[ig, :]))
formato = "%s " + 5 * " %.4f"
formato += 5 * " %.3f"
#formato+=" %4f %i"
formato += "\n"
print(formato % vyjod)
specout.write(formato % vyjod)
if pars.d['N_PEAKS'] > 1:
# Identify maxima and minima in the final probability
g_max = less(p_bayes[2:], p_bayes[1:-1]) * less(p_bayes[:-2],
p_bayes[1:-1])
g_min = greater(p_bayes[2:], p_bayes[1:-1]) * greater(p_bayes[:-2],
p_bayes[1:-1])
g_min += equal(p_bayes[1:-1], 0.) * greater(p_bayes[2:], 0.)
g_min += equal(p_bayes[1:-1], 0.) * greater(p_bayes[:-2], 0.)
i_max = compress(g_max, arange(nz - 2)) + 1
i_min = compress(g_min, arange(nz - 2)) + 1
# Check that the first point and the last one are not minima or maxima,
# if they are, add them to the index arrays
if p_bayes[0] > p_bayes[1]:
i_max = concatenate([[0], i_max])
i_min = concatenate([[0], i_min])
if p_bayes[-1] > p_bayes[-2]:
i_max = concatenate([i_max, [nz - 1]])
i_min = concatenate([i_min, [nz - 1]])
if p_bayes[0] < p_bayes[1]:
i_min = concatenate([[0], i_min])
if p_bayes[-1] < p_bayes[-2]:
i_min = concatenate([i_min, [nz - 1]])
p_max = take(p_bayes, i_max)
#p_min=take(p_bayes,i_min)
p_tot = []
z_peaks = []
t_peaks = []
# Sort them by probability values
p_max, i_max = multisort(old_div(1., p_max), (p_max, i_max))
# For each maximum, define the minima which sandwich it
# Assign minima to each maximum
jm = searchsorted(i_min, i_max)
p_max = list(p_max)
for i in range(len(i_max)):
z_peaks.append([z[i_max[i]], z[i_min[jm[i] - 1]], z[i_min[jm[i]]]])
t_peaks.append(argmax(pb[i_max[i], :nt]))
p_tot.append(sum(p_bayes[i_min[jm[i] - 1]:i_min[jm[i]]]))
# print z_peaks[-1][0],f_mod[i_max[i],t_peaks[-1]-1,:nf]
if ninterp:
t_peaks = list(old_div(array(t_peaks), (1. + ninterp)))
if pars.d['MERGE_PEAKS'] == 'yes':
            # Merge peaks which are very close (within 0.06*(1+z))
merged = []
for k in range(len(z_peaks)):
for j in range(len(z_peaks)):
if j > k and k not in merged and j not in merged:
if abs(z_peaks[k][0] - z_peaks[j][0]) < 0.06 * (
1. + z_peaks[j][0]):
# Modify the element which receives the accretion
z_peaks[k][1] = minimum(z_peaks[k][1],
z_peaks[j][1])
z_peaks[k][2] = maximum(z_peaks[k][2],
z_peaks[j][2])
p_tot[k] += p_tot[j]
# Put the merged element in the list
merged.append(j)
#print merged
# Clean up
copia = p_tot[:]
for j in merged:
p_tot.remove(copia[j])
copia = z_peaks[:]
for j in merged:
z_peaks.remove(copia[j])
copia = t_peaks[:]
for j in merged:
t_peaks.remove(copia[j])
copia = p_max[:]
for j in merged:
p_max.remove(copia[j])
if sum(array(p_tot)) != 1.:
p_tot = old_div(array(p_tot), sum(array(p_tot)))
# Define the peak
iz_b = argmax(p_bayes)
zb = z[iz_b]
# OKAY, NOW THAT GAUSSIAN CONVOLUTION BUG IS FIXED
# if pars.d['ONLY_TYPE']=='yes': zb=zb-dz/2. #This corrects a small bias
# else: zb=zb-dz #This corrects another small bias --DC
#Integrate within a ~ oi*sigma interval to estimate
# the odds. (based on a sigma=pars.d['MIN_RMS']*(1+z))
#Look for the number of sigma corresponding
#to the odds_i confidence limit
zo1 = zb - oi * pars.d['MIN_RMS'] * (1. + zb)
zo2 = zb + oi * pars.d['MIN_RMS'] * (1. + zb)
if pars.d['Z_THR'] > 0:
zo1 = float(pars.d['Z_THR'])
zo2 = float(pars.d['ZMAX'])
o = odds(p_bayes[:nz], z, zo1, zo2)
# Integrate within the same odds interval to find the type
# izo1=maximum(0,searchsorted(z,zo1)-1)
# izo2=minimum(nz,searchsorted(z,zo2))
# t_b=argmax(add.reduce(p[izo1:izo2,:nt],0))
it_b = argmax(pb[iz_b, :nt])
t_b = it_b + 1
if ninterp:
tt_b = old_div(float(it_b), (1. + ninterp))
tt_ml = old_div(float(t_ml), (1. + ninterp))
else:
tt_b = it_b
tt_ml = t_ml
if max(pb[iz_b, :]) < 1e-300:
print('NO CLEAR BEST t_b; ALL PROBABILITIES ZERO')
t_b = -1.
tt_b = -1.
#print it_b, t_b, tt_b, pb.shape
if 0:
print(f_mod[iz_b, it_b, :nf])
print(min(ravel(p_i)), max(ravel(p_i)))
print(min(ravel(p)), max(ravel(p)))
print(p_i[iz_b, :])
print(p[iz_b, :])
print(p_i[iz_b, it_b]) # prior
print(p[iz_b, it_b]) # chisq
print(likelihood.likelihood[iz_b, it_b])
print(likelihood.chi2[iz_b, it_b])
print(likelihood.ftt[iz_b, it_b])
print(likelihood.foo)
print()
print('t_b', t_b)
print('iz_b', iz_b)
print('nt', nt)
print(max(ravel(pb)))
impb = argmax(ravel(pb))
impbz = old_div(impb, nt)
impbt = impb % nt
print(impb, impbz, impbt)
print(ravel(pb)[impb])
print(pb.shape, (nz, nt))
print(pb[impbz, impbt])
print(pb[iz_b, it_b])
print('z, t', z[impbz], t_b)
print(t_b)
# Redshift confidence limits
z1, z2 = interval(p_bayes[:nz], z, odds_i)
if pars.d['PHOTO_ERRORS'] == 'no':
zo1 = zb - oi * pars.d['MIN_RMS'] * (1. + zb)
zo2 = zb + oi * pars.d['MIN_RMS'] * (1. + zb)
if zo1 < z1: z1 = maximum(0., zo1)
if zo2 > z2: z2 = zo2
# Print output
if pars.d['N_PEAKS'] == 1:
salida = [id[ig], zb, z1, z2, tt_b + 1, o, z[iz_ml], tt_ml + 1,
red_chi2]
else:
salida = [id[ig]]
for k in range(pars.d['N_PEAKS']):
if k <= len(p_tot) - 1:
salida = salida + list(z_peaks[k]) + [t_peaks[k] + 1, p_tot[k]]
else:
salida += [-1., -1., -1., -1., -1.]
salida += [z[iz_ml], tt_ml + 1, red_chi2]
if 'Z_S' in col_pars.d: salida.append(z_s[ig])
if has_mags: salida.append(m_0[ig] - pars.d['DELTA_M_0'])
if 'OTHER' in col_pars.d: salida.append(other[ig])
if get_z: output.write(format % tuple(salida) + '\n')
if pars.d['VERBOSE'] == 'yes': print(format % tuple(salida))
#try:
# if sometrue(greater(z_peaks,7.5)):
# connect(z,p_bayes)
# ask('More?')
#except:
# pass
odd_check = odds_i
if checkSED:
ft = f_mod[iz_b, it_b, :]
fo = f_obs[ig, :]
efo = ef_obs[ig, :]
dfosq = (old_div((ft - fo), efo))**2
if 0:
print(ft)
print(fo)
print(efo)
print(dfosq)
pause()
factor = ft / efo / efo
ftt = add.reduce(ft * factor)
fot = add.reduce(fo * factor)
am = old_div(fot, ftt)
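        # am is the weighted least-squares amplitude that best rescales the
        # model fluxes onto the observed ones:
        #     am = sum(fo*ft/efo**2) / sum(ft**2/efo**2)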
ft = ft * am
if 0:
print(factor)
print(ftt)
print(fot)
print(am)
print(ft)
print()
pause()
flux_comparison = [id[ig], m_0[ig], z[iz_b], t_b, am] + list(
concatenate([ft, fo, efo]))
nfc = len(flux_comparison)
format_fc = '%s %.2f %.2f %i' + (nfc - 4) * ' %.3e' + '\n'
buffer_flux_comparison = buffer_flux_comparison + format_fc % tuple(
flux_comparison)
if o >= odd_check:
# PHOTOMETRIC CALIBRATION CHECK
# Calculate flux ratios, but only for objects with ODDS >= odd_check
# (odd_check = 0.95 by default)
# otherwise, leave weight w = 0 by default
eps = 1e-10
frat[ig, :] = divsafe(fo, ft, inf=eps, nan=eps)
#fw[ig,:] = greater(fo, 0)
fw[ig, :] = divsafe(fo, efo, inf=1e8, nan=0)
fw[ig, :] = clip(fw[ig, :], 0, 100)
#print fw[ig,:]
#print
if 0:
bad = less_equal(ft, 0.)
#Avoid overflow by setting r to 0.
fo = where(bad, 0., fo)
ft = where(bad, 1., ft)
r[ig, :] = old_div(fo, ft)
try:
dm[ig, :] = -flux2mag(old_div(fo, ft))
except:
dm[ig, :] = -100
# Clip ratio between 0.01 & 100
r[ig, :] = where(greater(r[ig, :], 100.), 100., r[ig, :])
r[ig, :] = where(less_equal(r[ig, :], 0.), 0.01, r[ig, :])
#Weight by flux
w[ig, :] = where(greater(fo, 0.), 1, 0.)
#w[ig,:]=where(greater(fo,0.),fo,0.)
#print fo
#print r[ig,:]
#print
            # This is no good because r is always > 0 (has been clipped that way)
#w[ig,:]=where(greater(r[ig,:],0.),fo,0.)
# The is bad because it would include non-detections:
#w[ig,:]=where(greater(r[ig,:],0.),1.,0.)
if save_probs:
texto = '%s ' % str(id[ig])
texto += len(p_bayes) * '%.3e ' + '\n'
probs.write(texto % tuple(p_bayes))
# pb[z,t] -> p_bayes[z]
# 1. tb are summed over
# 2. convolved with Gaussian if CONVOLVE_P
# 3. Clipped above P_MIN * max(P), where P_MIN = 0.01 by default
# 4. normalized such that sum(P(z)) = 1
if save_probs2: # P = exp(-chisq / 2)
#probs2.write('%s\n' % id[ig])
pmin = pmax * float(pars.d['P_MIN'])
#pb = where(less(pb,pmin), 0, pb)
chisq = -2 * log(pb)
for itb in range(nt):
chisqtb = chisq[:, itb]
pqual = greater(pb[:, itb], pmin)
chisqlists = seglist(chisqtb, pqual)
if len(chisqlists) == 0:
continue
#print pb[:,itb]
#print chisqlists
zz = arange(zmin, zmax + dz, dz)
zlists = seglist(zz, pqual)
for i in range(len(zlists)):
probs2.write('%s %2d %.3f ' %
(id[ig], itb + 1, zlists[i][0]))
fmt = len(chisqlists[i]) * '%4.2f ' + '\n'
probs2.write(fmt % tuple(chisqlists[i]))
#fmt = len(chisqtb) * '%4.2f '+'\n'
#probs2.write('%d ' % itb)
#probs2.write(fmt % tuple(chisqtb))
#if checkSED: open(pars.d['FLUX_COMPARISON'],'w').write(buffer_flux_comparison)
if checkSED: open(pars.d['CHECK'], 'w').write(buffer_flux_comparison)
if get_z: output.close()
#if checkSED and get_z:
if checkSED:
#try:
if 1:
if interactive:
print("")
print("")
print("PHOTOMETRIC CALIBRATION TESTS")
# See PHOTOMETRIC CALIBRATION CHECK above
#ratios=add.reduce(w*r,0)/add.reduce(w,0)
#print "Average, weighted by flux ratios f_obs/f_model for objects with odds >= %g" % odd_check
#print len(filters)*' %s' % tuple(filters)
#print nf*' % 7.3f ' % tuple(ratios)
#print "Corresponding zero point shifts"
#print nf*' % 7.3f ' % tuple(-flux2mag(ratios))
#print
fratavg = old_div(sum(fw * frat, axis=0), sum(fw, axis=0))
dmavg = -flux2mag(fratavg)
fnobj = sum(greater(fw, 0), axis=0)
#print 'fratavg', fratavg
#print 'dmavg', dmavg
#print 'fnobj', fnobj
#fnobj = sum(greater(w[:,i],0))
print(
"If the dmag are large, add them to the .columns file (zp_offset), then re-run BPZ.")
print(
"(For better results, first re-run with -ONLY_TYPE yes to fit SEDs to known spec-z.)")
print()
print(' fo/ft dmag nobj filter')
#print nf
for i in range(nf):
print('% 7.3f % 7.3f %5d %s'\
% (fratavg[i], dmavg[i], fnobj[i], filters[i]))
#% (ratios[i], -flux2mag(ratios)[i], sum(greater(w[:,i],0)), filters[i])
#print ' fo/ft dmag filter'
#for i in range(nf):
# print '% 7.3f % 7.3f %s' % (ratios[i], -flux2mag(ratios)[i], filters[i])
print(
"fo/ft = Average f_obs/f_model weighted by f_obs/ef_obs for objects with ODDS >= %g"
% odd_check)
print(
"dmag = magnitude offset which should be applied (added) to the photometry (zp_offset)")
print(
"nobj = # of galaxies considered in that filter (detected and high ODDS >= %g)"
% odd_check)
# print r
# print w
#print
#print "Number of galaxies considered (with ODDS >= %g):" % odd_check
#print ' ', sum(greater(w,0)) / float(nf)
#print '(Note a galaxy detected in only 5 / 6 filters counts as 5/6 = 0.833)'
#print sum(greater(w,0))
#This part is experimental and may not work in the general case
#print "Median color offsets for objects with odds > "+`odd_check`+" (not weighted)"
#print len(filters)*' %s' % tuple(filters)
#r=flux2mag(r)
#print nf*' %.3f ' % tuple(-median(r))
#print nf*' %.3f ' % tuple(median(dm))
#rms=[]
#efobs=[]
#for j in range(nf):
# ee=where(greater(f_obs[:,j],0.),f_obs[:,j],2.)
# zz=e_frac2mag(ef_obs[:,j]/ee)
#
# xer=arange(0.,1.,.02)
# hr=hist(abs(r[:,j]),xer)
# hee=hist(zz,xer)
# rms.append(std_log(compress(less_equal(r[:,j],1.),r[:,j])))
# zz=compress(less_equal(zz,1.),zz)
# efobs.append(sqrt(mean(zz*zz)))
#print nf*' %.3f ' % tuple(rms)
#print nf*' %.3f ' % tuple(efobs)
#print nf*' %.3f ' % tuple(sqrt(abs(array(rms)**2-array(efobs)**2)))
#except: pass
if save_full_probs: full_probs.close()
if save_probs: probs.close()
if save_probs2: probs2.close()
if plots and checkSED:
zb, zm, zb1, zb2, o, tb = get_data(out_name, (1, 6, 2, 3, 5, 4))
#Plot the comparison between z_spec and z_B
if 'Z_S' in col_pars.d:
if not interactive or ask('Compare z_B vs z_spec?'):
good = less(z_s, 9.99)
print(
'Total initial number of objects with spectroscopic redshifts= ',
sum(good))
od_th = 0.
if ask('Select for galaxy characteristics?\n'):
od_th = eval(input('Odds threshold?\n'))
good *= greater_equal(o, od_th)
t_min = eval(input('Minimum spectral type\n'))
t_max = eval(input('Maximum spectral type\n'))
good *= less_equal(tb, t_max) * greater_equal(tb, t_min)
if has_mags:
mg_min = eval(input('Bright magnitude limit?\n'))
mg_max = eval(input('Faint magnitude limit?\n'))
good = good * less_equal(m_0, mg_max) * greater_equal(
m_0, mg_min)
zmo, zso, zbo, zb1o, zb2o, tb = multicompress(good, (zm, z_s, zb,
zb1, zb2, tb))
print('Number of objects with odds > %.2f= %i ' %
(od_th, len(zbo)))
deltaz = old_div((zso - zbo), (1. + zso))
sz = stat_robust(deltaz, 3., 3)
sz.run()
outliers = greater_equal(abs(deltaz), 3. * sz.rms)
print('Number of outliers [dz >%.2f*(1+z)]=%i' %
(3. * sz.rms, add.reduce(outliers)))
catastrophic = greater_equal(deltaz * (1. + zso), 1.)
n_catast = sum(catastrophic)
print('Number of catastrophic outliers [dz >1]=', n_catast)
print('Delta z/(1+z) = %.4f +- %.4f' % (sz.median, sz.rms))
if interactive and plots:
if plots == 'pylab':
figure(2)
subplot(211)
plot(
arange(
min(zso), max(zso) + 0.01, 0.01), arange(
min(zso), max(zso) + 0.01, 0.01), "r")
errorbar(zso,
zbo, [abs(zbo - zb1o), abs(zb2o - zbo)],
fmt="bo")
xlabel(r'$z_{spec}$')
ylabel(r'$z_{bpz}$')
subplot(212)
plot(zso, zmo, "go", zso, zso, "r")
xlabel(r'$z_{spec}$')
ylabel(r'$z_{ML}$')
show()
elif plots == 'biggles':
plot = FramedPlot()
if len(zso) > 2000: symbol = 'dot'
else: symbol = 'circle'
plot.add(Points(zso, zbo, symboltype=symbol, color='blue'))
plot.add(Curve(zso, zso, linewidth=2., color='red'))
plot.add(ErrorBarsY(zso, zb1o, zb2o))
plot.xlabel = r'$z_{spec}$'
plot.ylabel = r'$z_{bpz}$'
# plot.xrange=0.,1.5
# plot.yrange=0.,1.5
plot.show()
#
plot_ml = FramedPlot()
if len(zso) > 2000: symbol = 'dot'
else: symbol = 'circle'
plot_ml.add(Points(
zso, zmo, symboltype=symbol,
color='blue'))
plot_ml.add(Curve(zso, zso, linewidth=2., color='red'))
plot_ml.xlabel = r"$z_{spec}$"
plot_ml.ylabel = r"$z_{ML}$"
plot_ml.show()
if interactive and plots and ask('Plot Bayesian photo-z histogram?'):
if plots == 'biggles':
dz = eval(input('Redshift interval?\n'))
od_th = eval(input('Odds threshold?\n'))
good = greater_equal(o, od_th)
if has_mags:
mg_min = eval(input('Bright magnitude limit?\n'))
mg_max = eval(input('Faint magnitude limit?\n'))
good = good * less_equal(m_0, mg_max) * greater_equal(m_0,
mg_min)
z = compress(good, zb)
xz = arange(zmin, zmax, dz)
hz = hist(z, xz)
plot = FramedPlot()
h = Histogram(hz, 0., dz, color='blue')
plot.add(h)
plot.xlabel = r'$z_{bpz}$'
plot.ylabel = r'$N(z_{bpz})$'
plot.show()
if ask('Want to save plot as eps file?'):
            file = input('File name?\n')
if file[-2:] != 'ps': file = file + '.eps'
plot.save_as_eps(file)
if interactive and plots and ask(
'Compare colors with photometric redshifts?'):
if plots == 'biggles':
color_m = zeros((nz, nt, nf - 1)) * 1.
for i in range(nf - 1):
plot = FramedPlot()
# Check for overflows
fmu = f_obs[:, i + 1]
fml = f_obs[:, i]
good = greater(fml, 1e-100) * greater(fmu, 1e-100)
zz, fmu, fml = multicompress(good, (zb, fmu, fml))
colour = old_div(fmu, fml)
colour = clip(colour, 1e-5, 1e5)
colour = 2.5 * log10(colour)
d = Points(zz, colour, color='blue')
plot.add(d)
for it in range(nt):
#Prevent overflows
fmu = f_mod[:, it, i + 1]
fml = f_mod[:, it, i]
good = greater(fml, 1e-100)
zz, fmu, fml = multicompress(good, (z, fmu, fml))
colour = old_div(fmu, fml)
colour = clip(colour, 1e-5, 1e5)
colour = 2.5 * log10(colour)
d = Curve(zz, colour, color='red')
plot.add(d)
plot.xlabel = r'$z$'
plot.ylabel = '%s - %s' % (filters[i], filters[i + 1])
plot.save_as_eps('%s-%s.eps' % (filters[i], filters[i + 1]))
plot.show()
rolex.check()
| mit |
larsmans/scipy | scipy/stats/_discrete_distns.py | 6 | 21338 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy import special
from scipy.special import entr, gammaln as gamln
from scipy.misc import logsumexp
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
from ._distn_infrastructure import (
rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf, get_distribution_names)
class binom_gen(rv_discrete):
"""A binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `binom` is::
binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
for ``k`` in ``{0, 1,..., n}``.
`binom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.binomial(n, p, self._size)
def _argcheck(self, n, p):
self.b = n
return (n >= 0) & (p >= 0) & (p <= 1)
def _logpmf(self, x, n, p):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _cdf(self, x, n, p):
k = floor(x)
vals = special.bdtr(k, n, p)
return vals
def _sf(self, x, n, p):
k = floor(x)
return special.bdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.bdtrik(q, n, p))
vals1 = np.maximum(vals - 1, 0)
temp = special.bdtr(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p, moments='mv'):
q = 1.0 - p
mu = n * p
var = n * p * q
g1, g2 = None, None
if 's' in moments:
g1 = (q - p) / sqrt(var)
if 'k' in moments:
g2 = (1.0 - 6*p*q) / var
return mu, var, g1, g2
def _entropy(self, n, p):
k = np.r_[0:n + 1]
vals = self._pmf(k, n, p)
return np.sum(entr(vals), axis=0)
binom = binom_gen(name='binom')
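# For illustration, standard usage of the public object defined above:
#     from scipy.stats import binom
#     binom.pmf(3, n=10, p=0.5)   # ~0.117, probability of exactly 3 successes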
class bernoulli_gen(binom_gen):
"""A Bernoulli discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `bernoulli` is::
bernoulli.pmf(k) = 1-p if k = 0
= p if k = 1
for ``k`` in ``{0, 1}``.
`bernoulli` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return binom_gen._rvs(self, 1, p)
def _argcheck(self, p):
return (p >= 0) & (p <= 1)
def _logpmf(self, x, p):
return binom._logpmf(x, 1, p)
def _pmf(self, x, p):
return binom._pmf(x, 1, p)
def _cdf(self, x, p):
return binom._cdf(x, 1, p)
def _sf(self, x, p):
return binom._sf(x, 1, p)
def _ppf(self, q, p):
return binom._ppf(q, 1, p)
def _stats(self, p):
return binom._stats(1, p)
def _entropy(self, p):
return entr(p) + entr(1-p)
bernoulli = bernoulli_gen(b=1, name='bernoulli')
class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `nbinom` is::
nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
for ``k >= 0``.
`nbinom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.negative_binomial(n, p, self._size)
def _argcheck(self, n, p):
return (n > 0) & (p >= 0) & (p <= 1)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _logpmf(self, x, n, p):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(p) + special.xlog1py(x, -p)
def _cdf(self, x, n, p):
k = floor(x)
return special.betainc(n, k+1, p)
def _sf_skip(self, x, n, p):
# skip because special.nbdtrc doesn't work for 0<n<1
k = floor(x)
return special.nbdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.nbdtrik(q, n, p))
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p):
Q = 1.0 / p
P = Q - 1.0
mu = n*P
var = n*P*Q
g1 = (Q+P)/sqrt(n*P*Q)
g2 = (1.0 + 6*P*Q) / (n*P*Q)
return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom')
class geom_gen(rv_discrete):
"""A geometric discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `geom` is::
geom.pmf(k) = (1-p)**(k-1)*p
for ``k >= 1``.
`geom` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return self._random_state.geometric(p, size=self._size)
def _argcheck(self, p):
return (p <= 1) & (p >= 0)
def _pmf(self, k, p):
return np.power(1-p, k-1) * p
def _logpmf(self, k, p):
return special.xlog1py(k - 1, -p) + log(p)
def _cdf(self, x, p):
k = floor(x)
return -expm1(log1p(-p)*k)
def _sf(self, x, p):
return np.exp(self._logsf(x, p))
def _logsf(self, x, p):
k = floor(x)
return k*log1p(-p)
def _ppf(self, q, p):
vals = ceil(log(1.0-q)/log(1-p))
temp = self._cdf(vals-1, p)
return np.where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, p):
mu = 1.0/p
qr = 1.0-p
var = qr / p / p
g1 = (2.0-p) / sqrt(qr)
g2 = np.polyval([1, -6, 6], p)/(1.0-p)
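        # np.polyval([1, -6, 6], p) evaluates p**2 - 6*p + 6, so g2 equals the
        # geometric distribution's excess kurtosis 6 + p**2 / (1 - p).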
return mu, var, g1, g2
geom = geom_gen(a=1, name='geom', longname="A geometric")
class hypergeom_gen(rv_discrete):
"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
M is the total number of objects, n is total number of Type I objects.
The random variate represents the number of Type I objects in N drawn
without replacement from the total population.
%(before_notes)s
Notes
-----
The probability mass function is defined as::
pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N),
for max(0, N - (M-n)) <= k <= min(n, N)
%(after_notes)s
Examples
--------
>>> from scipy.stats import hypergeom
>>> import matplotlib.pyplot as plt
Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
we want to know the probability of finding a given number of dogs if we
choose at random 12 of the 20 animals, we can initialize a frozen
distribution and plot the probability mass function:
>>> [M, n, N] = [20, 7, 12]
>>> rv = hypergeom(M, n, N)
>>> x = np.arange(0, n+1)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group of chosen animals')
>>> ax.set_ylabel('hypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use `hypergeom`
methods directly. To for example obtain the cumulative distribution
function, use:
>>> prb = hypergeom.cdf(x, M, n, N)
And to generate random numbers:
>>> R = hypergeom.rvs(M, n, N, size=10)
"""
def _rvs(self, M, n, N):
return self._random_state.hypergeometric(n, M-n, N, size=self._size)
def _argcheck(self, M, n, N):
cond = (M > 0) & (n >= 0) & (N >= 0)
cond &= (n <= M) & (N <= M)
self.a = max(N-(M-n), 0)
self.b = min(n, N)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
- gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
+ gamln(N+1)
def _pmf(self, k, M, n, N):
# same as the following but numerically more precise
# return comb(good, k) * comb(bad, N-k) / comb(tot, N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
# tot, good, sample_size = M, n, N
# "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n')
M, n, N = 1.*M, 1.*n, 1.*N
m = M - n
p = n/M
mu = N*p
var = m*n*N*(M - N)*1.0/(M*M*(M-1))
g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))
g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m
g2 *= (M-1)*M*M
g2 += 6.*n*N*(M-N)*m*(5.*M-6)
g2 /= n * N * (M-N) * m * (M-2.) * (M-3.)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = np.r_[N - (M - n):min(n, N) + 1]
vals = self.pmf(k, M, n, N)
return np.sum(entr(vals), axis=0)
def _sf(self, k, M, n, N):
"""More precise calculation, 1 - cdf doesn't cut it."""
# This for loop is needed because `k` can be an array. If that's the
# case, the sf() method makes M, n and N arrays of the same shape. We
# therefore unpack all inputs args, so we can do the manual
# integration.
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Manual integration over probability mass function. More accurate
# than integrate.quad.
k2 = np.arange(quant + 1, draw + 1)
res.append(np.sum(self._pmf(k2, tot, good, draw)))
return np.asarray(res)
def _logsf(self, k, M, n, N):
"""
More precise calculation than log(sf)
"""
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Integration over probability mass function using logsumexp
k2 = np.arange(quant + 1, draw + 1)
res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
"""A Logarithmic (Log-Series, Series) discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `logser` is::
logser.pmf(k) = - p**k / (k*log(1-p))
for ``k >= 1``.
`logser` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
# looks wrong for p>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return self._random_state.logseries(p, size=self._size)
def _argcheck(self, p):
return (p > 0) & (p < 1)
def _pmf(self, k, p):
return -np.power(p, k) * 1.0 / k / log(1 - p)
def _stats(self, p):
r = log(1 - p)
mu = p / (p - 1.0) / r
mu2p = -p / r / (p - 1.0)**2
var = mu2p - mu*mu
mu3p = -p / r * (1.0+p) / (1.0 - p)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / np.power(var, 1.5)
mu4p = -p / r * (
1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
"""A Poisson discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `poisson` is::
poisson.pmf(k) = exp(-mu) * mu**k / k!
for ``k >= 0``.
`poisson` takes ``mu`` as shape parameter.
%(after_notes)s
%(example)s
"""
# Override rv_discrete._argcheck to allow mu=0.
def _argcheck(self, mu):
return mu >= 0
def _rvs(self, mu):
return self._random_state.poisson(mu, self._size)
def _logpmf(self, k, mu):
Pk = special.xlogy(k, mu) - gamln(k + 1) - mu
return Pk
def _pmf(self, k, mu):
return exp(self._logpmf(k, mu))
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k, mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k, mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q, mu))
vals1 = np.maximum(vals - 1, 0)
temp = special.pdtr(vals1, mu)
return np.where(temp >= q, vals1, vals)
def _stats(self, mu):
var = mu
tmp = np.asarray(mu)
mu_nonzero = tmp > 0
g1 = _lazywhere(mu_nonzero, (tmp,), lambda x: sqrt(1.0/x), np.inf)
g2 = _lazywhere(mu_nonzero, (tmp,), lambda x: 1.0/x, np.inf)
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson')
class planck_gen(rv_discrete):
"""A Planck discrete exponential random variable.
%(before_notes)s
Notes
-----
The probability mass function for `planck` is::
planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)
for ``k*lambda_ >= 0``.
`planck` takes ``lambda_`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, lambda_):
if (lambda_ > 0):
self.a = 0
self.b = np.inf
return 1
elif (lambda_ < 0):
self.a = -np.inf
self.b = 0
return 1
else:
return 0
def _pmf(self, k, lambda_):
fact = (1-exp(-lambda_))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return 1-exp(-lambda_*(k+1))
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(self.a, np.inf)
temp = self._cdf(vals1, lambda_)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_):
mu = 1/(exp(lambda_)-1)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
l = lambda_
C = (1-exp(-l))
return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck', longname='A discrete exponential ')
class boltzmann_gen(rv_discrete):
"""A Boltzmann (Truncated Discrete Exponential) random variable.
%(before_notes)s
Notes
-----
The probability mass function for `boltzmann` is::
        boltzmann.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)/(1-exp(-lambda_*N))
for ``k = 0,..., N-1``.
`boltzmann` takes ``lambda_`` and ``N`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, lambda_, N):
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',
longname='A truncated discrete exponential ')
class randint_gen(rv_discrete):
"""A uniform discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `randint` is::
randint.pmf(k) = 1./(high - low)
for ``k = low, ..., high - 1``.
`randint` takes ``low`` and ``high`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, low, high):
self.a = low
self.b = high - 1
return (high > low)
def _pmf(self, k, low, high):
p = np.ones_like(k) / (high - low)
return np.where((k >= low) & (k < high), p, 0.)
def _cdf(self, x, low, high):
k = floor(x)
return (k - low + 1.) / (high - low)
def _ppf(self, q, low, high):
vals = ceil(q * (high - low) + low) - 1
vals1 = (vals - 1).clip(low, high)
temp = self._cdf(vals1, low, high)
return np.where(temp >= q, vals1, vals)
def _stats(self, low, high):
m2, m1 = np.asarray(high), np.asarray(low)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d*d - 1) / 12.0
g1 = 0.0
g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
return mu, var, g1, g2
def _rvs(self, low, high=None):
"""An array of *size* random integers >= ``low`` and < ``high``.
If ``high`` is ``None``, then range is >=0 and < low
"""
return self._random_state.randint(low, high, self._size)
def _entropy(self, low, high):
return log(high - low)
randint = randint_gen(name='randint', longname='A discrete uniform '
'(random integer)')
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
"""A Zipf discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `zipf` is::
zipf.pmf(k, a) = 1/(zeta(a) * k**a)
for ``k >= 1``.
`zipf` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
return self._random_state.zipf(a, size=self._size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
Pk = 1.0 / special.zeta(a, 1) / k**a
return Pk
def _munp(self, n, a):
return _lazywhere(
a > n + 1, (a, n),
lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
np.inf)
zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
class dlaplace_gen(rv_discrete):
"""A Laplacian discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `dlaplace` is::
dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
for ``a > 0``.
`dlaplace` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, a):
return tanh(a/2.0) * exp(-a * abs(k))
def _cdf(self, x, a):
k = floor(x)
f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1)
return _lazywhere(k >= 0, (k, a), f=f, f2=f2)
def _ppf(self, q, a):
const = 1 + exp(a)
vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1,
-log((1-q) * const) / a))
vals1 = vals - 1
return np.where(self._cdf(vals1, a) >= q, vals1, vals)
def _stats(self, a):
ea = exp(a)
mu2 = 2.*ea/(ea-1.)**2
mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
return 0., mu2, 0., mu4/mu2**2 - 3.
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-np.inf,
name='dlaplace', longname='A discrete Laplacian')
class skellam_gen(rv_discrete):
"""A Skellam discrete random variable.
%(before_notes)s
Notes
-----
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let k1 and k2 be two Poisson-distributed r.v. with expected values
lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with
parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and
``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation
coefficient between k1 and k2. If the two Poisson-distributed r.v.
are independent then ``rho = 0``.
Parameters mu1 and mu2 must be strictly positive.
For details see: http://en.wikipedia.org/wiki/Skellam_distribution
`skellam` takes ``mu1`` and ``mu2`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, mu1, mu2):
n = self._size
return (self._random_state.poisson(mu1, n) -
self._random_state.poisson(mu2, n))
def _pmf(self, x, mu1, mu2):
px = np.where(x < 0,
_ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
_ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
# ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = floor(x)
px = np.where(x < 0,
_ncx2_cdf(2*mu2, -2*x, 2*mu1),
1-_ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
return px
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
__all__ = _distn_names + _distn_gen_names
| bsd-3-clause |
qPCR4vir/orange | Orange/projection/mds.py | 6 | 14713 | """
.. index:: multidimensional scaling (mds)
.. index::
single: projection; multidimensional scaling (mds)
**********************************
Multidimensional scaling (``mds``)
**********************************
This module provides functionality to perform multidimensional scaling
(http://en.wikipedia.org/wiki/Multidimensional_scaling).
The main class to perform multidimensional scaling is
:class:`Orange.projection.mds.MDS`
.. autoclass:: Orange.projection.mds.MDS
:members:
:exclude-members: Torgerson, get_distance, get_stress, calc_stress, run
.. automethod:: calc_stress(stress_func=SgnRelStress)
.. automethod:: run(iter, stress_func=SgnRelStress, eps=1e-3, progress_callback=None)
Stress functions
================
Stress functions that can be used for MDS have to be implemented as functions
or callable classes:
.. method:: \ __call__(correct, current, weight=1.0)
Compute the stress using the correct and the current distance value (the
:obj:`Orange.projection.mds.MDS.distances` and
:obj:`Orange.projection.mds.MDS.projected_distances` elements).
:param correct: correct (actual) distance between elements, represented by
the two points.
:type correct: float
:param current: current distance between the points in the MDS space.
:type current: float
This module provides the following stress functions:
* :obj:`SgnRelStress`
* :obj:`KruskalStress`
* :obj:`SammonStress`
* :obj:`SgnSammonStress`
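A custom stress function is therefore just a callable with the signature above,
for example (a minimal sketch)::
    def squared_stress(correct, current, weight=1.0):
        # squared difference between the target and the projected distance
        return (correct - current) ** 2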
Examples
========
MDS Scatterplot
---------------
The following script computes the Euclidean distance between the data
instances and runs MDS. Final coordinates are plotted with matplotlib
(not included with orange, http://matplotlib.sourceforge.net/).
Example (:download:`mds-scatterplot.py <code/mds-scatterplot.py>`)
.. literalinclude:: code/mds-scatterplot.py
:lines: 7-
The script produces a file *mds-scatterplot.py.png*. Color denotes
the class. Iris is a relatively simple data set with respect to
classification; unsurprisingly, MDS finds a placement of the instances in 2D
in which instances of different classes are well separated.
Note that MDS has no knowledge of points' classes.
.. image:: files/mds-scatterplot.png
A more advanced example
-----------------------
The following script performs 10 steps of Smacof optimization before computing
the stress. This is suitable if you have a large dataset and want to save some
time.
Example (:download:`mds-advanced.py <code/mds-advanced.py>`)
.. literalinclude:: code/mds-advanced.py
:lines: 7-
A few representative lines of the output are::
<-0.633911848068, 0.112218663096> [5.1, 3.5, 1.4, 0.2, 'Iris-setosa']
<-0.624193906784, -0.111143872142> [4.9, 3.0, 1.4, 0.2, 'Iris-setosa']
...
<0.265250980854, 0.237793982029> [7.0, 3.2, 4.7, 1.4, 'Iris-versicolor']
<0.208580598235, 0.116296850145> [6.4, 3.2, 4.5, 1.5, 'Iris-versicolor']
...
<0.635814905167, 0.238721415401> [6.3, 3.3, 6.0, 2.5, 'Iris-virginica']
<0.356859534979, -0.175976261497> [5.8, 2.7, 5.1, 1.9, 'Iris-virginica']
...
"""
import numpy
from numpy.linalg import svd
import Orange.core
from Orange import orangeom as orangemds
from Orange.utils import deprecated_keywords
from Orange.utils import deprecated_members
KruskalStress = orangemds.KruskalStress()
SammonStress = orangemds.SammonStress()
SgnSammonStress = orangemds.SgnSammonStress()
SgnRelStress = orangemds.SgnRelStress()
PointList = Orange.core.FloatListList
FloatListList = Orange.core.FloatListList
def _mycompare((a,aa),(b,bb)):
if a == b:
return 0
if a < b:
return -1
else:
return 1
class PivotMDS(object):
def __init__(self, distances=None, pivots=50, dim=2, **kwargs):
self.dst = numpy.array([m for m in distances])
self.n = len(self.dst)
if type(pivots) == type(1):
self.k = pivots
self.pivots = numpy.random.permutation(len(self.dst))[:pivots]
#self.pivots.sort()
elif type(pivots) == type([]):
self.pivots = pivots
#self.pivots.sort()
self.k = len(self.pivots)
else:
raise AttributeError('pivots')
def optimize(self):
# # Classical MDS (Torgerson)
# J = identity(self.n) - (1/float(self.n))
# B = -1/2. * dot(dot(J, self.dst**2), J)
# w,v = linalg.eig(B)
# tmp = zip([float(val) for val in w], range(self.n))
# tmp.sort()
# w1, w2 = tmp[-1][0], tmp[-2][0]
# v1, v2 = v[:, tmp[-1][1]], v[:, tmp[-2][1]]
# return v1 * sqrt(w1), v2 * sqrt(w2)
# Pivot MDS
d = self.dst[[self.pivots]].T
C = d**2
# double-center d
cavg = numpy.sum(d, axis=0)/(self.k+0.0) # column sum
ravg = numpy.sum(d, axis=1)/(self.n+0.0) # row sum
tavg = numpy.sum(cavg)/(self.n+0.0) # total sum
# TODO: optimize
for i in xrange(self.n):
for j in xrange(self.k):
C[i,j] += -ravg[i] - cavg[j]
C = -0.5 * (C + tavg)
w,v = numpy.linalg.eig(numpy.dot(C.T, C))
tmp = zip([float(val) for val in w], range(self.n))
tmp.sort()
w1, w2 = tmp[-1][0], tmp[-2][0]
v1, v2 = v[:, tmp[-1][1]], v[:, tmp[-2][1]]
x = numpy.dot(C, v1)
y = numpy.dot(C, v2)
return x, y
@deprecated_members(
{"projectedDistances": "projected_distances",
"originalDistances": "original_distances",
"avgStress": "avg_stress",
"progressCallback": "progress_callback",
"getStress": "calc_stress",
"get_stress": "calc_stress",
"calcStress": "calc_stress",
"getDistance": "calc_distance",
"get_distance": "calc_distance",
"calcDistance": "calc_distance",
"Torgerson": "torgerson",
"SMACOFstep": "smacof_step",
"LSMT": "lsmt"})
class MDS(object):
"""
Main class for performing multidimensional scaling.
:param distances: original dissimilarity - a distance matrix to operate on.
:type distances: :class:`Orange.misc.SymMatrix`
:param dim: dimension of the projected space.
:type dim: int
:param points: an initial configuration of points (optional)
:type points: :class:`Orange.core.FloatListList`
An instance of MDS object has the following attributes and functions:
.. attribute:: points
Holds the current configuration of projected points in an
:class:`Orange.core.FloatListList` object.
.. attribute:: distances
An :class:`Orange.misc.SymMatrix` containing the distances that we
want to achieve (lsmt changes these).
.. attribute:: projected_distances
An :class:`Orange.misc.SymMatrix` containing the distances between
projected points.
.. attribute:: original_distances
An :class:`Orange.misc.SymMatrix` containing the original distances
between points.
.. attribute:: stress
An :class:`Orange.misc.SymMatrix` holding the stress.
.. attribute:: dim
An integer holding the dimension of the projected space.
.. attribute:: n
An integer holding the number of elements (points).
.. attribute:: avg_stress
A float holding the average stress in the :obj:`stress` matrix.
.. attribute:: progress_callback
A function that gets called after each optimization step in the
:func:`run` method.
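    A typical workflow (a minimal sketch; assumes ``distances`` is a precomputed
    :class:`Orange.misc.SymMatrix` of pairwise dissimilarities)::
        mds = Orange.projection.mds.MDS(distances)
        mds.torgerson()   # analytical initial configuration
        mds.run(100)      # iterative Smacof optimization
        print mds.points[0]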
"""
def __init__(self, distances=None, dim=2, **kwargs):
self.mds=orangemds.MDS(distances, dim, **kwargs)
self.original_distances=Orange.misc.SymMatrix([m for m in self.distances])
def __getattr__(self, name):
if name in ["points", "projected_distances", "distances" ,"stress",
"progress_callback", "n", "dim", "avg_stress"]:
#print "rec:",name
return self.__dict__["mds"].__dict__[name]
else:
raise AttributeError(name)
def __setattr__(self, name, value):
#print "setattr"
if name=="points":
for i in range(len(value)):
for j in range(len(value[i])):
self.mds.points[i][j]=value[i][j]
return
if name in ["projected_distances", "distances" ,"stress",
"progress_callback"]:
self.mds.__setattr__(name, value)
else:
self.__dict__[name]=value
def __nonzero__(self):
return True
def smacof_step(self):
"""
Perform a single iteration of a Smacof algorithm that optimizes
:obj:`stress` and updates the :obj:`points`.
"""
self.mds.SMACOFstep()
def calc_distance(self):
"""
Compute the distances between points and update the
:obj:`projected_distances` matrix.
"""
self.mds.get_distance()
@deprecated_keywords({"stressFunc": "stress_func"})
def calc_stress(self, stress_func=SgnRelStress):
"""
Compute the stress between the current :obj:`projected_distances` and
:obj:`distances` matrix using *stress_func* and update the
:obj:`stress` matrix and :obj:`avgStress` accordingly.
"""
self.mds.getStress(stress_func)
@deprecated_keywords({"stressFunc": "stress_func"})
def optimize(self, iter, stress_func=SgnRelStress, eps=1e-3,
progress_callback=None):
self.mds.progress_callback=progress_callback
self.mds.optimize(iter, stress_func, eps)
@deprecated_keywords({"stressFunc": "stress_func"})
def run(self, iter, stress_func=SgnRelStress, eps=1e-3,
progress_callback=None):
"""
Perform optimization until stopping conditions are met.
Stopping conditions are:
* optimization runs for *iter* iterations of smacof_step function, or
* stress improvement (old stress minus new stress) is smaller than
eps * old stress.
:param iter: maximum number of optimization iterations.
:type iter: int
:param stress_func: stress function.
"""
self.optimize(iter, stress_func, eps, progress_callback)
def torgerson(self):
"""
Run the Torgerson algorithm that computes an initial analytical
solution of the problem.
"""
# Torgerson's initial approximation
O = numpy.array([m for m in self.distances])
## #B = matrixmultiply(O,O)
## # bug!? B = O**2
## B = dot(O,O)
## # double-center B
## cavg = sum(B, axis=0)/(self.n+0.0) # column sum
## ravg = sum(B, axis=1)/(self.n+0.0) # row sum
## tavg = sum(cavg)/(self.n+0.0) # total sum
## # B[row][column]
## for i in xrange(self.n):
## for j in xrange(self.n):
## B[i,j] += -cavg[j]-ravg[i]
## B = -0.5*(B+tavg)
# B = double-center O**2 !!!
J = numpy.identity(self.n) - (1/numpy.float(self.n))
B = -0.5 * numpy.dot(numpy.dot(J, O**2), J)
# SVD-solve B = ULU'
#(U,L,V) = singular_value_decomposition(B)
(U,L,V)=svd(B)
# X = U(L^0.5)
# # self.X = matrixmultiply(U,identity(self.n)*sqrt(L))
# X is n-dimensional, we take the two dimensions with the largest singular values
idx = numpy.argsort(L)[-self.dim:].tolist()
idx.reverse()
Lt = numpy.take(L,idx) # take those singular values
Ut = numpy.take(U,idx,axis=1) # take those columns that are enabled
Dt = numpy.identity(self.dim)*numpy.sqrt(Lt) # make a diagonal matrix, with squarooted values
self.points = Orange.core.FloatListList(numpy.dot(Ut,Dt))
self.freshD = 0
# D = identity(self.n)*sqrt(L) # make a diagonal matrix, with squarooted values
# X = matrixmultiply(U,D)
# self.X = take(X,idx,1)
# Kruskal's monotone transformation
def lsmt(self):
"""
Execute Kruskal monotone transformation.
"""
# optimize the distance transformation
# build vector o
effect = 0
        self.calc_distance()
o = []
for i in xrange(1,self.n):
for j in xrange(i):
o.append((self.original_distances[i,j],(i,j)))
o.sort(_mycompare)
# find the ties in o, and construct the d vector sorting in order within ties
d = []
td = []
uv = [] # numbers of consecutively tied o values
(i,j) = o[0][1]
distnorm = self.projected_distances[i,j]*self.projected_distances[i,j]
td = [self.projected_distances[i,j]] # fetch distance
for l in xrange(1,len(o)):
# copy now sorted distances in an array
# but sort distances within a tied o
(i,j) = o[l][1]
cd = self.projected_distances[i,j]
distnorm += self.projected_distances[i,j]*self.projected_distances[i,j]
if o[l][0] != o[l-1][0]:
# differing value, flush
sum = reduce(lambda x,y:x+y,td)+0.0
d.append([sum,len(td),sum/len(td),td])
td = []
td.append(cd)
sum = reduce(lambda x,y:x+y,td)+0.0
d.append([sum,len(td),sum/len(td),td])
####
# keep merging non-monotonous areas in d
monotony = 0
while not monotony and len(d) > 1:
monotony = 1
pi = 0 # index
n = 1 # n-areas
nd = []
r = d[0] # current area
for i in range(1,len(d)):
tr = d[i]
if r[2]>=tr[2]:
monotony = 0
effect = 1
r[0] += tr[0]
r[1] += tr[1]
                    r[2] = r[0]/r[1]
r[3] += tr[3]
else:
nd.append(r)
r = tr
nd.append(r)
d = nd
# normalizing multiplier
sum = 0.0
for i in d:
sum += i[2]*i[2]*i[1]
        f = numpy.sqrt(distnorm/numpy.maximum(sum, 1e-6))
# transform O
k = 0
for i in d:
for j in range(i[1]):
(ii,jj) = o[k][1]
self.distances[ii,jj] = f*i[2]
k += 1
assert(len(o) == k)
self.freshD = 0
return effect
| gpl-3.0 |
maryklayne/Funcao | examples/intermediate/mplot3d.py | 14 | 1261 | #!/usr/bin/env python
"""Matplotlib 3D plotting example
Demonstrates plotting with matplotlib.
"""
import sys
from sample import sample
from sympy import sin, Symbol
from sympy.external import import_module
def mplot3d(f, var1, var2, show=True):
"""
Plot a 3d function using matplotlib/Tk.
"""
import warnings
warnings.filterwarnings("ignore", "Could not match \S")
p = import_module('pylab')
# Try newer version first
p3 = import_module('mpl_toolkits.mplot3d',
__import__kwargs={'fromlist': ['something']}) or import_module('matplotlib.axes3d')
if not p or not p3:
sys.exit("Matplotlib is required to use mplot3d.")
x, y, z = sample(f, var1, var2)
fig = p.figure()
ax = p3.Axes3D(fig)
# ax.plot_surface(x,y,z) #seems to be a bug in matplotlib
ax.plot_wireframe(x, y, z)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if show:
p.show()
def main():
x = Symbol('x')
y = Symbol('y')
mplot3d(x**2 - y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
# mplot3d(x**2+y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
# mplot3d(sin(x)+sin(y), (x, -3.14, 3.14, 10), (y, -3.14, 3.14, 10))
if __name__ == "__main__":
main()
| bsd-3-clause |
rkmaddox/mne-python | examples/visualization/topo_compare_conditions.py | 20 | 1828 | """
=================================================
Compare evoked responses for different conditions
=================================================
In this example, an Epochs object for visual and auditory responses is created.
Both conditions are then accessed by their respective names to create a sensor
layout plot of the related evoked responses.
"""
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.viz import plot_evoked_topo
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up amplitude-peak rejection values for MEG channels
reject = dict(grad=4000e-13, mag=4e-12)
# Create epochs including different events
event_id = {'audio/left': 1, 'audio/right': 2,
'visual/left': 3, 'visual/right': 4}
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
picks='meg', baseline=(None, 0), reject=reject)
# Generate list of evoked objects from conditions names
evokeds = [epochs[name].average() for name in ('left', 'right')]
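# Indexing with 'left' or 'right' pools conditions across the '/'-separated tags,
# e.g. epochs['left'] combines the 'audio/left' and 'visual/left' epochs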
###############################################################################
# Show topography for two different conditions
colors = 'blue', 'red'
title = 'MNE sample data\nleft vs right (A/V combined)'
plot_evoked_topo(evokeds, color=colors, title=title, background_color='w')
plt.show()
| bsd-3-clause |
esatel/ADCPy | doc/source/conf.py | 1 | 8929 | # -*- coding: utf-8 -*-
#
# ADCpy documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 07 11:54:34 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'matplotlib.sphinxext.mathmpl',
'matplotlib.sphinxext.only_directives',
'matplotlib.sphinxext.plot_directive',
'matplotlib.sphinxext.ipython_directive',
'sphinx.ext.intersphinx',
'sphinx.ext.autodoc',
'sphinx.ext.doctest','numpydoc',
'sphinx.ext.autosummary']
#'numpydoc']
#'ipython_console_highlighting',
#'inheritance_diagram',
#'numpydoc']
autodoc_member_order = 'alphabetical'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ADCPy'
copyright = u'2014, California Department of Water Resources'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'dwrsmall.gif'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
# This prevents the weird 2-index result if you use numpydoc
html_domain_indices = ['py-modindex']
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ADCPydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ADCPy.tex', u'ADCPy Documentation',
u'Benjamin Saenz, David Ralston, Rusty Holleman,\nEd Gross, Eli Ateljevich', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = ['py-modindex']
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'adcpy', u'ADCpy Documentation',
[u'Benjamin Saenz, David Ralston, Rusty Holleman, Ed Gross, Eli Ateljevich'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ADCpy', u'ADCpy Documentation',
u'Benjamin Saenz, David Ralston, Rusty Holleman, Ed Gross, Eli Ateljevich', 'ADCPy', 'Tools for ADCP analysis and visualization.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
vdt/SimpleCV | SimpleCV/examples/util/ColorCube.py | 13 | 1901 | from SimpleCV import Image, Camera, Display, Color
import pygame as pg
import numpy as np
from pylab import *
from mpl_toolkits.mplot3d import axes3d
from matplotlib.backends.backend_agg import FigureCanvasAgg
import cv2
bins = 8
#precompute
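# pair every (x, y, z) histogram bin index with the normalized color of its bin
# center so each plotted point can later be drawn in its own color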
idxs = []
colors = []
offset = bins/2
skip = 255/bins
for x in range(0,bins):
for y in range(0,bins):
for z in range(0,bins):
b = ((x*skip)+offset)/255.0
g = ((y*skip)+offset)/255.0
r = ((z*skip)+offset)/255.0
idxs.append((x,y,z,(r,g,b)))
# plot points in 3D
cam = Camera()
disp = Display((800,600))
fig = figure()
fig.set_size_inches( (10,7) )
canvas = FigureCanvasAgg(fig)
azim = 0
while disp.isNotDone():
ax = fig.gca(projection='3d')
ax.set_xlabel('BLUE', color=(0,0,1) )
ax.set_ylabel('GREEN',color=(0,1,0))
ax.set_zlabel('RED',color=(1,0,0))
# Get the color histogram
img = cam.getImage().scale(0.3)
rgb = img.getNumpyCv2()
hist = cv2.calcHist([rgb],[0,1,2],None,[bins,bins,bins],[0,256,0,256,0,256])
hist = hist/np.max(hist)
# render everything
[ ax.plot([x],[y],[z],'.',markersize=max(hist[x,y,z]*100,6),color=color) for x,y,z,color in idxs if(hist[x][y][z]>0) ]
#[ ax.plot([x],[y],[z],'.',color=color) for x,y,z,color in idxs if(hist[x][y][z]>0) ]
ax.set_xlim3d(0, bins-1)
ax.set_ylim3d(0, bins-1)
ax.set_zlim3d(0, bins-1)
azim = (azim+0.5)%360
ax.view_init(elev=35, azim=azim)
########### convert matplotlib to SimpleCV image
canvas.draw()
renderer = canvas.get_renderer()
raw_data = renderer.tostring_rgb()
size = canvas.get_width_height()
surf = pg.image.fromstring(raw_data, size, "RGB")
figure = Image(surf)
############ All done
figure = figure.floodFill((0,0), tolerance=5,color=Color.WHITE)
result = figure.blit(img, pos=(20,20))
result.save(disp)
fig.clf()
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/mpl_toolkits/mplot3d/axis3d.py | 7 | 17489 | #!/usr/bin/python
# axis3d.py, original mplot3d version by John Porter
# Created: 23 Sep 2005
# Parts rewritten by Reinier Heeres <reinier@heeres.eu>
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import math
import copy
from matplotlib import lines as mlines, axis as maxis, \
patches as mpatches
from . import art3d
from . import proj3d
import numpy as np
def get_flip_min_max(coord, index, mins, maxs):
if coord[index] == mins[index]:
return maxs[index]
else:
return mins[index]
def move_from_center(coord, centers, deltas, axmask=(True, True, True)):
'''Return a coordinate that is moved by "deltas" away from the center.'''
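    # e.g. move_from_center([0, 0, 0], centers=[1, 1, 1], deltas=[2, 2, 2])
    # returns [-2, -2, -2]: every unmasked coordinate is pushed away from its
    # center by the corresponding delta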
coord = copy.copy(coord)
#print coord, centers, deltas, axmask
for i in range(3):
if not axmask[i]:
continue
if coord[i] < centers[i]:
coord[i] -= deltas[i]
else:
coord[i] += deltas[i]
return coord
def tick_update_position(tick, tickxs, tickys, labelpos):
'''Update tick line and label position and style.'''
for (label, on) in ((tick.label1, tick.label1On), \
(tick.label2, tick.label2On)):
if on:
label.set_position(labelpos)
tick.tick1On, tick.tick2On = True, False
tick.tick1line.set_linestyle('-')
tick.tick1line.set_marker('')
tick.tick1line.set_data(tickxs, tickys)
tick.gridline.set_data(0, 0)
class Axis(maxis.XAxis):
# These points from the unit cube make up the x, y and z-planes
_PLANES = (
(0, 3, 7, 4), (1, 2, 6, 5), # yz planes
(0, 1, 5, 4), (3, 2, 6, 7), # xz planes
(0, 1, 2, 3), (4, 5, 6, 7), # xy planes
)
# Some properties for the axes
_AXINFO = {
'x': {'i': 0, 'tickdir': 1, 'juggled': (1, 0, 2),
'color': (0.95, 0.95, 0.95, 0.5)},
'y': {'i': 1, 'tickdir': 0, 'juggled': (0, 1, 2),
'color': (0.90, 0.90, 0.90, 0.5)},
'z': {'i': 2, 'tickdir': 0, 'juggled': (0, 2, 1),
'color': (0.925, 0.925, 0.925, 0.5)},
}
def __init__(self, adir, v_intervalx, d_intervalx, axes, *args, **kwargs):
# adir identifies which axes this is
self.adir = adir
# data and viewing intervals for this direction
self.d_interval = d_intervalx
self.v_interval = v_intervalx
# This is a temporary member variable.
# Do not depend on this existing in future releases!
self._axinfo = self._AXINFO[adir].copy()
self._axinfo.update({'label' : {'va': 'center',
'ha': 'center'},
'tick' : {'inward_factor': 0.2,
'outward_factor': 0.1},
'axisline': {'linewidth': 0.75,
'color': (0, 0, 0, 1)},
'grid' : {'color': (0.9, 0.9, 0.9, 1),
'linewidth': 1.0},
})
maxis.XAxis.__init__(self, axes, *args, **kwargs)
self.set_rotate_label(kwargs.get('rotate_label', None))
def init3d(self):
self.line = mlines.Line2D(xdata=(0, 0), ydata=(0, 0),
linewidth=self._axinfo['axisline']['linewidth'],
color=self._axinfo['axisline']['color'],
antialiased=True,
)
# Store dummy data in Polygon object
self.pane = mpatches.Polygon(np.array([[0,0], [0,1], [1,0], [0,0]]),
closed=False,
alpha=0.8,
facecolor=(1,1,1,0),
edgecolor=(1,1,1,0))
self.set_pane_color(self._axinfo['color'])
self.axes._set_artist_props(self.line)
self.axes._set_artist_props(self.pane)
self.gridlines = art3d.Line3DCollection([], )
self.axes._set_artist_props(self.gridlines)
self.axes._set_artist_props(self.label)
self.axes._set_artist_props(self.offsetText)
# Need to be able to place the label at the correct location
self.label._transform = self.axes.transData
self.offsetText._transform = self.axes.transData
def get_tick_positions(self):
majorLocs = self.major.locator()
self.major.formatter.set_locs(majorLocs)
majorLabels = [self.major.formatter(val, i) for i, val in enumerate(majorLocs)]
return majorLabels, majorLocs
def get_major_ticks(self, numticks=None):
ticks = maxis.XAxis.get_major_ticks(self, numticks)
for t in ticks:
t.tick1line.set_transform(self.axes.transData)
t.tick2line.set_transform(self.axes.transData)
t.gridline.set_transform(self.axes.transData)
t.label1.set_transform(self.axes.transData)
t.label2.set_transform(self.axes.transData)
return ticks
def set_pane_pos(self, xys):
xys = np.asarray(xys)
xys = xys[:,:2]
self.pane.xy = xys
self.stale = True
def set_pane_color(self, color):
'''Set pane color to a RGBA tuple'''
self._axinfo['color'] = color
self.pane.set_edgecolor(color)
self.pane.set_facecolor(color)
self.pane.set_alpha(color[-1])
self.stale = True
def set_rotate_label(self, val):
'''
Whether to rotate the axis label: True, False or None.
If set to None the label will be rotated if longer than 4 chars.
'''
self._rotate_label = val
self.stale = True
def get_rotate_label(self, text):
if self._rotate_label is not None:
return self._rotate_label
else:
return len(text) > 4
def _get_coord_info(self, renderer):
minx, maxx, miny, maxy, minz, maxz = self.axes.get_w_lims()
if minx > maxx:
minx, maxx = maxx, minx
if miny > maxy:
miny, maxy = maxy, miny
if minz > maxz:
minz, maxz = maxz, minz
mins = np.array((minx, miny, minz))
maxs = np.array((maxx, maxy, maxz))
centers = (maxs + mins) / 2.
deltas = (maxs - mins) / 12.
mins = mins - deltas / 4.
maxs = maxs + deltas / 4.
vals = mins[0], maxs[0], mins[1], maxs[1], mins[2], maxs[2]
tc = self.axes.tunit_cube(vals, renderer.M)
avgz = [tc[p1][2] + tc[p2][2] + tc[p3][2] + tc[p4][2] for \
p1, p2, p3, p4 in self._PLANES]
highs = np.array([avgz[2*i] < avgz[2*i+1] for i in range(3)])
return mins, maxs, centers, deltas, tc, highs
def draw_pane(self, renderer):
renderer.open_group('pane3d')
mins, maxs, centers, deltas, tc, highs = self._get_coord_info(renderer)
info = self._axinfo
index = info['i']
if not highs[index]:
plane = self._PLANES[2 * index]
else:
plane = self._PLANES[2 * index + 1]
xys = [tc[p] for p in plane]
self.set_pane_pos(xys)
self.pane.draw(renderer)
renderer.close_group('pane3d')
def draw(self, renderer):
self.label._transform = self.axes.transData
renderer.open_group('axis3d')
# code from XAxis
majorTicks = self.get_major_ticks()
majorLocs = self.major.locator()
info = self._axinfo
index = info['i']
# filter locations here so that no extra grid lines are drawn
locmin, locmax = self.get_view_interval()
if locmin > locmax:
locmin, locmax = locmax, locmin
# Rudimentary clipping
majorLocs = [loc for loc in majorLocs if
locmin <= loc <= locmax]
self.major.formatter.set_locs(majorLocs)
majorLabels = [self.major.formatter(val, i)
for i, val in enumerate(majorLocs)]
mins, maxs, centers, deltas, tc, highs = self._get_coord_info(renderer)
# Determine grid lines
minmax = np.where(highs, maxs, mins)
# Draw main axis line
juggled = info['juggled']
edgep1 = minmax.copy()
edgep1[juggled[0]] = get_flip_min_max(edgep1, juggled[0], mins, maxs)
edgep2 = edgep1.copy()
edgep2[juggled[1]] = get_flip_min_max(edgep2, juggled[1], mins, maxs)
pep = proj3d.proj_trans_points([edgep1, edgep2], renderer.M)
centpt = proj3d.proj_transform(centers[0], centers[1], centers[2], renderer.M)
self.line.set_data((pep[0][0], pep[0][1]), (pep[1][0], pep[1][1]))
self.line.draw(renderer)
# Grid points where the planes meet
xyz0 = []
for val in majorLocs:
coord = minmax.copy()
coord[index] = val
xyz0.append(coord)
# Draw labels
peparray = np.asanyarray(pep)
# The transAxes transform is used because the Text object
# rotates the text relative to the display coordinate system.
# Therefore, if we want the labels to remain parallel to the
# axis regardless of the aspect ratio, we need to convert the
# edge points of the plane to display coordinates and calculate
# an angle from that.
# TODO: Maybe Text objects should handle this themselves?
dx, dy = (self.axes.transAxes.transform([peparray[0:2, 1]]) -
self.axes.transAxes.transform([peparray[0:2, 0]]))[0]
lxyz = 0.5*(edgep1 + edgep2)
# A rough estimate; points are ambiguous since 3D plots rotate
ax_scale = self.axes.bbox.size / self.figure.bbox.size
ax_inches = np.multiply(ax_scale, self.figure.get_size_inches())
ax_points_estimate = sum(72. * ax_inches)
deltas_per_point = 48. / ax_points_estimate
default_offset = 21.
labeldeltas = (self.labelpad + default_offset) * deltas_per_point\
* deltas
axmask = [True, True, True]
axmask[index] = False
lxyz = move_from_center(lxyz, centers, labeldeltas, axmask)
tlx, tly, tlz = proj3d.proj_transform(lxyz[0], lxyz[1], lxyz[2], \
renderer.M)
self.label.set_position((tlx, tly))
if self.get_rotate_label(self.label.get_text()):
angle = art3d.norm_text_angle(math.degrees(math.atan2(dy, dx)))
self.label.set_rotation(angle)
self.label.set_va(info['label']['va'])
self.label.set_ha(info['label']['ha'])
self.label.draw(renderer)
# Draw Offset text
# Which of the two edge points do we want to
# use for locating the offset text?
if juggled[2] == 2 :
outeredgep = edgep1
outerindex = 0
else :
outeredgep = edgep2
outerindex = 1
pos = copy.copy(outeredgep)
pos = move_from_center(pos, centers, labeldeltas, axmask)
olx, oly, olz = proj3d.proj_transform(pos[0], pos[1], pos[2], renderer.M)
self.offsetText.set_text( self.major.formatter.get_offset() )
self.offsetText.set_position( (olx, oly) )
angle = art3d.norm_text_angle(math.degrees(math.atan2(dy, dx)))
self.offsetText.set_rotation(angle)
# Must set rotation mode to "anchor" so that
# the alignment point is used as the "fulcrum" for rotation.
self.offsetText.set_rotation_mode('anchor')
#-----------------------------------------------------------------------
# Note: the following statement for determining the proper alignment of
# the offset text. This was determined entirely by trial-and-error
# and should not be in any way considered as "the way". There are
# still some edge cases where alignment is not quite right, but
# this seems to be more of a geometry issue (in other words, I
# might be using the wrong reference points).
#
# (TT, FF, TF, FT) are the shorthand for the tuple of
# (centpt[info['tickdir']] <= peparray[info['tickdir'], outerindex],
# centpt[index] <= peparray[index, outerindex])
#
# Three-letters (e.g., TFT, FTT) are short-hand for the array
# of bools from the variable 'highs'.
# ---------------------------------------------------------------------
if centpt[info['tickdir']] > peparray[info['tickdir'], outerindex] :
# if FT and if highs has an even number of Trues
if (centpt[index] <= peparray[index, outerindex]
and ((len(highs.nonzero()[0]) % 2) == 0)) :
# Usually, this means align right, except for the FTT case,
# in which offset for axis 1 and 2 are aligned left.
if highs.tolist() == [False, True, True] and index in (1, 2) :
align = 'left'
else :
align = 'right'
else :
# The FF case
align = 'left'
else :
# if TF and if highs has an even number of Trues
if (centpt[index] > peparray[index, outerindex]
and ((len(highs.nonzero()[0]) % 2) == 0)) :
# Usually mean align left, except if it is axis 2
if index == 2 :
align = 'right'
else :
align = 'left'
else :
# The TT case
align = 'right'
self.offsetText.set_va('center')
self.offsetText.set_ha(align)
self.offsetText.draw(renderer)
# Draw grid lines
if len(xyz0) > 0:
# Grid points at end of one plane
xyz1 = copy.deepcopy(xyz0)
newindex = (index + 1) % 3
newval = get_flip_min_max(xyz1[0], newindex, mins, maxs)
for i in range(len(majorLocs)):
xyz1[i][newindex] = newval
# Grid points at end of the other plane
xyz2 = copy.deepcopy(xyz0)
newindex = (index + 2) % 3
newval = get_flip_min_max(xyz2[0], newindex, mins, maxs)
for i in range(len(majorLocs)):
xyz2[i][newindex] = newval
lines = list(zip(xyz1, xyz0, xyz2))
if self.axes._draw_grid:
self.gridlines.set_segments(lines)
self.gridlines.set_color([info['grid']['color']] * len(lines))
self.gridlines.draw(renderer, project=True)
# Draw ticks
tickdir = info['tickdir']
tickdelta = deltas[tickdir]
if highs[tickdir]:
ticksign = 1
else:
ticksign = -1
for tick, loc, label in zip(majorTicks, majorLocs, majorLabels):
if tick is None:
continue
# Get tick line positions
pos = copy.copy(edgep1)
pos[index] = loc
pos[tickdir] = edgep1[tickdir] + info['tick']['outward_factor'] * \
ticksign * tickdelta
x1, y1, z1 = proj3d.proj_transform(pos[0], pos[1], pos[2], \
renderer.M)
pos[tickdir] = edgep1[tickdir] - info['tick']['inward_factor'] * \
ticksign * tickdelta
x2, y2, z2 = proj3d.proj_transform(pos[0], pos[1], pos[2], \
renderer.M)
# Get position of label
default_offset = 8. # A rough estimate
labeldeltas = (tick.get_pad() + default_offset) * deltas_per_point\
* deltas
axmask = [True, True, True]
axmask[index] = False
pos[tickdir] = edgep1[tickdir]
pos = move_from_center(pos, centers, labeldeltas, axmask)
lx, ly, lz = proj3d.proj_transform(pos[0], pos[1], pos[2], \
renderer.M)
tick_update_position(tick, (x1, x2), (y1, y2), (lx, ly))
tick.set_label1(label)
tick.set_label2(label)
tick.draw(renderer)
renderer.close_group('axis3d')
self.stale = False
def get_view_interval(self):
"""return the Interval instance for this 3d axis view limits"""
return self.v_interval
def set_view_interval(self, vmin, vmax, ignore=False):
if ignore:
self.v_interval = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
self.v_interval = min(vmin, Vmin), max(vmax, Vmax)
# TODO: Get this to work properly when mplot3d supports
# the transforms framework.
def get_tightbbox(self, renderer) :
# Currently returns None so that Axis.get_tightbbox
# doesn't return junk info.
return None
# Use classes to look at different data limits
class XAxis(Axis):
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.xy_dataLim.intervalx
class YAxis(Axis):
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.xy_dataLim.intervaly
class ZAxis(Axis):
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.zz_dataLim.intervalx
| mit |
NSLS-II-SRX/ipython_ophyd | profile_xf05id1-noX11/startup/85-bs_callbacks.py | 1 | 3670 | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 24 12:30:06 2016
@author: xf05id1
"""
from bluesky.callbacks import CallbackBase,LivePlot
#import os
#import time as ttime
#from databroker import DataBroker as db, get_events
#from databroker.databroker import fill_event
import filestore.api as fsapi
#from metadatastore.commands import run_start_given_uid, descriptors_by_start
#import matplotlib.pyplot as plt
from xray_vision.backend.mpl.cross_section_2d import CrossSection
#from .callbacks import CallbackBase
#import numpy as np
#import doct
#from databroker import DataBroker as db
i0_baseline = 7.24e-10
class NormalizeLivePlot(LivePlot):
def __init__(self, *args, norm_key=None, **kwargs):
super().__init__(*args, **kwargs)
if norm_key is None:
raise RuntimeError("norm key is required kwarg")
self._norm = norm_key
def event(self, doc):
"Update line with data from this Event."
try:
if self.x is not None:
# this try/except block is needed because multiple event streams
# will be emitted by the RunEngine and not all event streams will
# have the keys we want
new_x = doc['data'][self.x]
else:
new_x = doc['seq_num']
new_y = doc['data'][self.y]
new_norm = doc['data'][self._norm]
except KeyError:
# wrong event stream, skip it
return
self.y_data.append(new_y / abs(new_norm-i0_baseline))
self.x_data.append(new_x)
self.current_line.set_data(self.x_data, self.y_data)
# Rescale and redraw.
self.ax.relim(visible_only=True)
self.ax.autoscale_view(tight=True)
self.ax.figure.canvas.draw_idle()
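# Usage sketch (detector/motor names are illustrative, not defined in this module):
#   lp = NormalizeLivePlot('fluor_counts', x='energy', norm_key='i0')
#   RE(scan_plan, subs={'all': [lp]})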
#class LiveImagePiXi(CallbackBase):
"""
Stream 2D images in a cross-section viewer.
Parameters
----------
field : string
name of data field in an Event
Note
----
Requires a matplotlib fix that is not released as of this writing. The
relevant commit is a951b7.
"""
# def __init__(self, field):
# super().__init__()
# self.field = field
# fig = plt.figure()
# self.cs = CrossSection(fig)
# self.cs._fig.show()
# def event(self, doc):
# #uid = doc['data'][self.field]
# #data = fsapi.retrieve(uid)
# data = doc['data']['pixi_image']
# self.cs.update_image(data)
# self.cs._fig.canvas.draw()
# self.cs._fig.canvas.flush_events()
#
def make_live_image(image_axes, key):
"""
Example
    --------
fig, ax = plt.subplots()
image_axes = ax.imshow(np.zeros((476, 512)), vmin=0, vmax=2)
cb = make_live_image(image_axes, 'pixi_image_array_data')
RE(Count([pixi]), subs={'event': [cb]})
"""
def live_image(name, doc):
if name != 'event':
return
image_axes.set_data(doc['data'][key].reshape(476, 512))
return live_image
class SRXLiveImage(CallbackBase):
"""
Stream 2D images in a cross-section viewer.
Parameters
----------
field : string name of data field in an Event
Note
----
Requires a matplotlib fix that is not released as of this writing. The
relevant commit is a951b7.
"""
def __init__(self, field):
super().__init__()
self.field = field
fig = plt.figure()
self.cs = CrossSection(fig)
self.cs._fig.show()
def event(self, doc):
uid = doc['data'][self.field]
data = fsapi.retrieve(uid)
self.cs.update_image(data)
self.cs._fig.canvas.draw_idle()
| bsd-2-clause |
leon-adams/datascience | algorithms/hobfield.py | 1 | 5247 | #
# Leon Adams
#
# Python module for running a Hopfield network to recover a memorized pattern from a perturbed image.
# The raw data set is represented in png image format. This code takes the three color channels (rgb),
# converts them to a single-channel gray-scaled image, and then transforms the output to a [-1, 1] vector
# for use in the calculations of a Hopfield neural network.
#
# Dependencies: numpy; matplotlib
#
# Usage
# Can be used as a normal python module or as a standalone python script.
# When calling from the command line as a script, supply the corruption percentages at the end of the call.
#
# Example: python hopfield.py 2 3 4
# This will produce 2, 3, and 4 percent perturbations of the image files and then
# attempt to locate the closest memorized pattern using a Hopfield network with the Hebb learning rule.
# If called without perturbation parameters, it defaults to [1, 5, 10, 15, 20, 25] corruption percentages.
# Output: the execution produces a series of images showing first the perturbed
# image with the corruption percentage in the title, followed by the closest memorized
# image found by the Hopfield network.
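#
# Module usage sketch (assumes the datasets/*.png files referenced below are available):
#   import hobfield
#   hobfield.test_stable_memories(hobfield.stable_memories, [5, 10])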
# begin import needed libraries
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# end import libraries
def rgb_to_gray_array(rgb):
'''
Helper function to convert from rgb tensor to matrix gray-scaled image representation.
Input: rgb tensor matrix of the three rgb color channels.
output: numpy array of gray-scaled numeric values.
'''
return np.dot(rgb[...,:3], np.array([0.299, 0.587, 0.114]))
def read_images(filenames):
'''
Read images to set to memory. Convert from rgb tensor to gray scale representation.
Takes a list of filenames in directory containing pixel images. Returns a list
of numpy arrays converted to gray-scale.
'''
data = [( mpimg.imread(number) ) for number in filenames]
return data, data[0].shape
def create_vector_image(data_array):
'''
Converts a gray-scaled image to [-1, +1] vector representation for hopfield networks.
'''
data_array = np.where(data_array < 0.99, -1, 1)
return data_array.flatten()
def print_unique_cnts(array):
print( np.unique(array, return_counts=True ) )
def train(memories):
'''
    Training function for a Hopfield neural network, trained with the Hebb update rule.
'''
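    # Hebb rule sketch: W = (1/P) * sum_p outer(x_p, x_p) with the diagonal zeroed,
    # where P is the number of stored patterns (rows of `memories`).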
rate, c = memories.shape
Weight = np.zeros((c, c))
for p in memories:
Weight = Weight + np.outer(p,p)
Weight[np.diag_indices(c)] = 0
return Weight/rate
def look_up(Weight_matrix, candidate_pattern, shape, percent_corrupted, steps=5):
'''
    Given a candidate pattern, look up the closest memorized stable state. Return the
stable memorized state.
'''
sgn = np.vectorize(lambda x: -1 if x<0 else 1)
img = None
for i in range(steps):
im = show_pattern(candidate_pattern, shape)
candidate_pattern = sgn(np.dot(candidate_pattern, Weight_matrix))
if img is None:
img = plt.imshow(im, cmap=plt.cm.binary, interpolation='nearest')
plt.title(str(percent_corrupted) + ' percent corrupted pixels')
else:
img.set_data(im)
plt.pause(.2)
plt.draw()
return candidate_pattern
def hopfield_energy(Weight, patterns):
'''
Calculates the current energy value for a given pattern and weight matrix.
'''
return np.array([-0.5*np.dot(np.dot(p.T, Weight), p) for p in patterns])
def show_img(image, shape):
'''
Helper function to produce visualization of an image.
'''
plt.imshow(image.reshape(shape), cmap=plt.cm.binary, interpolation='nearest')
plt.show()
def show_pattern(pattern, shape):
return np.where(pattern < 0, 0, 1).reshape(shape)
def corrupts(pattern, percentage):
'''
Helper function for deriving corrupted pattern images. Specify stable memory pattern
and the percentage of pixels to switch.
'''
counts = int( 2*np.ceil( len(pattern) * percentage / 200 ) )
neg_mask = np.where(pattern <= 0)[0]
pos_mask = np.where(pattern > 0)[0]
    neg_corrupt_indices = np.random.choice(neg_mask, counts//2, replace = False)
    pos_corrupt_indices = np.random.choice(pos_mask, counts//2, replace = False)
corrupt_pattern = np.copy(pattern)
corrupt_pattern[neg_corrupt_indices] = 1
corrupt_pattern[pos_corrupt_indices] = -1
return corrupt_pattern
data, shape = read_images(['datasets/C.png', 'datasets/D.png', 'datasets/J.png'])
stable_memories = np.array([create_vector_image(rgb_to_gray_array(array)) for array in data ])
norm_weight_matrix = train(stable_memories)
def test_stable_memories(stable_memory_patterns, corrupt_perentages):
for memory in stable_memory_patterns:
for percent in corrupt_perentages:
crpt_memory = corrupts(memory, percent)
look_up(norm_weight_matrix, crpt_memory, shape[0:2], percent_corrupted = percent, steps=5)
if __name__ == "__main__":
user_input = sys.argv
if len(user_input) > 1:
test_stable_memories(stable_memories, [float(i) for i in user_input[1:] ])
else:
test_stable_memories(stable_memories, [1, 5, 10, 15, 20, 25])
| mpl-2.0 |
karvenka/sp17-i524 | project/S17-IR-P014/code/delay.py | 15 | 5276 | import sys
import csv
import sip
#import org.apache.log4j.{Level, Logger}
import matplotlib
#matplotlib.user('agg')
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
from pyspark import SparkContext, SparkConf
from datetime import datetime
from operator import add, itemgetter
from collections import namedtuple
from datetime import datetime
import os
import time
from StringIO import StringIO
#Defining the fields, Creating a Flights class with the following fields as a tuple
#Each row is converted into a list
timestarted = time.time()
fields = ('date', 'airline', 'flightnum', 'origin', 'dest', 'dep',
'dep_delay', 'arv', 'arv_delay', 'airtime', 'distance')
Flight = namedtuple('Flight', fields, verbose=True)
DATE_FMT = "%Y-%m-%d"
TIME_FMT = "%H%M"
# User Defined Functions
def toCSVLine(data):
return ','.join(str(d) for d in data)
def split(line):
reader = csv.reader(StringIO(line))
return reader.next()
def parse(row):
    row[0] = datetime.strptime(row[0], DATE_FMT).date()
row[5] = datetime.strptime(row[5], TIME_FMT).time()
row[6] = float(row[6])
row[7] = datetime.strptime(row[7], TIME_FMT).time()
row[8] = float(row[8])
row[9] = float(row[9])
row[10] = float(row[10])
return Flight(*row[:11])
def notHeader(row):
return "Description" not in row
def plot(airlinesdelays):
airlines = [d[0] for d in airlinesdelays]
minutes = [d[1] for d in airlinesdelays]
index = list(xrange(len(airlines)))
#Above we retrieved the respective columns from the list
#Here we mention the plot as a horizontal bar plot
fig, axe = plt.subplots()
bars = axe.barh(index, minutes)
# Add the total minutes to the right
for idx, air, min in zip(index, airlines, minutes):
if min > 0:
bars[idx].set_color('#d9230f')
axe.annotate(" %0.0f min" % min, xy=(min+1, idx+0.5), va='center')
else:
bars[idx].set_color('#469408')
axe.annotate(" %0.0f min" % min, xy=(10, idx+0.5), va='center')
# Set the ticks
ticks = plt.yticks([idx+ 0.5 for idx in index], airlines)
xt = plt.xticks()[0]
plt.xticks(xt, [' '] * len(xt))
# minimize chart junk
plt.grid(axis = 'x', color ='white', linestyle='-')
plt.title('Total Minutes Delayed per Airline')
plt.savefig('airlines.png')
#airlines.filter(notHeader).take(10)
#main method is the entry point for the following program
if __name__ == "__main__":
conf = SparkConf().setAppName("average")
sc = SparkContext(conf=conf)
#setting log level to error
# val rootLogger = Logger.getRootLogger()
# rootLogger.setLevel(Level.ERROR)
#importing data from HDFS for performing analysis
airlines = sc.textFile(sys.argv[1])
# airlines = sc.textFile("hdfs://192.168.1.8:8020/fltdata/airlines.csv")
flights = sc.textFile(sys.argv[2])
airports =sc.textFile(sys.argv[3])
airlinesParsed = dict(airlines.map(split).collect())
airportsParsed= airports.filter(notHeader).map(split)
# print "without header and spliting up", airlines.take(10)
# print "without header and spliting up", airlines.take(10)
flightsParsed= flights.map(lambda x: x.split(',')).map(parse)
#print "The average delay is "+str(sumCount[0]/float(sumCount[1]))
airportDelays = flightsParsed.map(lambda x: (x.origin,x.dep_delay))
# First find the total delay per airport
airportTotalDelay=airportDelays.reduceByKey(lambda x,y:x+y)
# Find the count per airport
airportCount=airportDelays.mapValues(lambda x:1).reduceByKey(lambda x,y:x+y)
# Join to have the sum, count in 1 RDD
airportSumCount=airportTotalDelay.join(airportCount)
# Compute avg delay per airport
airportAvgDelay=airportSumCount.mapValues(lambda x : x[0]/float(x[1]))
airportDelay = airportAvgDelay.sortBy(lambda x:-x[1])
print "", airportDelay.take(10)
airportLookup=airportsParsed.collectAsMap()
#airlineLookup=airlinesParsed.collectAsMap()
airline_lookup = sc.broadcast(airlinesParsed)
airlinesdelays = flightsParsed.map(lambda f: (airline_lookup.value[f.airline],add(f.dep_delay, f.arv_delay)))
    airlinesdelays = airlinesdelays.reduceByKey(add).collect()
    airlinesdelays = sorted(airlinesdelays, key=itemgetter(1))
#tenairlines = delays.map(toCSVLine)
ten = airportAvgDelay.map(lambda x: (airportLookup[x[0]],x[1]))
#print "", ten.take(10)
for d in airlinesdelays:
print "%0.0f minutes delayed\t%s" % (d[1], d[0])
airportBC=sc.broadcast(airportLookup)
topTenAirportsWithDelays = airportAvgDelay.map(lambda x: (airportBC.value[x[0]],x[1])).sortBy(lambda x:-x[1])
lines = topTenAirportsWithDelays.take(10)
topten = "/home/hadoop/"
tenairlines = "/home/hadoop/"
#For collecting the outputs into csv files
with open('topten', "w") as output:
writer = csv.writer(output, lineterminator='\n')
for val in lines:
writer.writerows([val])
with open('tenairlines',"w") as output:
writer = csv.writer(output, lineterminator='\n')
        for val in airlinesdelays:
writer.writerows([val])
plot(airlinesdelays)
#Final time taken will be calculated here
timetaken = time.time()-timestarted
print "", timetaken
| apache-2.0 |
yarikoptic/NiPy-OLD | examples/neurospin/demo_dmtx.py | 1 | 2005 | """ test code to make a design matrix
"""
import numpy as np
from nipy.neurospin.utils.design_matrix import dmtx_light
tr = 1.0
frametimes = np.linspace(0,127*tr,128)
conditions = [0,0,0,1,1,1,3,3,3]
onsets=[30,70,100,10,30,90,30,40,60]
hrf_model = 'Canonical'
motion = np.cumsum(np.random.randn(128,6),0)
add_reg_names = ['tx','ty','tz','rx','ry','rz']
#event-related design matrix
paradigm = np.vstack(([conditions, onsets])).T
x1,name1 = dmtx_light(frametimes, paradigm, drift_model='Polynomial',
drift_order=3, add_regs=motion, add_reg_names=add_reg_names)
# block design matrix
duration = 7*np.ones(9)
paradigm = np.vstack(([conditions, onsets, duration])).T
x2,name2 = dmtx_light(frametimes, paradigm, drift_model='Polynomial', drift_order=3)
# FIR model
paradigm = np.vstack(([conditions, onsets])).T
hrf_model = 'FIR'
x3,name3 = dmtx_light(frametimes, paradigm, hrf_model = 'FIR',
drift_model='Polynomial', drift_order=3,
fir_delays = range(1,6))
import matplotlib.pylab as mp
mp.figure()
mp.imshow(x1/np.sqrt(np.sum(x1**2,0)),interpolation='Nearest', aspect='auto')
mp.xlabel('conditions')
mp.ylabel('scan number')
if name1 is not None:
mp.xticks(np.arange(len(name1)),name1,rotation=60,ha='right')
mp.subplots_adjust(top=0.95,bottom=0.25)
mp.title('Example of event-related design matrix')
mp.figure()
mp.imshow(x2/np.sqrt(np.sum(x2**2,0)),interpolation='Nearest', aspect='auto')
mp.xlabel('conditions')
mp.ylabel('scan number')
if name2 is not None:
mp.xticks(np.arange(len(name2)),name2,rotation=60,ha='right')
mp.subplots_adjust(top=0.95,bottom=0.25)
mp.title('Example of block design matrix')
mp.figure()
mp.imshow(x3/np.sqrt(np.sum(x3**2,0)),interpolation='Nearest', aspect='auto')
mp.xlabel('conditions')
mp.ylabel('scan number')
if name3!=None:
mp.xticks(np.arange(len(name3)),name3,rotation=60,ha='right')
mp.subplots_adjust(top=0.95,bottom=0.25)
mp.title('Example of FIR design matrix')
mp.show()
| bsd-3-clause |
sarathid/Learning | Intro_to_ML/pca/eigenfaces.py | 9 | 4989 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
original source: http://scikit-learn.org/stable/auto_examples/applications/face_recognition.html
"""
print __doc__
from time import time
import logging
import pylab as pl
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
np.random.seed(42)
# for machine learning we use the data directly (as relative pixel
# position info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print "Total dataset size:"
print "n_samples: %d" % n_samples
print "n_features: %d" % n_features
print "n_classes: %d" % n_classes
###############################################################################
# Split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print "Extracting the top %d eigenfaces from %d faces" % (n_components, X_train.shape[0])
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print "done in %0.3fs" % (time() - t0)
eigenfaces = pca.components_.reshape((n_components, h, w))
print "Projecting the input data on the eigenfaces orthonormal basis"
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print "done in %0.3fs" % (time() - t0)
###############################################################################
# Train a SVM classification model
print "Fitting the classifier to the training set"
t0 = time()
param_grid = {
'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1],
}
# for sklearn version 0.16 or prior, the class_weight parameter value is 'auto'
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print "done in %0.3fs" % (time() - t0)
print "Best estimator found by grid search:"
print clf.best_estimator_
###############################################################################
# Quantitative evaluation of the model quality on the test set
print "Predicting the people names on the testing set"
t0 = time()
y_pred = clf.predict(X_test_pca)
print "done in %0.3fs" % (time() - t0)
print classification_report(y_test, y_pred, target_names=target_names)
print confusion_matrix(y_test, y_pred, labels=range(n_classes))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
pl.figure(figsize=(1.8 * n_col, 2.4 * n_row))
pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
pl.subplot(n_row, n_col, i + 1)
pl.imshow(images[i].reshape((h, w)), cmap=pl.cm.gray)
pl.title(titles[i], size=12)
pl.xticks(())
pl.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
pl.show()
| gpl-3.0 |
DonBeo/statsmodels | statsmodels/graphics/tests/test_gofplots.py | 27 | 6814 | import numpy as np
from numpy.testing import dec
import statsmodels.api as sm
from statsmodels.graphics.gofplots import qqplot, qqline, ProbPlot
from scipy import stats
try:
import matplotlib.pyplot as plt
import matplotlib
have_matplotlib = True
except ImportError:
have_matplotlib = False
class BaseProbplotMixin(object):
def base_setup(self):
if have_matplotlib:
self.fig, self.ax = plt.subplots()
self.other_array = np.random.normal(size=self.prbplt.data.shape)
self.other_prbplot = sm.ProbPlot(self.other_array)
def teardown(self):
if have_matplotlib:
plt.close('all')
@dec.skipif(not have_matplotlib)
def test_qqplot(self):
self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line)
@dec.skipif(not have_matplotlib)
def test_ppplot(self):
self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line)
@dec.skipif(not have_matplotlib)
def test_probplot(self):
self.fig = self.prbplt.probplot(ax=self.ax, line=self.line)
@dec.skipif(not have_matplotlib)
def test_qqplot_other_array(self):
self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line,
other=self.other_array)
@dec.skipif(not have_matplotlib)
def test_ppplot_other_array(self):
self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line,
other=self.other_array)
@dec.skipif(not have_matplotlib)
def t_est_probplot_other_array(self):
self.fig = self.prbplt.probplot(ax=self.ax, line=self.line,
other=self.other_array)
@dec.skipif(not have_matplotlib)
def test_qqplot_other_prbplt(self):
self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line,
other=self.other_prbplot)
@dec.skipif(not have_matplotlib)
def test_ppplot_other_prbplt(self):
self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line,
other=self.other_prbplot)
@dec.skipif(not have_matplotlib)
def t_est_probplot_other_prbplt(self):
self.fig = self.prbplt.probplot(ax=self.ax, line=self.line,
other=self.other_prbplot)
@dec.skipif(not have_matplotlib)
def test_qqplot_custom_labels(self):
self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line,
xlabel='Custom X-Label',
ylabel='Custom Y-Label')
@dec.skipif(not have_matplotlib)
def test_ppplot_custom_labels(self):
self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line,
xlabel='Custom X-Label',
ylabel='Custom Y-Label')
@dec.skipif(not have_matplotlib)
def test_probplot_custom_labels(self):
self.fig = self.prbplt.probplot(ax=self.ax, line=self.line,
xlabel='Custom X-Label',
ylabel='Custom Y-Label')
@dec.skipif(not have_matplotlib)
def test_qqplot_pltkwargs(self):
self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line,
marker='d',
markerfacecolor='cornflowerblue',
markeredgecolor='white',
alpha=0.5)
@dec.skipif(not have_matplotlib)
def test_ppplot_pltkwargs(self):
self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line,
marker='d',
markerfacecolor='cornflowerblue',
markeredgecolor='white',
alpha=0.5)
@dec.skipif(not have_matplotlib)
def test_probplot_pltkwargs(self):
self.fig = self.prbplt.probplot(ax=self.ax, line=self.line,
marker='d',
markerfacecolor='cornflowerblue',
markeredgecolor='white',
alpha=0.5)
class TestProbPlotLongely(BaseProbplotMixin):
def setup(self):
np.random.seed(5)
self.data = sm.datasets.longley.load()
self.data.exog = sm.add_constant(self.data.exog, prepend=False)
self.mod_fit = sm.OLS(self.data.endog, self.data.exog).fit()
self.prbplt = sm.ProbPlot(self.mod_fit.resid, stats.t, distargs=(4,))
self.line = 'r'
self.base_setup()
class TestProbPlotRandomNormalMinimal(BaseProbplotMixin):
def setup(self):
np.random.seed(5)
self.data = np.random.normal(loc=8.25, scale=3.25, size=37)
self.prbplt = sm.ProbPlot(self.data)
self.line = None
self.base_setup()
class TestProbPlotRandomNormalWithFit(BaseProbplotMixin):
def setup(self):
np.random.seed(5)
self.data = np.random.normal(loc=8.25, scale=3.25, size=37)
self.prbplt = sm.ProbPlot(self.data, fit=True)
self.line = 'q'
self.base_setup()
class TestProbPlotRandomNormalLocScale(BaseProbplotMixin):
def setup(self):
np.random.seed(5)
self.data = np.random.normal(loc=8.25, scale=3.25, size=37)
self.prbplt = sm.ProbPlot(self.data, loc=8.25, scale=3.25)
self.line = '45'
self.base_setup()
class TestTopLevel(object):
def setup(self):
self.data = sm.datasets.longley.load()
self.data.exog = sm.add_constant(self.data.exog, prepend=False)
self.mod_fit = sm.OLS(self.data.endog, self.data.exog).fit()
self.res = self.mod_fit.resid
self.prbplt = sm.ProbPlot(self.mod_fit.resid, stats.t, distargs=(4,))
self.other_array = np.random.normal(size=self.prbplt.data.shape)
self.other_prbplot = sm.ProbPlot(self.other_array)
def teardown(self):
if have_matplotlib:
plt.close('all')
@dec.skipif(not have_matplotlib)
def test_qqplot(self):
fig = sm.qqplot(self.res, line='r')
@dec.skipif(not have_matplotlib)
def test_qqplot_2samples_ProbPlotObjects(self):
# also tests all values for line
for line in ['r', 'q', '45', 's']:
# test with `ProbPlot` instances
fig = sm.qqplot_2samples(self.prbplt, self.other_prbplot,
line=line)
@dec.skipif(not have_matplotlib)
def test_qqplot_2samples_arrays(self):
# also tests all values for line
for line in ['r', 'q', '45', 's']:
# test with arrays
fig = sm.qqplot_2samples(self.res, self.other_array, line=line)
| bsd-3-clause |
jimsrc/seatos | mixed/figs/sheaths.paper/src/together4.py | 1 | 11024 | #!/usr/bin/env ipython
from pylab import *
import numpy as np
import console_colors as ccl
from scipy.io.netcdf import netcdf_file
import os, sys
import matplotlib.patches as patches
import matplotlib.transforms as transforms
from numpy import array
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
class gral:
def __init__(self):
self.name='name'
TS = 11
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def makefig(ax, mc, sh, TEXT, TEXT_LOC, YLIMS, varname):
LW = 0.3 # linewidth
MS = 1.5
    fmc,fsh = 3.0, 1.0 # time scale factors
if(varname == 'Temp'):
mc.med /= 1.0e4; sh.med /= 1.0e4
mc.avr /= 1.0e4; sh.avr /= 1.0e4
mc.std_err /= 1.0e4; sh.std_err /= 1.0e4
YLIMS[0] /= 1.0e4; YLIMS[1] /= 1.0e4
TEXT_LOC['mc'][1] /= 1.0e4
TEXT_LOC['sh'][1] /= 1.0e4
    # MC curves
time = fsh+fmc*mc.tnorm
cc = time>=fsh
ax.plot(time[cc], mc.avr[cc], 'o-', color='black', markersize=MS, label='mean', lw=LW)
ax.plot(time[cc], mc.med[cc], 'o-', color='red', alpha=.8, markersize=MS, markeredgecolor='none', label='median', lw=LW)
    # shaded error band for the MC
inf = mc.avr + mc.std_err/np.sqrt(mc.nValues)
sup = mc.avr - mc.std_err/np.sqrt(mc.nValues)
ax.fill_between(time[cc], inf[cc], sup[cc], facecolor='gray', alpha=0.5)
trans = transforms.blended_transform_factory(
ax.transData, ax.transAxes)
rect1 = patches.Rectangle((fsh, 0.), width=fmc, height=1,
transform=trans, color='blue',
alpha=0.3)
ax.add_patch(rect1)
    # sheath curves
time = fsh*sh.tnorm
cc = time<=fsh
ax.plot(time[cc], sh.avr[cc], 'o-', color='black', markersize=MS, lw=LW)
ax.plot(time[cc], sh.med[cc], 'o-', color='red', alpha=.8, markersize=MS, markeredgecolor='none', lw=LW)
    # shaded error band for the sheath
inf = sh.avr + sh.std_err/np.sqrt(sh.nValues)
sup = sh.avr - sh.std_err/np.sqrt(sh.nValues)
ax.fill_between(time[cc], inf[cc], sup[cc], facecolor='gray', alpha=0.5)
#trans = transforms.blended_transform_factory(
# ax.transData, ax.transAxes)
rect1 = patches.Rectangle((0., 0.), width=fsh, height=1,
transform=trans, color='orange',
alpha=0.3)
ax.add_patch(rect1)
#ax.legend(loc='best', fontsize=10)
ax.tick_params(labelsize=TS)
ax.grid()
ax.set_xlim(-2.0, 7.0)
ax.set_ylim(YLIMS)
ax.text(TEXT_LOC['mc'][0], TEXT_LOC['mc'][1], TEXT['mc'], fontsize=7)
ax.text(TEXT_LOC['sh'][0], TEXT_LOC['sh'][1], TEXT['sh'], fontsize=7)
if(varname in ('beta','Temp', 'rmsB', 'rmsBoB')):
ax.set_yscale('log')
else:
ax.set_yscale('linear')
return ax
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
stf = {}
stf['B'] = {
'label': 'B [nT]',
'ylims': [5., 29.],
'text_loc_1': {'mc':[4.5, 15.0], 'sh':[-1.95, 12.0]},
'text_loc_2': {'mc':[4.5, 18.0], 'sh':[-1.95, 12.0]},
'text_loc_3': {'mc':[4.5, 12.0], 'sh':[-1.95, 12.0]},
'nrow': 1
}
stf['V'] = {
'label': 'Vsw [km/s]',
'ylims': [350., 800.],
'text_loc_1': {'mc':[4.5, 500.0], 'sh':[-1.95, 520.0]},
'text_loc_2': {'mc':[4.5, 600.0], 'sh':[-1.95, 600.0]},
'text_loc_3': {'mc':[4.5, 410.0], 'sh':[-1.95, 600.0]},
'nrow': 2
}
stf['rmsBoB'] = {
'label': 'rmsBoB [1]',
'ylims': [0.015, 0.21],
'text_loc_1': {'mc':[4.5, 0.020], 'sh':[-1.95, 0.02]},
'text_loc_2': {'mc':[4.5, 0.095], 'sh':[-1.95, 0.02]},
'text_loc_3': {'mc':[4.5, 0.099], 'sh':[-1.95, 0.02]},
'nrow': 6
}
stf['rmsB'] = {
'label': 'rmsB [nT]',
'ylims': [0.1, 4.0],
'text_loc_1': {'mc':[4.5, 1.0], 'sh':[-1.95, 1.3]},
'text_loc_2': {'mc':[4.5, 1.0], 'sh':[-1.95, 1.3]},
'text_loc_3': {'mc':[4.5, 1.0], 'sh':[-1.95, 1.3]},
'nrow': 1
}
stf['beta'] = {
'label': '$\\beta$ [1]',
'ylims': [0.02, 10.0],
'text_loc_1': {'mc':[4.5, 0.1], 'sh':[-1.95, 0.2]},
'text_loc_2': {'mc':[4.5, 0.1], 'sh':[-1.95, 0.2]},
'text_loc_3': {'mc':[4.5, 0.1], 'sh':[-1.95, 0.2]},
'nrow': 5
}
stf['Pcc'] = {
'label': '$n_p$ [$cm^{-3}$]',
'ylims': [1, 23],
'text_loc_1': {'mc':[4.5, 14], 'sh':[-1.95, 16.0]},
'text_loc_2': {'mc':[4.5, 14], 'sh':[-1.95, 16.0]},
'text_loc_3': {'mc':[4.5, 11], 'sh':[-1.95, 18.0]},
'nrow': 3
}
stf['Temp'] = {
'label': 'T ($\\times 10^4$) [K]',
'ylims': [1e4, 100e4],
'text_loc_1': {'mc':[4.5, 18.0e4], 'sh':[-1.95, 20.0e4]},
'text_loc_2': {'mc':[4.5, 2.0e4], 'sh':[-1.95, 20.0e4]},
'text_loc_3': {'mc':[4.5, 2.0e4], 'sh':[-1.95, 20.0e4]},
'nrow': 4
}
stf['AlphaRatio'] = {
'label': 'alpha ratio [1]',
'ylims': [0.02, 0.09],
'text_loc_1': {'mc':[4.5, 0.022], 'sh':[-1.95, 0.07]},
'text_loc_2': {'mc':[4.5, 0.022], 'sh':[-1.95, 0.07]},
'text_loc_3': {'mc':[4.5, 0.022], 'sh':[-1.95, 0.07]}
}
stf['CRs'] = {
'label': '$n_{GCR}$ [%]',
'ylims': [-8.0, 2.0],
'text_loc_1': {'mc':[4.5, -4.0], 'sh':[-1.95, -4.5]},
'text_loc_2': {'mc':[4.5, -7.0], 'sh':[-1.95, -4.5]},
'text_loc_3': {'mc':[4.5, -7.5], 'sh':[-1.95, -4.5]},
'nrow': 2
}
TEXT = {}
dir_figs = sys.argv[1] #'../figs'
#dir_inp_mc = '../../../../mcs/ascii/MCflag2/wShiftCorr/_test_Vmc_'
#dir_inp_sh = '../../../../sheaths/ascii/MCflag2/wShiftCorr/_test_Vmc_'
dir_inp_mc = os.environ['RIGHT']
dir_inp_sh = os.environ['LEFT']
vlo = [100.0, 450.0, 550.0]
vhi = [450.0, 550.0, 3000.0]
nvars = len(stf.keys())
print " input: "
print " %s " % dir_inp_mc
print " %s \n" % dir_inp_sh
print " vlo, vhi: ", (vlo, vhi), '\n'
print " nvars: ", nvars
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
i=2
#fig = figure(1, figsize=(12, 15))
f = plt.figure(1, figsize=(7, 5.8))
nr = 1 # scale for row size
gs = GridSpec(nrows=3*nr, ncols=2*3)
gs.update(left=0.1, right=0.98, hspace=0.13, wspace=0.15)
for i in range(3):
fname_inp = 'MCflag2_2before.4after_fgap0.2_Wang90.0_vlo.%3.1f.vhi.%3.1f' % (vlo[i], vhi[i])
fname_inp_nro_mc = dir_inp_mc + '/n.events_' + fname_inp + '.txt'
fname_inp_nro_sh = dir_inp_sh + '/n.events_' + fname_inp + '.txt'
#n = 1 # number of row
print " ______ col %d ______" % i
for varname in ('rmsB', 'CRs'):
        # open the file to look up the number of events
fnro_mc = open(fname_inp_nro_mc, 'r')
fnro_sh = open(fname_inp_nro_sh, 'r')
for lmc, lsh in zip(fnro_mc, fnro_sh):
l_mc = lmc.split()
l_sh = lsh.split()
            if varname==l_mc[0]: # variable name
n = stf[varname]['nrow']
ax = plt.subplot(gs[(n-1)*nr:n*nr, (2*i):(2*(i+1))])
                Nfinal_mc, Nfinal_sh = int(l_mc[1]), int(l_sh[1]) # number of events
fnro_mc.close(); fnro_sh.close()
break
print " %s"%varname, ' Nfinal_mc:%d' % Nfinal_mc, 'Nfinal_sh:%d' % Nfinal_sh
mc, sh = gral(), gral()
fname_inp_mc = dir_inp_mc + '/' + fname_inp + '_%s.txt' % varname
fname_inp_sh = dir_inp_sh + '/' + fname_inp + '_%s.txt' % varname
mc.tnorm, mc.med, mc.avr, mc.std_err, mc.nValues = np.loadtxt(fname_inp_mc).T
sh.tnorm, sh.med, sh.avr, sh.std_err, sh.nValues = np.loadtxt(fname_inp_sh).T
        # number of events with more than 80% non-gap data
TEXT['mc'] = ' N: %d' % Nfinal_mc
TEXT['sh'] = ' N: %d' % Nfinal_sh
if(vlo[i]==100.0):
TEXT_LOC = stf[varname]['text_loc_1'] #1.7, 12.0
elif(vlo[i]==450.0):
TEXT_LOC = stf[varname]['text_loc_2'] #1.7, 12.0
elif(vlo[i]==550.0):
TEXT_LOC = stf[varname]['text_loc_3'] #1.7, 12.0
else:
print " ----> ERROR con 'v_lo'!"
raise SystemExit
ylims = array(stf[varname]['ylims']) #[4., 17.]
ylabel = stf[varname]['label'] #'B [nT]'
ax = makefig(ax, mc, sh, TEXT, TEXT_LOC, ylims, varname)
# ticks & labels x
ax.tick_params(labelsize=TS)
if n==2: #n==nvars-1:
ax.set_xlabel('time normalized to\nsheath/MC passage [1]', fontsize=11)
#ax.xaxis.set_ticklabels([-1,0,1,2,3])
xticks = [-2,-1,0,1,2,3,4,5,6,7]
ax.set_xticks(xticks)
ax.set_xticklabels(xticks)
else:
ax.set_xlabel('')
#ax.get_xaxis().set_ticks([])
ax.xaxis.set_ticklabels([])
# ticks & labels y
if i==0:
ax.set_ylabel(ylabel, fontsize=15)
else:
ax.set_ylabel('')
#ax.get_yaxis().set_ticks([])
ax.yaxis.set_ticklabels([])
#+++++++++++++++++++++++++ nCR & model-fit
#dirs = {}
#dirs['sheath'] = '../../../../sheaths/ascii/MCflag2/wShiftCorr/_test_Vmc_'
#dirs['mc'] = '../../../../mcs/ascii/MCflag2/wShiftCorr/_test_Vmc_'
#dirs['fname_inputs'] = 'MCflag2_2before.4after_fgap0.2_Wang90.0'
#dirs['figs'] = dir_figs
#
#par = {}
#par['lo'] = {
# 'vlo': 100.0,
# 'vhi': 450.0,
# 'tau': 2.36,
# 'bp' : 0.0,
# 'q' : -9.373,
# 'off': 0.89,
# 'bo' : 16.15
#}
#par['mid'] = {
# 'vlo': 450.0,
# 'vhi': 550.0,
# 'tau': 4.18,
# 'bp' : -0.9,
# 'q' : -6.02,
# 'off': 0.0,
# 'bo' : 11.87
#}
#par['hi'] = {
# 'vlo': 550.0,
# 'vhi': 3000.0,
# 'tau': 5.78,
# 'bp' : -0.18,
# 'q' : -5.53,
# 'off': 1.01,
# 'bo' : 14.48
#}
#
#from funcs import build_plot
#n = 3; i=0
#for i, name in zip(range(3), ('lo', 'mid', 'hi')):
# ax = plt.subplot(gs[(n-1)*nr:n*nr, (2*i):(2*(i+1))])
# build_plot(dirs, par[name], ax)
# if i==0:
# ax.set_ylabel('$n_{GCR}$ [%]', fontsize=15)
# else:
# ax.set_ylabel('')
# ax.yaxis.set_ticklabels([])
#+++++++++++++++++++++++++++++++++++++++++
#fig.tight_layout()
#fname_fig = dir_figs + '/fig_vlo.%3.1f_vhi.%3.1f_%s.png'%(vlo, vhi, varname)
fname_fig = '%s/figs_splitted_3.png' % dir_figs
savefig(fname_fig, dpi=150, bbox_inches='tight')
close()
print "\n output en:\n %s\n" % fname_fig
#EOF
| mit |
CleverChuk/ices | Python/multijob_module.py | 1 | 3479 | """
Author: Chukwubuikem Ume-Ugwa
Email: chubiyke@gmail.com
MIT License
Copyright (c) 2017 CleverChuk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from dataparser import *
from multiprocessing import Pool, Manager
import os
import time
manager = Manager()
heightD = manager.dict() # holds values for minimum height of each particle
TSIZE = 8 # data type size in bytes
N_OFFSETS = 44 # number of data
FCOLOR = genColor(N_OFFSETS, manager)
# Dimension of the simulation bed
xsize = 78
ysize = 112
zsize = 104
hOut = "HeightData"
def startChild(fname):
# DISALLOWED IN PYTHON
iam.fn = fname
dictn = iam.manager.dict()
mylist = iam.manager.list()
pool = Pool()
# passing offset multiplier to the producer task
pool.map(iam.producer, [i for i in range(1 , iam.N_OFFSETS)], 1)
# Feeds task from producers into the list
for i, j in self.dictn.items():
mylist.append(j[0])
# single process to handle plotting
proc = Process(target=iam.consumer, args=(mylist, ))
proc.start()
proc.join()
def multijob(fname):
"""
Handles reading and plotting of data in file with name fname
"""
print("Starting multijob from process: %d" % os.getpid())
fig = plt.figure()
    axis = Axes3D(fig)
    heightL = manager.list()
axis.set_xlim([0,ysize])
axis.set_ylim([0,ysize])
axis.set_zlim([0,ysize])
axis.view_init(elev = 40, azim = 50)
coords = manager.list()
rho = readsingle(fname)
for i in range(1, N_OFFSETS):
eta_s = readsingle(fname, i * TSIZE)
# eta_s = process(rho, filter_eta(eta_s))
coords.append(getcoords(eta_s, xsize, ysize, zsize))
heightL.append(max(coords[-1][-2]) - min(coords[-1][-2]))
writtable(hOut,str(heightL).strip('[]'))
plot(coords, fig, axis, count = "ALL", fcolor = FCOLOR, multijob = (True,fname))
print("Finished multijob from process: %d" % os.getpid())
if __name__ == "__main__":
print("Starting mutiple jobs in a process task")
import timeit, sys
start_time = timeit.default_timer()
if(os.path.exists(hOut)):
os.remove(hOut)
pool = Pool()
files = list()
MAXCOUNT = 4
STEP = 2
START = 0
FNAME = "fullT{0}.dat"
## file with filesname to work on
for i in range(START, MAXCOUNT, STEP):
files.append(FNAME.format(i))
pool.map(multijob, files, 1)
elapsed = timeit.default_timer() - start_time
print("total time %d seconds" % elapsed)
print("Finished multiple job in a process task")
| mit |
ZenDevelopmentSystems/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 204 | 5442 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix, than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <emmanuelle.gouillart@nsup.org>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x]
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
n_pts = 36.
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
return res - ndimage.binary_erosion(res)
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l / 7.)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| bsd-3-clause |
TheGhostHuCodes/spy_dir | spy_dir.py | 1 | 2182 | #!/usr/bin/env python
import os
import os.path as pt
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import argparse
#TODO: take decimal places as parameter for printing.
def sizeof_pp(num):
for unit in ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB']:
if abs(num) < 1024.0:
return "%3.2f %s" % (num, unit)
num /= 1024.0
return "%.2f %s" % (num, 'Yi')
def xtic_formatter(num, tick_index):
return sizeof_pp(num)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='.')
parser.add_argument('dir_path', metavar='Path', type=str, help='')
parser.add_argument('-p', '--plot', action='store_true')
args = parser.parse_args()
sizes = []
symlink_count = 0
for root, dirs, files in os.walk(args.dir_path, followlinks=False):
for name in files:
fullpath = pt.join(root, name)
if not os.path.islink(fullpath):
sizes.append(pt.getsize(fullpath))
else:
symlink_count += 1
sizes.sort()
print("Searching in directory: {0}".format(args.dir_path))
print("Files Inspected: {0}".format(len(sizes)))
print("Maxfilesize: " + sizeof_pp(sizes[-1]))
print("Symlinks found: {0}".format(symlink_count))
percentile = 95
index = len(sizes) * (percentile / 100.)
print("{0}% of files smaller than: ~".format(percentile) + sizeof_pp(
sizes[int(index)]))
sizesArray = np.asarray(sizes)
if (args.plot):
bins = min(len(sizes) / 10, 200)
plt.figure(figsize=(8, 8))
ax = plt.subplot(111)
# Adjust y-axis to show bins of height 1 and max bin height.
n, _, _ = plt.hist(sizesArray, bins, log=True)
plt.ylim(0.5, max(n) * 1.1)
plt.xlabel("File Size (bytes)")
plt.ylabel("Log(Number of Files)")
plt.title("File size histogram for: {0}".format(args.dir_path))
x_formatter = mpl.ticker.ScalarFormatter(useOffset=False)
x_formatter.set_scientific(False)
x_format = mpl.ticker.FuncFormatter(xtic_formatter)
ax.xaxis.set_major_formatter(x_format)
plt.show()
| apache-2.0 |
0x0all/scikit-learn | examples/covariance/plot_covariance_estimation.py | 250 | 5070 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A close formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is computational not costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly most
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause |
dcastro9/patternrec_ps2 | code/alcohol_script.py | 1 | 5623 | from Dataset import Dataset
from WTA_Hasher import WTAHasher
from kNN_Classifier import kNNClassifier
import numpy as np
import matplotlib.pyplot as plt
import copy
ds_train_dir = "../datasets/alcohol/alcoholism_training.csv"
ds_test_dir = "../datasets/alcohol/alcoholism_test.csv"
results_dir = "../final_results/alcohol/"
num_k_values = 10
weights = [1,1,1,1,1,3]
ds_orig = Dataset(ds_train_dir, name='Original Data')
ds_norm = Dataset(ds_train_dir, normalize=True, name='Normalized Data')
ds_norm_weigh = Dataset(ds_train_dir, normalize=True, weights=weights,
name='Norm & Weighted Data')
ds_whiten = Dataset(ds_train_dir, whiten=True, name='Whitened Data')
ds_orig_t = Dataset(ds_test_dir)
ds_norm_t = Dataset(ds_test_dir, normalize=True)
ds_norm_weigh_t = Dataset(ds_test_dir, normalize=True, weights=weights)
ds_whiten_t = Dataset(ds_test_dir, whiten=True)
alcohol_datasets = [[ds_orig, ds_orig_t],
[ds_norm, ds_norm_t],
[ds_norm_weigh, ds_norm_weigh_t],
[ds_whiten, ds_whiten_t]]
k_values = range(1,num_k_values*2,2)
color=['red','blue','green','black']
labels=['20%', '50%', '80%', '100%']
folds=['2-fold', '5-fold', 'N-fold']
for ds in alcohol_datasets:
train_data_all = ds[0].data
test_data = ds[1].data
    # Accuracy for 20%, 50%, 80% and 100% of the training data.
    # Each subset holds one accuracy array per cross-validation scheme
    # (2-fold, 5-fold, N-fold).
train_accuracy = [[np.zeros(num_k_values), np.zeros(num_k_values), np.zeros(num_k_values)],
[np.zeros(num_k_values), np.zeros(num_k_values), np.zeros(num_k_values)],
[np.zeros(num_k_values), np.zeros(num_k_values), np.zeros(num_k_values)],
[np.zeros(num_k_values), np.zeros(num_k_values), np.zeros(num_k_values)]]
best_k_and_ds = [[0,0,0],[0,0,0],[0,0,0],[0,0,0]]
for it in range(5):
train_data_20, t = Dataset.getRandomPercent(train_data_all, 0.2)
train_data_50, t = Dataset.getRandomPercent(train_data_all, 0.5)
train_data_80, t = Dataset.getRandomPercent(train_data_all, 0.8)
all_training_data = [train_data_20,
train_data_50,
train_data_80,
train_data_all]
# Only run on train_data_all once.
if it > 0:
all_training_data = all_training_data[:-1]
for val in range(len(all_training_data)):
for k in k_values:
print str(it) + ": Training on: " + labels[val] + "for k value: " + str(k) + " for " + ds[0].name
# Do 2-5-N Fold Cross Validation.
cv_2 = Dataset.getkPartitions(all_training_data[val], 2)
cv_5 = Dataset.getkPartitions(all_training_data[val], 5)
cv_n = Dataset.getkPartitions(all_training_data[val],
len(all_training_data[val]))
cvs = [cv_2, cv_5, cv_n]
cross_val_accuracy = [0, 0, 0]
for cv_c in range(len(cvs)):
# Does f-Fold cross validation.
accuracy = 0
for fold in range(len(cvs[cv_c])):
td = copy.deepcopy(cvs[cv_c]) # Copy the cross validation dataset.
del td[fold] # Delete the item we're using for testing.
td_reshaped = []
for elem in td:
for item in elem:
td_reshaped.append(item)
knn = kNNClassifier(td_reshaped, k) # Initialize kNN.
accuracy += knn.test(cvs[cv_c][fold]) # Test.
accuracy /= len(cvs[cv_c])
if best_k_and_ds[val][cv_c] == 0:
best_k_and_ds[val][cv_c] = [k, td_reshaped, accuracy]
elif best_k_and_ds[val][cv_c][2] < accuracy:
best_k_and_ds[val][cv_c] = [k, td_reshaped, accuracy]
train_accuracy[val][cv_c][k/2] += accuracy
# Write results to file.
out_f = open(results_dir + ds[0].name + ".txt", 'w')
for cnt in range(len(train_accuracy)):
# Setup plot.
plt.xlabel('k Values')
plt.ylabel('Accuracy')
plt.title(ds[0].name)
average = True
if cnt == len(train_accuracy) - 1:
average = False
for fold in range(len(train_accuracy[cnt])):
if (average):
train_accuracy[cnt][fold] /= 5
plt.plot(k_values, train_accuracy[cnt][fold], color=color[fold],
label=folds[fold])
out_f.write(labels[cnt] + ":" + folds[fold] + ":" +
str(train_accuracy[cnt][fold]) + "\n")
# Save plot.
plt.legend()
plt.savefig(results_dir + ds[0].name + labels[cnt] + ".pdf")
plt.clf()
plt.cla()
# Now we test with the original test data provided.
out_f.write("\n\n Testing for best k & DS for:" + ds[0].name +"\n")
for val in range(len(best_k_and_ds)):
for fold in range(len(best_k_and_ds[val])):
knn = kNNClassifier(best_k_and_ds[val][fold][1],
best_k_and_ds[val][fold][0]) # Initialize kNN.
out = knn.test(test_data) # Test.
out_f.write(labels[val] + " with k:" +
str(best_k_and_ds[val][fold][0]) + " at " + folds[fold] +
" original accuracy:" + str(best_k_and_ds[val][fold][2]) +
" vs accuracy:" + str(out) + "\n")
# Close file.
out_f.close() | mit |
hagabbar/pycbc_copy | examples/distributions/spin_spatial_distr_example.py | 14 | 1973 | import numpy
import matplotlib.pyplot as plt
import pycbc.coordinates as co
from mpl_toolkits.mplot3d import Axes3D
from pycbc import distributions
# The polar angle bounds can be anywhere between 0 and pi, but this distribution
# takes them in units of pi, so we use values between 0 and 1.
theta_low = 0.
theta_high = 1.
# Units of pi for the bounds of the azimuthal angle which goes from 0 to 2 pi.
phi_low = 0.
phi_high = 2.
# Create a distribution object from distributions.py
# Here we are using the Uniform Solid Angle function which takes
# theta = polar_bounds(theta_lower_bound to a theta_upper_bound), and then
# phi = azimuthal_bound(phi_lower_bound to a phi_upper_bound).
uniform_solid_angle_distribution = distributions.UniformSolidAngle(
polar_bounds=(theta_low,theta_high),
azimuthal_bounds=(phi_low,phi_high))
# Now we can take a random variable sample from that distribution.
# In this case we want 50000 samples.
solid_angle_samples = uniform_solid_angle_distribution.rvs(size=10000)
# Make unit spin magnitudes, since the solid angle is only 2-dimensional and we
# need a 3rd dimension for the 3D plot made later on.
spin_mag = numpy.ones(10000, dtype=float)
# Use pycbc.coordinates as co. Use spherical_to_cartesian function to
# convert from spherical polar coordinates to cartesian coordinates.
spinx, spiny, spinz = co.spherical_to_cartesian(spin_mag,
solid_angle_samples['phi'],
solid_angle_samples['theta'])
# Plot the spherical distribution of spins to make sure that we
# distributed across the surface of a sphere.
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(spinx, spiny, spinz, s=1)
ax.set_xlabel('Spin X Axis')
ax.set_ylabel('Spin Y Axis')
ax.set_zlabel('Spin Z Axis')
plt.show()
| gpl-3.0 |
AndrewBMartin/pygurobi | pygurobi/pygurobi.py | 1 | 31972 | """
Functions to support rapid interactive modification of Gurobi models.
For reference on Gurobi objects such as Models, Variables, and Constraints, see
http://www.gurobi.com/documentation/7.0/refman/py_python_api_overview.html.
"""
import csv
import json
try:
import gurobipy as gp
except ImportError:
raise ImportError("gurobipy not installed. Please see {0} to download".format(
"https://www.gurobi.com/documentation/6.5/quickstart_mac/the_gurobi_python_interfac.html"))
# Assuming that constraints are of the form:
# constraintName(index1,index2,...,indexN).
# Assuming that variables are of the form:
# variableName[index1,index2,...,indexN]
CON_BRACKET_L = "("
CON_BRACKET_R = ")"
VAR_BRACKET_L = "["
VAR_BRACKET_R = "]"
# 13 July 2016 - Need to sort out capitalization here for attributes
# Attributes of a Gurobi variable
VAR_ATTRS = ["LB", "UB", "Obj", "VType", "VarName", "X", "Xn", "RC",
"BarX", "Start", "VarHintVal", "VarHintPri", "BranchPriority",
"VBasis", "PStart", "IISLB", "IISUB", "PWLObjCvx",
"SAObjLow", "SAObjUp", "SALBLow", "SALBUp",
"SAUBLow", "SAUBUp", "UnbdRay"]
# Attributes of a Gurobi constraint
CON_ATTRS = ["Sense", "RHS", "ConstrName", "Pi", "Slack",
"CBasis", "DStart", "Lazy", "IISConstr",
"SARHSLow", "SARHSUp", "FarkasDual"]
def read_model(filename):
"""
Read a model using gurobipy.
"""
m = gp.read(filename)
return m
def reoptimize(m):
"""
Update, reset, and optimize
a model.
"""
m.update()
m.reset()
m.optimize()
def get_variable_attrs():
"""
Return a list of variable attributes.
Details of attributes found at the Gurobi
website:
http://www.gurobi.com/documentation/6.5/refman/attributes.html
"""
return VAR_ATTRS
def get_constraint_attrs():
"""
Return a list of constraint attributes.
Details of attributes found at the Gurobi
website:
http://www.gurobi.com/documentation/6.5/refman/attributes.html
"""
return CON_ATTRS
def list_constraints(model):
"""
Print to screen the constraint sets in the model.
Show the name of each constraint set along with the
number of constraints in that set.
A constraint set is composed of all constraints
sharing the same string identifier before the indices:
A(2,3,4) and A(1,2,3) are in the same constraint set, A;
A(2,3,4) and B(2,3,4) are in constraint sets A and B, respectively
"""
sets = {}
constraints = model.getConstrs()
# Assuming constraint set name separated from indicies by
for c in constraints:
name = c.constrName
split_name = name.split(CON_BRACKET_L)
set_name = split_name[0]
if set_name not in sets:
sets[set_name] = 1
else:
sets[set_name] += 1
print "Constraint set, Number of constraints"
print "\n".join(["{0}, {1}".format(name, number) for name, number
in sorted(sets.items())])
def list_variables(model):
"""
Print to screen the variable sets in the model.
Show the name of each variable set along with the
number of variables in that set.
A variable set is composed of all variables
sharing the same string identifier before the indices:
A[2,3,4] and A[1,2,3] are in the same variable set, A;
A[2,3,4] and B[2,3,4] are in variable sets A and B, respectively
"""
sets = {}
variables = model.getVars()
# Assuming constraint set name separated from indicies by
for v in variables:
name = v.varName
split_name = name.split(VAR_BRACKET_L)
set_name = split_name[0]
if set_name not in sets:
sets[set_name] = 1
else:
sets[set_name] += 1
print "Variable set, Number of variables"
print "\n".join(["{0}, {1}".format(name, number) for name, number
in sorted(sets.items())])
def get_variables(model, name="", approx=False, filter_values={}, exclude=False):
"""
Return a list of variables from the model
selected by variable set name.
A variable set is composed of all variables
sharing the same string identifier before the indices:
A[2,3,4] and A[1,2,3] are in the same variable set, A;
    A[2,3,4] and B[2,3,4] are in variable sets A and B, respectively
PyGurobi by default assumes that *variable names* are separated
from indices by square brackets "[" and "]",
For example, variables look like x[i,j] - "x" in the variable set name,
and "i" and "j" and the variable's index values.
See the source code for more details.
"""
variables = []
if not name:
variables = model.getVars()
    elif not approx:
variables = [v for v in model.getVars()
if v.varName.split(VAR_BRACKET_L)[0] == name]
else:
variables = [v for v in model.getVars()
if name in v.varName.split(VAR_BRACKET_L)[0]]
if filter_values:
variables = filter_variables(variables, filter_values,
exclude=exclude)
return variables
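# Usage sketch for get_variables: the set name "x" and the index value 2020 are
# illustrative assumptions; a real model would use its own set names and indices.
def _example_get_variables(model):
    """
    Minimal sketch: fetch a variable set and a filtered subset of it,
    assuming the model has variables named like x[region,period].
    """
    # Every variable in the "x" set
    x_vars = get_variables(model, "x")
    # Only the "x" variables whose second index (index 1) equals 2020
    x_2020 = get_variables(model, "x", filter_values={1: 2020})
    return x_vars, x_2020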
def check_attr(attr, attributes):
"""
Check if the attr string case-insensitively corresponds to a
Gurobi attribute.
"""
for a in attributes:
if attr == a:
return True
if attr.lower() == a.lower():
return True
return False
def check_variable_attr(attr):
"""
Check if a string corresponds to a variable attribute.
Case-insensitive.
"""
var_attrs = get_variable_attrs()
return check_attr(attr, var_attrs)
def check_constraint_attr(attr):
"""
Check if a string corresponds to a constraint attribute.
Attributes are case-insensitive.
"""
con_attrs = get_constraint_attrs()
return check_attr(attr, con_attrs)
def get_variables_attr(attr, model="", name="", variables=""):
"""
    Return a dictionary of variable names and their
    corresponding attribute value.
    Specify either model and name parameters or supply a list of variables.
"""
if not attr:
raise AttributeError("No attributes specified")
if not check_variable_attr(attr):
raise AttributeError("{0}\n{1}\n{2}".format(
"Attribute: {0} not a variable attribute.".format(attr),
"Get list of all variables attributes with the",
"get_variable_attrs() method."))
# Make a list of attributes at the top and check against
# them to make sure that the specified attribute belongs.
if not model and not variables:
raise ValueError("No model or variable list given")
variables = variables_check(model, name, variables)
return {v.varName: getattr(v, attr) for v in variables}
def print_variables_attr(attr, model="", name="", variables=""):
"""
    Print to screen a dictionary of variable names and their
    corresponding attribute value.
    Specify either model and name parameters or supply a list of variables.
"""
var_dict = get_variables_attr(attr, model=model,
name=name, variables=variables)
print "\n".join(["{0}, {1}".format(v, k) for v, k in
sorted(var_dict.items())])
def set_variables_attr(attr, val, model="", name="", variables=""):
"""
Set an attribute of a model variable set.
    Specify either model and name parameters or supply a list of variables.
"""
    if not attr or not val:
        raise AttributeError("No attribute or value specified")
if not check_variable_attr(attr):
raise AttributeError("{0}\n{1}\n{2}".format(
"Attribute: {0} not a variable attribute.".format(attr),
"Get list of all variables attributes with the",
"get_variable_attrs() method."))
if not model and not variables:
raise ValueError("No model or variables specified")
variables = variables_check(model, name, variables)
for v in variables:
setattr(v, attr, val)
def zero_all_objective_coeffs(model):
"""
Set all objective coefficients in a model to zero.
"""
if not model:
raise ValueError("No model given")
for v in model.getVars():
v.Obj = 0
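# Usage sketch: a common interactive pattern is to clear the objective and then
# re-weight a single variable set. The set name "y" and the coefficient are
# illustrative assumptions, not names taken from any particular model.
def _example_swap_objective(model, coeff=1.0):
    """
    Minimal sketch: make the "y" variables the only terms in the objective,
    then re-solve the model.
    """
    zero_all_objective_coeffs(model)
    set_variables_attr("Obj", coeff, model=model, name="y")
    reoptimize(model)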
def set_variables_bounds(lb="", ub="", model="", name="", variables=""):
"""
Set the lower bound and/or upper bound for a variables set.
    Specify either model and name parameters or supply a list of variables.
"""
if lb:
set_variables_attr("lb", val=lb, model=model,
name=name, variables=variables)
if ub:
set_variables_attr("ub", val=ub, model=model,
name=name, variables=variables)
def remove_variables_from_model(model, name="", variables=""):
"""
Remove the given variables from the model.
    Specify either model and name parameters or supply a list of variables.
"""
if not model and not variables:
raise ValueError("No model or variables given")
if not model:
raise ValueError("No model given")
variables = variables_check(model, name, variables)
for v in variables:
model.remove(v)
def variables_check(model, name, variables):
"""
Return the appropriate
variables based on the information supplied.
"""
if variables:
return variables
if model and name:
variables = get_variables(model, name)
if model and not name:
variables = model.getVars()
if not variables:
print "No variables found for\nmodel: {0},\nname: {1}".format(
model, name)
return variables
def get_variable_index_value(variable, index):
"""
Return the value of the given index
for a given variable.
Variable names are assumed to be given
as A[a,c,d, ....,f]
"""
value = variable.varName.split(",")[index].strip()
if VAR_BRACKET_R in value:
value = value[:-1]
elif VAR_BRACKET_L in value:
value = value.split(VAR_BRACKET_L)[1]
    # Not expecting many variable index values
    # to be floats
    if value.isdigit():
try:
value = int(value)
except ValueError:
pass
return value
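# For example, for a variable named "x[a,2020]" (illustrative):
# get_variable_index_value(v, 0) returns "a" and
# get_variable_index_value(v, 1) returns the integer 2020.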
def get_linexp_from_variables(variables):
"""
Return a linear expression from the supplied list
of variables.
"""
linexp = gp.LinExpr()
for v in variables:
linexp += v
return linexp
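# Usage sketch: sum a variable set into a linear expression and bound it with a
# new constraint. The set name "x" and the limit are illustrative assumptions.
def _example_total_constraint(model, limit=100):
    """
    Minimal sketch: add a constraint forcing the sum of all "x" variables
    to stay at or below `limit`.
    """
    x_vars = get_variables(model, "x")
    linexp = get_linexp_from_variables(x_vars)
    constr = model.addConstr(linexp <= limit, "total_x")
    model.update()
    return constr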
def sum_variables_by_index(index, model="", name="", variables=""):
"""
Return a dictionary mapping index values to the sum
of the solution values of all matching variables.
    Specify either model and name parameters or supply a list of variables.
"""
var_dict = get_variables_by_index(index, model=model, name=name,
variables=variables)
if not var_dict:
raise ValueError("No variables found".format(index))
new_dict = {index_name: sum([v.X for v in index_vars])
for index_name, index_vars in
sorted(var_dict.items())}
return new_dict
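# For example (illustrative, after the model has been optimized): with variables
# named like x[region,period], sum_variables_by_index(1, model=model, name="x")
# returns a dict mapping each period to the total solution value of its
# "x" variables.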
def print_dict(dictionary):
"""
Print a dictionary to screen.
"""
print "\n".join(["{0}, {1}".format(index_name, index_value)
for index_name, index_value in
sorted(dictionary.items())])
def print_variables_sum_by_index(index, model="", name="", variables=""):
"""
Print a dictionary of variables, summed by index.
"""
var_dict = sum_variables_by_index(index, model=model,
name=name, variables=variables)
print_dict(var_dict)
def get_variables_by_index(index, model="", name="", variables=""):
"""
Return a dictionary mapping index values to lists of
matching variables.
    Specify either model and name parameters or supply a list of variables.
"""
if index != 0 and not index:
raise IndexError("No index given")
if not model and not variables:
raise ValueError("No model or variables given")
if not (name and model) and not variables:
raise ValueError("No variables specified")
variables = variables_check(model, name, variables)
var_dict = {}
for v in variables:
value = get_variable_index_value(v, index)
if value not in var_dict:
var_dict[value] = [v]
else:
var_dict[value].append(v)
return var_dict
def filter_variables(variables, filter_values, exclude=False):
"""
Return a new list of variables that match the filter values
from the given variables list.
"""
if not variables:
raise ValueError("variables not given")
if not filter_values:
raise ValueError("Dictionary of filter values not given")
new_vars = []
for v in variables:
add = True
for index, value in filter_values.iteritems():
key = get_variable_index_value(v, index)
if key != value:
add = False
break
if add:
new_vars.append(v)
if exclude:
new_vars = [v for v in (set(variables)-set(new_vars))]
return new_vars
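# For example (illustrative): with variables named like x[region,period],
# filter_variables(variables, {1: 2020}) keeps only the period-2020 variables,
# while exclude=True would instead drop them and keep everything else.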
def get_variables_by_index_values(model, name, index_values, exclude=False):
    variables = get_variables(model, name, filter_values=index_values,
                              exclude=exclude)
    return variables
def get_variables_by_two_indices(index1, index2, model="", name="", variables=""):
"""
Return a dictionary of variables mapping index1 values
to dictionaries mapping
index2 values to matching variables.
    Specify either model and name parameters or supply a list of variables.
"""
two_indices_dict = {}
index1_dict = get_variables_by_index(index1, model=model, name=name,
variables=variables)
for key, value in index1_dict.iteritems():
two_indices_dict[key] = get_variables_by_index(index2, variables=value)
return two_indices_dict
def print_variables(variables):
"""
Print a list of variables to look good.
"""
print "\n".join([v.varName for v in variables])
def sum_variables_by_two_indices(index1, index2, model="", name="", variables=""):
"""
Return a dictionary mapping index1 values
to dictionaries of the given variables summed over index2.
"""
two_indices_dict = get_variables_by_two_indices(index1, index2,
model=model, name=name, variables=variables)
if not two_indices_dict:
raise ValueError("Inputs did not match with model variables")
new_dict = {}
for key, var_dict in two_indices_dict.iteritems():
new_dict[key] = {index_name: sum([v.X for v in index_vars])
for index_name, index_vars in
sorted(var_dict.items())}
return new_dict
def print_two_indices_dict(indices_dict):
"""
Print to screen a two level nested dictionary.
"""
for key, value in indices_dict.iteritems():
print "\n{0}".format(key)
print_dict(value)
def get_linexp_by_index(index, model="", name="", variables=""):
"""
Return a dictionary of index values to Gurobi linear expressions
corresponding to the summation of variables that match the index
value for the given index number.
Specify either the model and name parameters or supply a list of variables.
"""
linexps = {}
variables = variables_check(model, name, variables)
for v in variables:
value = get_variable_index_value(v, index)
if value not in linexps:
linexps[value] = gp.LinExpr(v)
else:
linexps[value] += v
return linexps
def print_constraints(constraints):
"""
Print the names of the given constraints, one per line.
"""
print "\n".join([c.constrName for c in constraints])
def get_constraints_multiple(model, names_list, approx=False):
"""
Return a list of constraints given by the constraint
set names in names_list.
"""
cons_list = []
for name in names_list:
cons_list.extend(get_constraints(model, name, approx))
return cons_list
def filter_constraints(constraints, filter_values, exclude=False):
"""
Return a new list of constraints that match the filter values from
the given constraints list.
"""
if not constraints:
raise ValueError("constraints not given")
if not filter_values:
raise ValueError("Dictionary of filter values not given")
new_cons = []
for c in constraints:
add = True
for index, value in filter_values.iteritems():
key = get_constraint_index_value(c, index)
try:
key.replace('"', "")
except AttributeError:
pass
if key != value:
add = False
break
if add:
new_cons.append(c)
if exclude:
# May want to add sorting by varName here
new_cons = [c for c in (set(constraints)-set(new_cons))]
return new_cons
def get_constraints(model, name="", approx=False, filter_values={},
exclude=False):
"""
Return a list of constraints from the model
selected by constraint set name.
A constraint set is composed of all constraints
sharing the same string identifier before the indices:
A(2,3,4) and A(1,2,3) are in the same constraint set, A;
A(2,3,4) and B(2,3,4) are in constraint sets A and B, respectively
PyGurobi by default assumes that constraint set names are
separated from indices by round brackets
"(" and ")". For example, constraints look like env(r,t) - where "env"
is the constraint set name
and "r" and "t" are the index values. See the source for more details.
"""
if not name:
return model.getConstrs()
constraints = []
if not approx:
constraints = [c for c in model.getConstrs()
if c.constrName.split(CON_BRACKET_L)[0] == name]
else:
constraints = [c for c in model.getConstrs()
if name in c.constrName.split(CON_BRACKET_L)[0]]
if filter_values:
constraints = filter_constraints(constraints, filter_values, exclude)
return constraints
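# Hedged example of the naming convention described above: fetch the whole
# hypothetical "env" constraint set, then only the rows whose first index
# value is "r1". `m` is assumed to be a gurobipy model built elsewhere.
def example_env_constraints(m):
    all_env = get_constraints(m, "env")                          # every env(r,t)
    env_r1 = get_constraints(m, "env", filter_values={0: "r1"})  # env(r1,*) only
    return all_env, env_r1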
def constraints_check(model, name, constraints):
"""
Check to see whether the user specified a list
of constraints or expects them to be retrieved
from the model.
"""
if constraints:
return constraints
if model and name:
constraints = get_constraints(model, name)
elif model and not name:
constraints = model.getConstrs()
return constraints
def get_constraints_attr(attr, model="", name="", constraints=""):
"""
Return a dictionary of constraint names and their
corresponding attribute value.
Specify either the model and name parameters or supply a list of constraints.
"""
if not attr:
raise AttributeError("No attributes specified")
if not check_constraint_attr(attr):
raise AttributeError("{0}\n{1}\n{2}".format(
"Attribute: {0} not a constraint attribute.".format(attr),
"Get list of all variables attributes with the",
"get_constraint_attrs() method."))
# Check if the attr supplied is not a viable model attribute
if not model and not constraints:
raise ValueError("No model or constraint list given")
constraints = constraints_check(model, name, constraints)
return {c.constrName: getattr(c, attr) for c in constraints}
def print_constraints_attr(attr, model="", name="", constraints=""):
"""
Print to screen a list of constraint attribute values
given by the constraints specified in the names parameter.
Specify either the model and name parameters or supply a list of constraints.
"""
constraints = get_constraints_attr(attr, model=model,
name=name, constraints=constraints)
print "\n".join(["{0}, {1}".format(c, k)
for c, k in sorted(constraints.items())])
def set_constraints_attr(attr, val, model="", name="", constraints=""):
"""
Set an attribute of a model constraint set.
Specify either the model and name parameters or supply a list of constraints.
"""
if not attr or not val:
raise AttributeError("No attribute or value specified")
if not check_constraint_attr(attr):
raise AttributeError("{0}\n{1}\n{2}".format(
"Attribute: {0} not a variable attribute.".format(attr),
"Get list of all variables attributes with the",
"get_variable_attrs() method."))
if not model and not constraints:
raise ValueError("No model or constraints specified")
constraints = constraints_check(model, name, constraints)
for c in constraints:
setattr(c, attr, val)
def set_constraints_rhs_as_percent(percent, model="", name="", constraints=""):
"""
Set the right hand side (rhs) of a constraint set as a percentage of its current rhs.
Specify either the model and name parameters or supply a list of constraints.
"""
if percent != 0 and not percent:
print "Error: No percent specified."
return
try:
percent = float(percent)
except ValueError:
raise ValueError("Percent must be a number. Percent: {}".format(percent))
if not model and not constraints:
raise ValueError("No model or constraints specified.")
constraints = constraints_check(model, name, constraints)
for c in constraints:
cur_rhs = getattr(c, "rhs")
setattr(c, "rhs", percent*cur_rhs)
def remove_constraints_from_model(model, name="", constraints=""):
"""
Remove the given constraints from the model.
Specify either the model and name parameters or supply a list of constraints.
"""
if not model and not constraints:
raise ValueError("No model or constraints given")
if not model:
raise ValueError("No model given")
# A model object is always required here (constraints are removed from
# it), even when an explicit list of constraints is supplied; if no list
# is given, retrieve the constraints from the model by name.
if not constraints:
constraints = constraints_check(model, name, constraints)
for c in constraints:
model.remove(c)
def get_constraint_index_value(constraint, index):
"""
Return the value of the given index
for a given constraint.
Constraint names are assumed to be given
as A(a,c,d, ....,f)
"""
value = constraint.constrName.split(",")[index].strip()
if CON_BRACKET_R in value:
value = value[:-1]
elif CON_BRACKET_L in value:
value = value.split(CON_BRACKET_L)[1]
# Not expecting many constraint index values to be floats
if value.isdigit():
try:
value = int(value)
except ValueError:
pass
return value
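# Illustrative parse, assuming the default round-bracket naming convention:
# for a constraint named "env(r1,2020)", index 0 yields the string "r1" and
# index 1 yields the integer 2020 (digit-only values are cast to int above).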
def get_constraints_by_index(index, model="", name="", constraints=""):
"""
Return a dictionary mapping index values to lists of
constraints having that index value.
Specify either the model and name parameters or supply a list of constraints.
"""
if index != 0 and not index:
raise IndexError("No index given")
if not model and not constraints:
raise ValueError("No model or constraints given")
if not (name and model) and not constraints:
raise ValueError("No constraints specified")
constraints = constraints_check(model, name, constraints)
con_dict = {}
for c in constraints:
value = get_constraint_index_value(c, index)
if value not in con_dict:
con_dict[value] = [c]
else:
con_dict[value].append(c)
return con_dict
def get_constraints_by_index_values(model, name, index_values, exclude=False):
"""
Return a list of constraints filtered by index values.
If exclude is False then return constraints that match the filters.
If exclude is True then return constraints that do not match the filters.
"""
constraints = get_constraints(model, name, filter_values=index_values, exclude=exclude)
return constraints
def get_grb_sense_from_string(sense):
"""
Return the GRB constraint sense object
corresponding to the supplied string.
Convention follows the Gurobi docs:
https://www.gurobi.com/documentation/6.5/refman/sense.html#attr:Sense
"""
if sense == "<":
return gp.GRB.LESS_EQUAL
elif sense == ">":
return gp.GRB.GREATER_EQUAL
elif sense == "=":
return gp.GRB.EQUAL
else:
raise ValueError("Constraint sense is not '<', '>', '='")
def add_constraint_constant(model, variables, constant, sense="<",
con_name=""):
"""
Add a constraint to the model requiring the sum of the given
variables to be equal to, less than or equal to, or greater than or equal to a constant.
"""
if not variables:
raise ValueError("variables list not provided")
linexp = get_linexp_from_variables(variables)
sense = get_grb_sense_from_string(sense)
if not con_name:
model.addConstr(linexp, sense, constant)
else:
model.addConstr(linexp, sense, constant, con_name)
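# Hedged usage sketch: cap the total of a hypothetical "harvest" variable set
# at 1000 units. `m` is assumed to be a gurobipy model, and get_variables()
# (defined earlier in this module) retrieves the variable set by name.
def example_cap_total_harvest(m):
    harvest_vars = get_variables(m, "harvest")
    add_constraint_constant(m, harvest_vars, 1000, sense="<",
                            con_name="total_harvest_cap")
    m.update()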
def check_if_name_a_variable(name, model):
"""
Check if the supplied name corresponds to
a variable set name in the given model.
"""
variables = get_variables(model, name)
if not variables:
return False
return True
def check_if_name_a_constraint(name, model):
"""
Check if the supplied name corresponds to
a constraint set name in the given model.
"""
constraints = get_constraints(model, name)
if not constraints:
return False
return True
def add_constraint_variables(model, variables1, variables2,
sense="=", con_name=""):
"""
Add a constraint to the model requiring the sum of one
list of variables to be equal to, less than or equal to,
or greater than or equal to the sum of another list of variables.
"""
if not variables1 or not variables2:
ValueError("Variables list not provided")
linexp1 = get_linexp_from_variables(variables1)
linexp2 = get_linexp_from_variables(variables2)
sense = get_grb_sense_from_string(sense)
if not con_name:
model.addConstr(linexp1, sense, linexp2)
else:
model.addConstr(linexp1, sense, linexp2, con_name)
def graph_by_index(model, variables, index, title="", y_axis="", x_axis=""):
"""
Display a graph of the variable against the specified index
using matplotlib.
Matplotlib must already be installed to use this.
See: http://matplotlib.org/faq/installing_faq.html
"""
try:
import matplotlib.pyplot as plot
except ImportError:
raise ImportError("{0}\n{1}".format(
"Module Matplotlib not found.",
"Please download and install Matplotlib to use this function."))
fig = plot.figure()
ax = fig.add_subplot(111)
variables_sum = sum_variables_by_index(index, variables=variables)
keys, values = zip(*variables_sum.items())
y = range(len(variables_sum))
if title:
ax.set_title(title)
if y_axis:
ax.set_ylabel(y_axis)
if x_axis:
ax.set_xlabel(x_axis)
ax.bar(y, values)
#ax.legend(keys)
plot.show()
def graph_by_two_indices(model, variables, index1, index2, title="",
y_axis="", x_axis=""):
"""
Display a graph of the variable summed over index2
given by index1.
Matplotlib must already be installed to use this.
See: http://matplotlib.org/faq/installing_faq.html
"""
try:
import matplotlib.pyplot as plot
except ImportError:
raise ImportError("{0}\n{1}".format(
"Module Matplotlib not found.",
"Please download and install Matplotlib to use this function."))
fig = plot.figure()
ax = fig.add_subplot(111)
# We need to do this in reverse order to prepare it for graphing
variables_sum = sum_variables_by_two_indices(index2, index1,
variables=variables)
keys, values = zip(*variables_sum.items())
colours = ["b", "g", "r", "c", "y", "m", "k", "w"]
y = range(len(values[0]))
print y
if title:
ax.set_title(title)
if y_axis:
ax.set_ylabel(y_axis)
if x_axis:
ax.set_xlabel(x_axis)
bars = []
prev_bars = [0 for bar in y]
colour_count = 0
for key, value in variables_sum.iteritems():
cur_bars = [k[1] for k in sorted(value.items(), key=lambda x: x[0])]
bars.append(ax.bar(y, cur_bars, bottom=prev_bars,
color=colours[colour_count]))
prev_bars = cur_bars
colour_count += 1
if colour_count == len(colours) - 1:
colour_count = 0
ax.legend(keys)
plot.show()
def print_variables_to_csv(file_name, model="", name="", variables=""):
"""
Print the specified variables to a csv file
given by the file_name parameter.
If no variables are specified then all model
variables are written.
"""
if ".csv" not in file_name:
raise ValueError("Non csv file specified")
with open(file_name, "wb+") as write_file:
writer = csv.writer(write_file)
headers = ["Variable name", "Value"]
writer.writerow(headers)
variables = variables_check(model, name, variables)
# This will put quotes around strings, because the variable
# names have commas in them.
writer.writerows([ [v.varName, v.X] for v in variables])
def print_variables_to_csv_by_index(file_name, index,
model="", name="", variables=""):
"""
Print the sums of variables by the specified index
to a csv file.
Default behaviour of the function is to overwrite
the given file_name.
"""
if ".csv" not in file_name:
raise ValueError("Non csv file specified")
with open(file_name, "wb+") as write_file:
writer = csv.writer(write_file)
headers = ["Index", "Value"]
writer.writerow(headers)
variables_dict = sum_variables_by_index(index, model=model,
name=name, variables=variables)
if not variables_dict:
raise ValueError("No variables found")
writer.writerows([ [key, value]
for key, value in sorted(variables_dict.items())])
def print_variables_to_json_by_index(file_name, index, model="",
name="", variables="", index_alias=""):
"""
Print the specified variables to a json file given by file_name
organized by the specified index.
Formatted for reading into nvD3 applications.
Default behaviour is to overwrite file if one exists in
file_name's location.
"""
if ".json" not in file_name:
raise ValueError("Non json file specified")
index_name = index
if index_alias:
index_name = index_alias
var_dict = sum_variables_by_index(index, model=model,
name=name, variables=variables)
data = {index_name: [{ index_name: var_dict }] }
json.dump(data, open(file_name, "wb"))
| mit |
bootphon/crossitlearn | simple_dnn.py | 1 | 32993 | """
A deep neural network, with or without dropout, in one file.
"""
import numpy
import theano
import sys
import math
from theano import tensor as T
from theano import shared
from theano.tensor.shared_randomstreams import RandomStreams
from collections import OrderedDict
BATCH_SIZE = 100
STACKSIZE = 69
def relu_f(vec):
""" Wrapper to quickly change the rectified linear unit function """
return (vec + abs(vec)) / 2.
def softplus_f(v):
return T.nnet.softplus(v)
def dropout(rng, x, p=0.5):
""" Zero-out random values in x with probability p using rng """
if p > 0. and p < 1.:
seed = rng.randint(2 ** 30)
srng = theano.tensor.shared_randomstreams.RandomStreams(seed)
mask = srng.binomial(n=1, p=1.-p, size=x.shape,
dtype=theano.config.floatX)
return x * mask
return x
def fast_dropout(rng, x):
""" Multiply activations by N(1,1) """
seed = rng.randint(2 ** 30)
srng = RandomStreams(seed)
mask = srng.normal(size=x.shape, avg=1., dtype=theano.config.floatX)
return x * mask
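# Minimal sanity-check sketch (not used by the library): with p=0.5 roughly
# half of the activations come back zeroed, while fast_dropout only rescales
# them with multiplicative N(1, 1) noise.
def _dropout_demo():
    rng = numpy.random.RandomState(0)
    x = T.ones((4, 5))
    f = theano.function([], [dropout(rng, x, p=0.5), fast_dropout(rng, x)])
    return f()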
def build_shared_zeros(shape, name):
""" Builds a theano shared variable filled with a zeros numpy array """
return shared(value=numpy.zeros(shape, dtype=theano.config.floatX),
name=name, borrow=True)
class Linear(object):
""" Basic linear transformation layer (W.X + b) """
def __init__(self, rng, input, n_in, n_out, W=None, b=None, fdrop=False):
if W is None:
W_values = numpy.asarray(rng.uniform(
low=-numpy.sqrt(6. / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)), dtype=theano.config.floatX)
W_values *= 4 # This works for sigmoid activated networks!
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b = build_shared_zeros((n_out,), 'b')
self.input = input
self.W = W
self.b = b
self.params = [self.W, self.b]
self.output = T.dot(self.input, self.W) + self.b
if fdrop:
self.output = fast_dropout(rng, self.output)
def __repr__(self):
return "Linear"
class SigmoidLayer(Linear):
""" Sigmoid activation layer (sigmoid(W.X + b)) """
def __init__(self, rng, input, n_in, n_out, W=None, b=None, fdrop=False):
super(SigmoidLayer, self).__init__(rng, input, n_in, n_out, W, b)
self.pre_activation = self.output
if fdrop:
self.pre_activation = fast_dropout(rng, self.pre_activation)
self.output = T.nnet.sigmoid(self.pre_activation)
class ReLU(Linear):
""" Rectified Linear Unit activation layer (max(0, W.X + b)) """
def __init__(self, rng, input, n_in, n_out, W=None, b=None, fdrop=False):
if b is None:
b = build_shared_zeros((n_out,), 'b')
super(ReLU, self).__init__(rng, input, n_in, n_out, W, b)
self.pre_activation = self.output
if fdrop:
self.pre_activation = fast_dropout(rng, self.pre_activation)
self.output = relu_f(self.pre_activation)
class SoftPlus(Linear):
def __init__(self, rng, input, n_in, n_out, W=None, b=None, fdrop=0.):
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
super(SoftPlus, self).__init__(rng, input, n_in, n_out, W, b)
self.pre_activation = self.output
if fdrop:
self.pre_activation = fast_dropout(rng, self.pre_activation, fdrop)
self.output = softplus_f(self.pre_activation)
class DatasetMiniBatchIterator(object):
""" Basic mini-batch iterator """
def __init__(self, x, y, batch_size=BATCH_SIZE, randomize=False):
self.x = x
self.y = y
self.batch_size = batch_size
self.randomize = randomize
from sklearn.utils import check_random_state
self.rng = check_random_state(42)
def __iter__(self):
n_samples = self.x.shape[0]
if self.randomize:
for _ in xrange(n_samples / BATCH_SIZE):
if BATCH_SIZE > 1:
i = int(self.rng.rand(1) * ((n_samples+BATCH_SIZE-1) / BATCH_SIZE))
else:
i = int(math.floor(self.rng.rand(1) * n_samples))
yield (i, self.x[i*self.batch_size:(i+1)*self.batch_size],
self.y[i*self.batch_size:(i+1)*self.batch_size])
else:
for i in xrange((n_samples + self.batch_size - 1)
/ self.batch_size):
yield (self.x[i*self.batch_size:(i+1)*self.batch_size],
self.y[i*self.batch_size:(i+1)*self.batch_size])
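# Minimal iteration sketch over made-up arrays: with randomize=False each step
# yields one mini-batch of at most BATCH_SIZE rows, in order and without gaps.
def _minibatch_demo():
    x = numpy.zeros((250, 10), dtype='float32')
    y = numpy.zeros(250, dtype='int32')
    sizes = [bx.shape[0] for bx, by in DatasetMiniBatchIterator(x, y)]
    assert sum(sizes) == 250 and max(sizes) <= BATCH_SIZE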
class LogisticRegression:
"""Multi-class Logistic Regression
"""
def __init__(self, rng, input, n_in, n_out, W=None, b=None):
if W != None:
self.W = W
else:
self.W = build_shared_zeros((n_in, n_out), 'W')
if b != None:
self.b = b
else:
self.b = build_shared_zeros((n_out,), 'b')
# P(Y|X) = softmax(W.X + b)
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
self.output = self.y_pred
self.params = [self.W, self.b]
def negative_log_likelihood(self, y):
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def negative_log_likelihood_sum(self, y):
return -T.sum(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def log_loss(self, y):
# TODO
log_y_hat = T.log(self.p_y_given_x)
#ll = log_y_hat[T.arange(y.shape[0]), y] + log_y_hat[T.arange(y.shape[0]), 1-y]
#return -T.mean(ll)
def training_cost(self, y):
""" Wrapper for standard name """
return self.negative_log_likelihood_sum(y)
#return self.log_loss(y) TODO
def errors(self, y):
if y.ndim != self.y_pred.ndim:
raise TypeError("y should have the same shape as self.y_pred",
("y", y.type, "y_pred", self.y_pred.type))
if y.dtype.startswith('int'):
return T.mean(T.neq(self.y_pred, y))
else:
print("!!! y should be of int type")
return T.mean(T.neq(self.y_pred, numpy.asarray(y, dtype='int')))
class NeuralNet(object):
""" Neural network (not regularized, without dropout) """
def __init__(self, numpy_rng, theano_rng=None,
n_ins=40*3,
layers_types=[Linear, ReLU, ReLU, ReLU, LogisticRegression],
layers_sizes=[1024, 1024, 1024, 1024],
n_outs=62 * 3,
rho=0.95, eps=1.E-6,
max_norm=0.,
debugprint=False):
"""
TODO
"""
self.layers = []
self.params = []
self.n_layers = len(layers_types)
self.layers_types = layers_types
assert self.n_layers > 0
self.max_norm = max_norm
self._rho = rho # ``momentum'' for adadelta
self._eps = eps # epsilon for adadelta
self._accugrads = [] # for adadelta
self._accudeltas = [] # for adadelta
self._old_dxs = [] # for adadelta with Nesterov
if theano_rng == None:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
self.x = T.fmatrix('x')
self.y = T.ivector('y')
self.layers_ins = [n_ins] + layers_sizes
self.layers_outs = layers_sizes + [n_outs]
layer_input = self.x
for layer_type, n_in, n_out in zip(layers_types,
self.layers_ins, self.layers_outs):
this_layer = layer_type(rng=numpy_rng,
input=layer_input, n_in=n_in, n_out=n_out)
assert hasattr(this_layer, 'output')
self.params.extend(this_layer.params)
self._accugrads.extend([build_shared_zeros(t.shape.eval(),
'accugrad') for t in this_layer.params])
self._accudeltas.extend([build_shared_zeros(t.shape.eval(),
'accudelta') for t in this_layer.params])
self._old_dxs.extend([build_shared_zeros(t.shape.eval(),
'old_dxs') for t in this_layer.params])
self.layers.append(this_layer)
layer_input = this_layer.output
assert hasattr(self.layers[-1], 'training_cost')
assert hasattr(self.layers[-1], 'errors')
# TODO standardize cost
self.mean_cost = self.layers[-1].negative_log_likelihood(self.y)
self.cost = self.layers[-1].training_cost(self.y)
#self.mean_cost = self.layers[-1].training_cost(self.y) # TODO
if debugprint:
theano.printing.debugprint(self.cost)
self.errors = self.layers[-1].errors(self.y)
def __repr__(self):
dimensions_layers_str = map(lambda x: "x".join(map(str, x)),
zip(self.layers_ins, self.layers_outs))
return "_".join(map(lambda x: "_".join((x[0].__name__, x[1])),
zip(self.layers_types, dimensions_layers_str)))
def get_SGD_trainer(self):
""" Returns a plain SGD minibatch trainer with learning rate as param.
"""
batch_x = T.fmatrix('batch_x')
batch_y = T.ivector('batch_y')
learning_rate = T.fscalar('lr') # learning rate to use
# compute the gradients with respect to the model parameters
# using mean_cost so that the learning rate is not too dependent
# on the batch size
gparams = T.grad(self.mean_cost, self.params)
# compute list of weights updates
updates = OrderedDict()
for param, gparam in zip(self.params, gparams):
if self.max_norm:
W = param - gparam * learning_rate
col_norms = W.norm(2, axis=0)
desired_norms = T.clip(col_norms, 0, self.max_norm)
updates[param] = W * (desired_norms / (1e-6 + col_norms))
else:
updates[param] = param - gparam * learning_rate
train_fn = theano.function(inputs=[theano.Param(batch_x),
theano.Param(batch_y),
theano.Param(learning_rate)],
outputs=self.mean_cost,
updates=updates,
givens={self.x: batch_x, self.y: batch_y})
return train_fn
def get_adagrad_trainer(self):
""" Returns an Adagrad (Duchi et al. 2010) trainer using a learning rate.
"""
batch_x = T.fmatrix('batch_x')
batch_y = T.ivector('batch_y')
learning_rate = T.fscalar('lr') # learning rate to use
# compute the gradients with respect to the model parameters
gparams = T.grad(self.mean_cost, self.params)
# compute list of weights updates
updates = OrderedDict()
for accugrad, param, gparam in zip(self._accugrads, self.params, gparams):
# c.f. Algorithm 1 in the Adadelta paper (Zeiler 2012)
agrad = accugrad + gparam * gparam
dx = - (learning_rate / T.sqrt(agrad + self._eps)) * gparam
if self.max_norm:
W = param + dx
col_norms = W.norm(2, axis=0)
desired_norms = T.clip(col_norms, 0, self.max_norm)
updates[param] = W * (desired_norms / (1e-6 + col_norms))
else:
updates[param] = param + dx
updates[accugrad] = agrad
train_fn = theano.function(inputs=[theano.Param(batch_x),
theano.Param(batch_y),
theano.Param(learning_rate)],
outputs=self.mean_cost,
updates=updates,
givens={self.x: batch_x, self.y: batch_y})
return train_fn
def get_adadelta_trainer(self):
""" Returns an Adadelta (Zeiler 2012) trainer using self._rho and
self._eps params.
"""
batch_x = T.fmatrix('batch_x')
batch_y = T.ivector('batch_y')
# compute the gradients with respect to the model parameters
gparams = T.grad(self.mean_cost, self.params)
# compute list of weights updates
updates = OrderedDict()
for accugrad, accudelta, param, gparam in zip(self._accugrads,
self._accudeltas, self.params, gparams):
# c.f. Algorithm 1 in the Adadelta paper (Zeiler 2012)
agrad = self._rho * accugrad + (1 - self._rho) * gparam * gparam
dx = - T.sqrt((accudelta + self._eps)
/ (agrad + self._eps)) * gparam
updates[accudelta] = (self._rho * accudelta
+ (1 - self._rho) * dx * dx)
if self.max_norm:
W = param + dx
col_norms = W.norm(2, axis=0)
desired_norms = T.clip(col_norms, 0, self.max_norm)
updates[param] = W * (desired_norms / (1e-6 + col_norms))
else:
updates[param] = param + dx
updates[accugrad] = agrad
train_fn = theano.function(inputs=[theano.Param(batch_x),
theano.Param(batch_y)],
outputs=self.mean_cost,
updates=updates,
givens={self.x: batch_x, self.y: batch_y})
return train_fn
def score_classif(self, given_set):
""" Returns functions to get current classification errors. """
batch_x = T.fmatrix('batch_x')
batch_y = T.ivector('batch_y')
score = theano.function(inputs=[theano.Param(batch_x),
theano.Param(batch_y)],
outputs=self.errors,
givens={self.x: batch_x, self.y: batch_y})
def scoref():
""" returned function that scans the entire set given as input """
return [score(batch_x, batch_y) for batch_x, batch_y in given_set]
return scoref
class RegularizedNet(NeuralNet):
""" Neural net with L1 and L2 regularization """
def __init__(self, numpy_rng, theano_rng=None,
n_ins=100,
layers_types=[ReLU, ReLU, ReLU, LogisticRegression],
layers_sizes=[1024, 1024, 1024],
n_outs=2,
rho=0.9, eps=1.E-6,
L1_reg=0.,
L2_reg=0.,
max_norm=0.,
debugprint=False):
"""
TODO
"""
super(RegularizedNet, self).__init__(numpy_rng, theano_rng, n_ins,
layers_types, layers_sizes, n_outs, rho, eps, max_norm,
debugprint)
L1 = shared(0.)
for param in self.params:
L1 += T.sum(abs(param))
if L1_reg > 0.:
self.cost = self.cost + L1_reg * L1
L2 = shared(0.)
for param in self.params:
L2 += T.sum(param ** 2)
if L2_reg > 0.:
self.cost = self.cost + L2_reg * L2
class DropoutNet(NeuralNet):
""" Neural net with dropout (see Hinton's et al. paper) """
def __init__(self, numpy_rng, theano_rng=None,
n_ins=40*3,
layers_types=[ReLU, ReLU, ReLU, ReLU, LogisticRegression],
layers_sizes=[4000, 4000, 4000, 4000],
dropout_rates=[0.2, 0.5, 0.5, 0.5, 0.5],
n_outs=62 * 3,
rho=0.98, eps=1.E-6,
max_norm=0.,
fast_drop=True,
debugprint=False):
"""
TODO
"""
super(DropoutNet, self).__init__(numpy_rng, theano_rng, n_ins,
layers_types, layers_sizes, n_outs, rho, eps, max_norm,
debugprint)
self.dropout_rates = dropout_rates
if fast_drop:
if dropout_rates[0]:
dropout_layer_input = fast_dropout(numpy_rng, self.x)
else:
dropout_layer_input = self.x
else:
dropout_layer_input = dropout(numpy_rng, self.x, p=dropout_rates[0])
self.dropout_layers = []
for layer, layer_type, n_in, n_out, dr in zip(self.layers,
layers_types, self.layers_ins, self.layers_outs,
dropout_rates[1:] + [0]): # !!! we do not dropout anything
# from the last layer !!!
if dr:
if fast_drop:
this_layer = layer_type(rng=numpy_rng,
input=dropout_layer_input, n_in=n_in, n_out=n_out,
W=layer.W, b=layer.b, fdrop=True)
else:
this_layer = layer_type(rng=numpy_rng,
input=dropout_layer_input, n_in=n_in, n_out=n_out,
W=layer.W * 1. / (1. - dr),
b=layer.b * 1. / (1. - dr))
# N.B. dropout with dr==1 does not drop anything!!
this_layer.output = dropout(numpy_rng, this_layer.output, dr)
else:
this_layer = layer_type(rng=numpy_rng,
input=dropout_layer_input, n_in=n_in, n_out=n_out,
W=layer.W, b=layer.b)
assert hasattr(this_layer, 'output')
self.dropout_layers.append(this_layer)
dropout_layer_input = this_layer.output
assert hasattr(self.layers[-1], 'training_cost')
assert hasattr(self.layers[-1], 'errors')
# TODO standardize cost
# these are the dropout costs
self.mean_cost = self.dropout_layers[-1].negative_log_likelihood(self.y)
self.cost = self.dropout_layers[-1].training_cost(self.y)
# these is the non-dropout errors
self.errors = self.layers[-1].errors(self.y)
def __repr__(self):
return super(DropoutNet, self).__repr__() + "\n"\
+ "dropout rates: " + str(self.dropout_rates)
def add_fit_and_score(class_to_chg):
""" Mutates a class to add the fit() and score() functions to a NeuralNet.
"""
from types import MethodType
def fit(self, x_train, y_train, x_dev=None, y_dev=None,
max_epochs=20, early_stopping=True, split_ratio=0.1, # TODO 100+ epochs
method='adadelta', verbose=False, plot=False):
"""
TODO
"""
import time, copy
if x_dev == None or y_dev == None:
from sklearn.cross_validation import train_test_split
x_train, x_dev, y_train, y_dev = train_test_split(x_train, y_train,
test_size=split_ratio, random_state=42)
if method == 'sgd':
train_fn = self.get_SGD_trainer()
elif method == 'adagrad':
train_fn = self.get_adagrad_trainer()
elif method == 'adadelta':
train_fn = self.get_adadelta_trainer()
elif method == 'adadelta_rprop':
train_fn = self.get_adadelta_rprop_trainer()
train_set_iterator = DatasetMiniBatchIterator(x_train, y_train)
dev_set_iterator = DatasetMiniBatchIterator(x_dev, y_dev)
train_scoref = self.score_classif(train_set_iterator)
dev_scoref = self.score_classif(dev_set_iterator)
best_dev_loss = numpy.inf
epoch = 0
# TODO early stopping (not just cross val, also stop training)
if plot:
verbose = True
self._costs = []
self._train_errors = []
self._dev_errors = []
self._updates = []
while epoch < max_epochs:
if not verbose:
sys.stdout.write("\r%0.2f%%" % (epoch * 100./ max_epochs))
sys.stdout.flush()
avg_costs = []
timer = time.time()
for x, y in train_set_iterator:
if method == 'sgd' or 'adagrad' in method:
avg_cost = train_fn(x, y, lr=1.E-2)
elif 'adadelta' in method:
avg_cost = train_fn(x, y)
if type(avg_cost) == list:
avg_costs.append(avg_cost[0])
else:
avg_costs.append(avg_cost)
if verbose:
mean_costs = numpy.mean(avg_costs)
mean_train_errors = numpy.mean(train_scoref())
print(' epoch %i took %f seconds' %
(epoch, time.time() - timer))
print(' epoch %i, avg costs %f' %
(epoch, mean_costs))
print(' method %s, epoch %i, training error %f' %
(method, epoch, mean_train_errors))
if plot:
self._costs.append(mean_costs)
self._train_errors.append(mean_train_errors)
dev_errors = numpy.mean(dev_scoref())
if plot:
self._dev_errors.append(dev_errors)
if dev_errors < best_dev_loss:
best_dev_loss = dev_errors
best_params = copy.deepcopy(self.params)
if verbose:
print('!!! epoch %i, validation error of best model %f' %
(epoch, dev_errors))
epoch += 1
if not verbose:
print("")
for i, param in enumerate(best_params):
self.params[i] = param
def score(self, x, y):
""" error rates """
iterator = DatasetMiniBatchIterator(x, y)
scoref = self.score_classif(iterator)
return numpy.mean(scoref())
class_to_chg.fit = MethodType(fit, None, class_to_chg)
class_to_chg.score = MethodType(score, None, class_to_chg)
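# Hedged end-to-end sketch on synthetic data (sizes are made up): patch the
# class with fit/score, build a small regularized net and train it with the
# default adadelta trainer. Mirrors train_models() in the demo section below.
def _tiny_dnn_demo():
    add_fit_and_score(RegularizedNet)
    rng = numpy.random.RandomState(123)
    x = numpy.asarray(rng.rand(200, 20), dtype='float32')
    y = numpy.asarray(rng.randint(0, 2, 200), dtype='int32')
    net = RegularizedNet(numpy_rng=rng, n_ins=20,
                         layers_types=[ReLU, LogisticRegression],
                         layers_sizes=[50], n_outs=2,
                         L2_reg=1. / x.shape[0])
    net.fit(x, y, max_epochs=5, verbose=False)
    return net.score(x, y)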
if __name__ == "__main__":
add_fit_and_score(DropoutNet)
add_fit_and_score(RegularizedNet)
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
from scipy.ndimage import convolve
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = numpy.concatenate([X] +
[numpy.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = numpy.concatenate([Y for _ in range(5)], axis=0)
return X, Y
from sklearn import datasets, svm, naive_bayes
from sklearn import cross_validation, preprocessing
SPOKEN_WORDS = True
MNIST = False
DIGITS = False
NUDGE_DIGITS = True
FACES = False
TWENTYNEWSGROUPS = False
VERBOSE = True
SCALE = True
PLOT = True
def train_models(x_train, y_train, x_test, y_test, n_features, n_outs,
use_dropout=False, n_epochs=100, numpy_rng=None, # TODO 200+ epochs
svms=False, nb=False, deepnn=True, name=''):
if svms:
print("Linear SVM")
classifier = svm.SVC(gamma=0.001)
print(classifier)
classifier.fit(x_train, y_train)
print("score: %f" % classifier.score(x_test, y_test))
print("RBF-kernel SVM")
classifier = svm.SVC(kernel='rbf', class_weight='auto')
print(classifier)
classifier.fit(x_train, y_train)
print("score: %f" % classifier.score(x_test, y_test))
if nb:
print("Multinomial Naive Bayes")
classifier = naive_bayes.MultinomialNB()
print(classifier)
classifier.fit(x_train, y_train)
print("score: %f" % classifier.score(x_test, y_test))
if deepnn:
import warnings
warnings.filterwarnings("ignore") # TODO remove
if use_dropout:
n_epochs *= 4
pass
def new_dnn(dropout=False):
if dropout:
print("Dropout DNN")
return DropoutNet(numpy_rng=numpy_rng, n_ins=n_features,
#layers_types=[ReLU, ReLU, ReLU, ReLU, LogisticRegression],
layers_types=[SoftPlus, SoftPlus, SoftPlus, SoftPlus, LogisticRegression],
layers_sizes=[2000, 2000, 2000, 2000],
dropout_rates=[0.2, 0.5, 0.5, 0.5, 0.5],
n_outs=n_outs,
max_norm=4.,
fast_drop=False,
debugprint=0)
else:
print("Simple (regularized) DNN")
return RegularizedNet(numpy_rng=numpy_rng, n_ins=n_features,
#layers_types=[LogisticRegression],
#layers_sizes=[],
#layers_types=[ReLU, ReLU, ReLU, LogisticRegression],
#layers_types=[SoftPlus, SoftPlus, SoftPlus, LogisticRegression],
#layers_sizes=[1000, 1000, 1000],
layers_types=[ReLU, LogisticRegression],
layers_sizes=[200],
n_outs=n_outs,
#L1_reg=0.001/x_train.shape[0],
#L2_reg=0.001/x_train.shape[0],
L1_reg=0.,
L2_reg=1./x_train.shape[0],
max_norm=0.,
debugprint=0)
import matplotlib.pyplot as plt
plt.figure()
ax1 = plt.subplot(221)
ax2 = plt.subplot(222)
ax3 = plt.subplot(223)
ax4 = plt.subplot(224) # TODO updates of the weights
methods = ['adadelta']
for method in methods:
dnn = new_dnn(use_dropout)
print dnn
dnn.fit(x_train, y_train, max_epochs=n_epochs, method=method, verbose=VERBOSE, plot=PLOT)
test_error = dnn.score(x_test, y_test)
print("score: %f" % (1. - test_error))
ax1.plot(numpy.log10(dnn._costs), label=method)
#ax2.plot(numpy.log10(dnn._train_errors), label=method)
#ax3.plot(numpy.log10(dnn._dev_errors), label=method)
ax2.plot(dnn._train_errors, label=method)
ax3.plot(dnn._dev_errors, label=method)
#ax4.plot(dnn._updates, label=method) TODO
ax4.plot([test_error for _ in range(10)], label=method)
ax1.set_xlabel('epoch')
ax1.set_ylabel('cost (log10)')
ax2.set_xlabel('epoch')
ax2.set_ylabel('train error')
ax3.set_xlabel('epoch')
ax3.set_ylabel('dev error')
ax4.set_ylabel('test error')
plt.legend()
plt.savefig('training_log' + name + '.png')
if MNIST:
from sklearn.datasets import fetch_mldata
mnist = fetch_mldata('MNIST original')
X = numpy.asarray(mnist.data, dtype='float32')
if SCALE:
#X = preprocessing.scale(X)
X /= 255.
y = numpy.asarray(mnist.target, dtype='int32')
#target_names = mnist.target_names
print("Total dataset size:")
print("n samples: %d" % X.shape[0])
print("n features: %d" % X.shape[1])
print("n classes: %d" % len(set(y)))
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
X, y, test_size=0.2, random_state=42)
train_models(x_train, y_train, x_test, y_test, X.shape[1],
len(set(y)), numpy_rng=numpy.random.RandomState(123),
name='MNIST')
if DIGITS:
digits = datasets.load_digits()
data = numpy.asarray(digits.data, dtype='float32')
target = numpy.asarray(digits.target, dtype='int32')
x = data
y = target
if NUDGE_DIGITS:
x, y = nudge_dataset(x, y)
if SCALE:
x = preprocessing.scale(x)
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
x, y, test_size=0.2, random_state=42)
train_models(x_train, y_train, x_test, y_test, x.shape[1],
len(set(target)), numpy_rng=numpy.random.RandomState(123),
name='digits')
if FACES:
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(message)s')
lfw_people = datasets.fetch_lfw_people(min_faces_per_person=70,
resize=0.4)
X = numpy.asarray(lfw_people.data, dtype='float32')
if SCALE:
X = preprocessing.scale(X)
y = numpy.asarray(lfw_people.target, dtype='int32')
target_names = lfw_people.target_names
print("Total dataset size:")
print("n samples: %d" % X.shape[0])
print("n features: %d" % X.shape[1])
print("n classes: %d" % target_names.shape[0])
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
X, y, test_size=0.2, random_state=42)
train_models(x_train, y_train, x_test, y_test, X.shape[1],
len(set(y)), numpy_rng=numpy.random.RandomState(123),
name='faces')
if TWENTYNEWSGROUPS:
from sklearn.feature_extraction.text import TfidfVectorizer
newsgroups_train = datasets.fetch_20newsgroups(subset='train')
vectorizer = TfidfVectorizer(encoding='latin-1', max_features=10000)
#vectorizer = HashingVectorizer(encoding='latin-1')
x_train = vectorizer.fit_transform(newsgroups_train.data)
x_train = numpy.asarray(x_train.todense(), dtype='float32')
y_train = numpy.asarray(newsgroups_train.target, dtype='int32')
newsgroups_test = datasets.fetch_20newsgroups(subset='test')
x_test = vectorizer.transform(newsgroups_test.data)
x_test = numpy.asarray(x_test.todense(), dtype='float32')
y_test = numpy.asarray(newsgroups_test.target, dtype='int32')
train_models(x_train, y_train, x_test, y_test, x_train.shape[1],
len(set(y_train)),
numpy_rng=numpy.random.RandomState(123),
svms=False, nb=True, deepnn=True,
name='20newsgroups')
if SPOKEN_WORDS:
# words done by "say", shapes of their filterbanks
#>>> shapes
#array([[62, 40],
# [65, 40],
# [58, 40],
# ...,
# [85, 40],
# [79, 40],
# [51, 40]])
#>>> shapes.mean(axis=0)
#array([ 70.87751196, 40. ])
#>>> shapes.std(axis=0)
#array([ 12.94580736, 0. ])
#>>> shapes.min(axis=0)
#array([39, 40])
words_fbanks = numpy.load("all_words_pascal1k.npz")
n_tokens = len([k for k in words_fbanks.keys()])
lexicon = set([w.split('_')[1] for w in words_fbanks.keys()])
lexicon = [w for w in lexicon] # we need an ordered collection
n_words = len(lexicon)
all_fbanks = numpy.concatenate([v for _, v in words_fbanks.iteritems()])
print all_fbanks.shape
mean = all_fbanks.mean(axis=0)
print mean.shape
std = all_fbanks.std(axis=0)
print std.shape
# take 69 fbanks in the middle of the word and pad with 0s if needed
X = numpy.zeros((n_tokens, 40*STACKSIZE), dtype='float32')
y = numpy.zeros(n_tokens, dtype='int32')
for i, (swf, fb) in enumerate(words_fbanks.iteritems()):
spkr, word, _ = swf.split('_')
l = fb.shape[0]
m = l/2
s = max(0, m - ((STACKSIZE-1) / 2))
e = min(l-1, m + ((STACKSIZE-1) / 2))
tmp = (fb - mean) / std
tmp = tmp[s:e+1].flatten()
diff = 40*STACKSIZE - tmp.shape[0]
if not diff:
X[i] = tmp
else:
X[i][diff/2:-diff/2] = tmp
y[i] = lexicon.index(word)
# train the DNN, with the training set as test set if let in this form:
train_models(X, y, X, y, X.shape[1],
len(set(y)),
numpy_rng=numpy.random.RandomState(123),
svms=False, nb=False, deepnn=True,
name='spoken_words')
| mit |
ybalgir/Quantop | Lec7.py | 1 | 1822 | import numpy as np
import pandas as pd
from statsmodels import regression
import statsmodels.api as sm
import matplotlib.pyplot as plt
import math
import pandas_datareader.data as web
from datetime import datetime
def Starter_Lec7():
start = datetime(2014, 1, 1)
end = datetime(2015, 1, 1)
asset = web.DataReader("TSLA","yahoo",start,end)
asset_closingPrice = asset['Close']
benchmark = web.DataReader("SPY","yahoo",start,end)
benchmark_closingPrice = benchmark['Close']
r_a = asset_closingPrice.pct_change()[1:]
r_b = benchmark_closingPrice.pct_change()[1:]
modelSummary = linreg(r_a,r_b)
print("{0} {1} \n\n".format(modelSummary,type(modelSummary)))
def linreg(X,Y):
#running linear regression
X = sm.add_constant(X)
model = regression.linear_model.OLS(Y,X).fit()
a = model.params[0]
b = model.params[1]
X = pd.DataFrame(X, columns=['Close']) #Y_CMT Neat trick to extract columns from a pandas dataframe
# Return summary of the regression and plot results
X2 = np.linspace(float(X.min()), float(X.max()), 100)
Y_hat = X2 * b + a
plt.scatter(X, Y, alpha=0.3) # Plot the raw data
plt.plot(X2, Y_hat, 'r', alpha=0.9) # Add the regression line, colored in red
plt.xlabel('X Value')
plt.ylabel('Y Value')
plt.show()
return model.summary()
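# Quick synthetic check (made-up data): regress y = 2*x + noise and eyeball
# that the fitted slope in the printed summary is close to 2. The Series is
# named 'Close' because linreg() extracts that column after add_constant.
def TestLinreg():
    x = pd.Series(np.linspace(0, 1, 100), name='Close')
    y = 2 * x + np.random.normal(0, 0.1, 100)
    return linreg(x, y)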
def TestPlotting():
N = 8
y = np.zeros(N)
x1 = np.linspace(0, 10, N, endpoint=True)
x2 = np.linspace(0, 10, N, endpoint=False)
plt.plot(x1, y, 'o')
plt.plot(x2, y + 0.5, 'o')
plt.ylim([-0.5, 1])
plt.show()
def NumpyMatrix():
array1 = np.matrix([[1,2,3],[4,5,6],[7,8,9]])
print("{0} {1} \n\n".format(array1[:,2],type(array1)))
array1 = array1[:,2]
print("{0} {1} \n\n".format(array1,type(array1)))
| gpl-3.0 |
studywolf/pydmps | pydmps/dmp_rhythmic.py | 1 | 5004 | """
Copyright (C) 2013 Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from pydmps.dmp import DMPs
import numpy as np
class DMPs_rhythmic(DMPs):
"""An implementation of discrete DMPs"""
def __init__(self, **kwargs):
"""
"""
# call super class constructor
super(DMPs_rhythmic, self).__init__(pattern="rhythmic", **kwargs)
self.gen_centers()
# set variance of Gaussian basis functions
# trial and error to find this spacing
self.h = np.ones(self.n_bfs) * self.n_bfs # 1.75
self.check_offset()
def gen_centers(self):
"""Set the centre of the Gaussian basis
functions be spaced evenly throughout run time"""
c = np.linspace(0, 2 * np.pi, self.n_bfs + 1)
c = c[0:-1]
self.c = c
def gen_front_term(self, x, dmp_num):
"""Generates the front term on the forcing term.
For rhythmic DMPs it's non-diminishing, so this
function is just a placeholder to return 1.
x float: the current value of the canonical system
dmp_num int: the index of the current dmp
"""
if isinstance(x, np.ndarray):
return np.ones(x.shape)
return 1
def gen_goal(self, y_des):
"""Generate the goal for path imitation.
For rhythmic DMPs the goal is the average of the
desired trajectory.
y_des np.array: the desired trajectory to follow
"""
goal = np.zeros(self.n_dmps)
for n in range(self.n_dmps):
num_idx = ~np.isnan(y_des[n]) # ignore nan's when calculating goal
goal[n] = 0.5 * (y_des[n, num_idx].min() + y_des[n, num_idx].max())
return goal
def gen_psi(self, x):
"""Generates the activity of the basis functions for a given
canonical system state or path.
x float, array: the canonical system state or path
"""
if isinstance(x, np.ndarray):
x = x[:, None]
return np.exp(self.h * (np.cos(x - self.c) - 1))
def gen_weights(self, f_target):
"""Generate a set of weights over the basis functions such
that the target forcing term trajectory is matched.
f_target np.array: the desired forcing term trajectory
"""
# calculate x and psi
x_track = self.cs.rollout()
psi_track = self.gen_psi(x_track)
# efficiently calculate BF weights using weighted linear regression
for d in range(self.n_dmps):
for b in range(self.n_bfs):
self.w[d, b] = np.dot(psi_track[:, b], f_target[:, d]) / (
np.sum(psi_track[:, b]) + 1e-10
)
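# Minimal check (made-up sizes): the basis activations above are periodic in
# the phase x, so gen_psi at x and at x + 2*pi should coincide.
def _psi_periodicity_demo():
    dmp = DMPs_rhythmic(n_dmps=1, n_bfs=5, w=np.zeros((1, 5)))
    x = np.linspace(0, 2 * np.pi, 50)
    assert np.allclose(dmp.gen_psi(x), dmp.gen_psi(x + 2 * np.pi))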
# ==============================
# Test code
# ==============================
if __name__ == "__main__":
import matplotlib.pyplot as plt
# test normal run
dmp = DMPs_rhythmic(n_dmps=1, n_bfs=10, w=np.zeros((1, 10)))
y_track, dy_track, ddy_track = dmp.rollout()
plt.figure(1, figsize=(6, 3))
plt.plot(np.ones(len(y_track)) * dmp.goal, "r--", lw=2)
plt.plot(y_track, lw=2)
plt.title("DMP system - no forcing term")
plt.xlabel("time (ms)")
plt.ylabel("system trajectory")
plt.legend(["goal", "system state"], loc="lower right")
plt.tight_layout()
# test imitation of path run
plt.figure(2, figsize=(6, 4))
n_bfs = [10, 30, 50, 100, 10000]
# a straight line to target
path1 = np.sin(np.arange(0, 2 * np.pi, 0.01) * 5)
# a strange path to target
path2 = np.zeros(path1.shape)
path2[int(len(path2) / 2.0) :] = 0.5
for ii, bfs in enumerate(n_bfs):
dmp = DMPs_rhythmic(n_dmps=2, n_bfs=bfs)
dmp.imitate_path(y_des=np.array([path1, path2]))
y_track, dy_track, ddy_track = dmp.rollout()
plt.figure(2)
plt.subplot(211)
plt.plot(y_track[:, 0], lw=2)
plt.subplot(212)
plt.plot(y_track[:, 1], lw=2)
plt.subplot(211)
a = plt.plot(path1, "r--", lw=2)
plt.title("DMP imitate path")
plt.xlabel("time (ms)")
plt.ylabel("system trajectory")
plt.legend([a[0]], ["desired path"], loc="lower right")
plt.subplot(212)
b = plt.plot(path2, "r--", lw=2)
plt.title("DMP imitate path")
plt.xlabel("time (ms)")
plt.ylabel("system trajectory")
plt.legend(["%i BFs" % i for i in n_bfs], loc="lower right")
plt.tight_layout()
plt.show()
| gpl-3.0 |
fracturica/shardlib | shardlib/comp_analysis/SIMCompAnalysis.py | 1 | 23592 | import dataProcessing as dp
import plotFuncs as pf
import numpy as np
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
from matplotlib.path import Path
from mpl_toolkits.mplot3d import Axes3D
import matplotlib as mpl
from compAnalysisBase import CompAnalysisBase
class SIMCompAnalysis(CompAnalysisBase):
def __init__(self, leavesQueue, criteria, sifs):
self.queue = leavesQueue
self.sifs = sifs
self.crit = criteria
def printQueueItems(self, items):
self.queue.printTitle()
for i in sorted(items):
self.queue.printQueueItem(i)
def getItemNodeDict(self, items, queue):
qdict = queue.getQueueDict()
return dict([(i, qdict[i]) for i in items])
def calcAlphaVal(self, sif, item):
vals = len(self.dataDicts[0][0][sif][item])
if vals > 1000:
return 0.1
else:
return 1
class BoxCompPlot(SIMCompAnalysis):
def createCompBoxPlot(self, items, errType, fig):
self.items = items
self.errType = errType
self.createDataDictAndEstBoxPlot()
self.createDataStrBoxPlot()
self.createFigure(fig)
def createDataStrBoxPlot(self):
dd = self.getItemNodeDict(self.items, self.queue)
optKey = self.getLeavesOptKey()
data = [dd, optKey, 'Number in Queue', '']
self.dataStr = [data]
def getLeavesOptKey(self):
return sorted(self.est.items(), key=lambda x: abs(x[1]))[0][0]
def createDataDictAndEstBoxPlot(self):
dataDict = {s: {} for s in self.sifs}
est = {i: {} for i in self.items}
dd = self.getItemNodeDict(self.items, self.queue)
for i in self.items:
node = dd[i]
errs, est[i] = self.getNodeErrsEst(node)
for s in self.sifs:
dataDict[s][i] = errs[s]
self.est = {i: est[i][self.crit[1]] for i in self.items}
self.dataDicts = [dataDict]
def getNodeErrsEst(self, node):
adn = dp.AnalysisNodeData(node, self.sifs)
adn.performOperations()
est = adn.getEstimates()[self.crit[0]]
errs = adn.getErrors()[self.errType]
return errs, est
class HistCompPlot(SIMCompAnalysis):
def createCompHistPlot(self, items, errType, xlim, fig):
self.fig = fig
self.items = items
self.errType = errType
self.xlim = xlim
self.createDataStr()
self.createDataDict()
self.createFigure()
def createDataStr(self):
dd = self.getItemNodeDict(self.items.keys(), self.queue)
xlabel = 'errors "{0}"'.format(self.errType)
data = [dd, None, xlabel, 'hist']
self.dataStr = [data]
def createDataDict(self):
data = {s: {} for s in self.sifs}
for i in self.items.keys():
node = self.dataStr[0][0][i]
errs = self.getNodeErrors(node)
for s in self.sifs:
data[s][i] = errs[s]
self.dataDicts = [data]
def getNodeErrors(self, node):
adn = dp.AnalysisNodeData(node, self.sifs)
adn.performOperations()
errs = adn.getErrors()[self.errType]
return errs
def setAxesXlim(self):
for ax in self.axes:
ax.set_xlim(self.xlim)
def setAxesYlim(self):
ymin, ymax = 10e16, 10e-16
for ax in self.axes:
y1, y2 = ax.get_ylim()
ymin = y1 if y1 < ymin else ymin
ymax = y2 if y2 > ymax else ymax
for ax in self.axes:
ax.set_ylim((ymin, ymax))
def setLegend(self, handles):
text = 'Node: '
labels = [text + str(i) for i in sorted(handles.keys())]
handles = [handles[i] for i in sorted(handles.keys())]
self.axes[0].legend(handles, labels, bbox_to_anchor=(1.02, 1),
loc=2, borderaxespad=0)
def createFigure(self):
self.axes = []
self.createFigureAxes()
handles = {}
for k in range(len(self.axes)):
s = self.sifs[k]
for i in self.items.keys():
n, b, p = self.axes[k].hist(
self.dataDicts[0][s][i],
self.items[i],
normed=True,
alpha=0.5)
handles[i] = p[0]
self.setAxesXlim()
self.setAxesYlim()
self.setLegend(handles)
self.setXlabels()
self.printQueueItems(self.items.keys())
class CorrCompPlot(SIMCompAnalysis):
def createCompCorrPlot(self, items, quantityType, ylim, fig):
self.fig = fig
self.items = items
self.qt = quantityType
self.ylim = ylim
self.createDataStr()
self.createDataDict()
self.createFigure()
def createDataStr(self):
dd = self.getItemNodeDict(self.items, self.queue)
data = [dd, None, 'analytical values', 'analysis vs analytical']
self.dataStr = [data]
def createDataDict(self):
dataX = {s: {} for s in self.sifs}
dataY = {s: {} for s in self.sifs}
for i in self.items:
node = self.dataStr[0][0][i]
anSol, res = self.getNodeParams(node)
for s in self.sifs:
dataX[s][i] = anSol[s]
dataY[s][i] = res[s]
self.dataDicts = [[dataX, dataY]]
def getNodeParams(self, node):
adn = dp.AnalysisNodeData(node, self.sifs)
adn.performOperations()
anSol = adn.getAnSol()
res = adn.getDataByType(self.qt)
return anSol, res
def getReferenceXYVals(self):
minV = {s: 10e16 for s in self.sifs}
maxV = {s: -10e16 for s in self.sifs}
for s in self.sifs:
for i in self.items:
mn = min(self.dataDicts[0][0][s][i])
mx = max(self.dataDicts[0][0][s][i])
minV[s] = mn if mn < minV[s] else minV[s]
maxV[s] = mx if mx > maxV[s] else maxV[s]
if self.qt == 'results':
refX = {s: [minV[s], maxV[s]] for s in self.sifs}
return refX, refX
elif self.qt in ['difference', 'normedDiff']:
refX = {s: [max(0, minV[s]), maxV[s]] for s in self.sifs}
refY = {s: [0, 0] for s in self.sifs}
return refX, refY
else:
raise NotImplementedError
def getXYVals(self, sif, item):
if self.qt == 'results':
X = self.dataDicts[0][0][sif][item]
Y = self.dataDicts[0][1][sif][item]
elif self.qt in ['difference', 'normedDiff']:
X = np.abs(self.dataDicts[0][0][sif][item])
Y = self.dataDicts[0][1][sif][item]
else:
raise NotImplementedError
return X, Y
def createPlot(self):
self.handles = {}
refX, refY = self.getReferenceXYVals()
for k in range(len(self.axes)):
s = self.sifs[k]
for i in self.items:
alpha = self.calcAlphaVal(s, i)
X, Y = self.getXYVals(s, i)
p, = self.axes[k].plot(X, Y, '.', alpha=alpha)
self.handles[i] = p
r, = self.axes[k].plot(refX[s], refY[s], 'k', lw=1.5)
self.handles['reference'] = r
def setXLim(self):
refX, refY = self.getReferenceXYVals()
for k in range(len(self.axes)):
s = self.sifs[k]
self.axes[k].set_xlim(refX[s])
def setLegend(self):
text = 'Node: '
labels = [text + str(i) for i in self.items]
handles = [self.handles[i] for i in self.items]
if 'reference' in self.handles.keys():
handles.append(self.handles['reference'])
labels.append('ref line')
self.axes[0].legend(handles, labels, bbox_to_anchor=(1.02, 1),
loc=2, borderaxespad=0)
def setYLim(self):
if isinstance(self.ylim, (list, tuple)):
for ax in self.axes:
ax.set_ylim(self.ylim)
def createFigure(self):
self.axes = []
self.createFigureAxes()
self.createPlot()
self.setXLim()
self.setLegend()
self.printQueueItems(self.items)
self.setYLim()
class RangeCompPlot(SIMCompAnalysis):
def createCompRangePlot(self, items, opts, fig):
self.fig = fig
self.items = items
self.opts = opts
self.createDataStr()
self.createDataDict()
self.createFigure()
def createDataStr(self):
self.dataStr = []
qdict = self.queue.getQueueDict()
for k in sorted(self.items.keys()):
optSim = self.getOptSim(qdict[k])
data = [{k: qdict[k]}, optSim, 'angles',
self.getSubplotTitle(qdict[k])]
self.dataStr.append(data)
def getOptSim(self, node):
if self.opts['optSim']:
sims = node.getSuccessfulMembers()
optSim = pf.getSimIdsWithLowestErrorPerDH(
sims, self.crit[0], self.crit[1]).values()[0][0]
return optSim
else:
return None
def createDataDict(self):
self.dataDicts = []
for item in self.dataStr:
node = item[0].values()[0]
self.dataDicts.append(self.getNodeParams(node))
def getNodeParams(self, node):
adn = dp.AnalysisNodeData(node, self.sifs)
adn.performOperations()
angles = adn.getAngles()
results = adn.getResults()
ansol = adn.getAnSol()
errors = adn.getErrors()[self.opts['errors']]
return angles, results, ansol, errors
def createSlices(self):
self.slices = []
i = 0
for k in sorted(self.items.keys()):
numInt = self.items[k]
angles = self.dataDicts[i][0]
sl = self.createSliceIndices(angles, numInt)
self.slices.append(sl)
i += 1
def createSliceIndices(self, vals, numInts):
intLen = (max(vals) - min(vals)) / float(numInts)
indices = [[] for i in range(numInts)]
for x in vals:
i = int(x / intLen)
if i < numInts - 1:
indices[i].append(x)
else:
indices[-1].append(x)
if [] in indices:
raise ValueError('Try reducing the number of intervals.')
sliceInd = [[] for i in range(numInts)]
for i in range(numInts):
minVal = indices[i][0]
maxVal = indices[i][-1]
ind0 = np.where(vals == minVal)[0][0]
ind1 = np.where(vals == maxVal)[-1][-1] + 1
sliceInd[i].append(ind0)
sliceInd[i].append(ind1)
sliceInd[-1][1] += 1
return sliceInd
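# Illustrative note (hypothetical numbers): for sorted angles spanning
# [0, 90] and numInts=3 the interval length is 30, so samples are bucketed
# into [0, 30), [30, 60) and [60, 90], and each bucket becomes a
# [first, last+1) index pair into the angle array; the final pair is
# extended by one so the last sample is included.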
def createFigure(self):
self.axes = []
self.createFigureAxes()
if self.opts['range']:
self.createSlices()
self.plotRangeArea()
if self.opts['dataPoints']:
self.createDataPointsPlot()
if self.opts['analytical']:
self.createAnSolPlot()
if self.opts['optSim']:
self.createOptSimPlot()
self.setXLim()
self.createLegend()
self.setSubplotTitles()
self.setYlimits()
def createLegend(self):
handles = []
labels = []
h, l = self.axes[0].get_legend_handles_labels()
ind = len(self.dataStr) - 1
self.axes[ind].legend(h, l, bbox_to_anchor=(1, 1.02), loc=2)
def setXLim(self):
for n in range(len(self.dataStr)):
i = self.getItemKey(n)
for sif in self.sifs:
ax = self.getAxes(i, sif)
angles = self.dataDicts[n][0]
ax.set_xlim((min(angles), max(angles)))
def createOptSimPlot(self):
for n in range(len(self.dataDicts)):
i = self.getItemKey(n)
ad = dp.AnalysisData(self.dataStr[n][1])
ad.calcAnSol()
ad.calculateStats()
angles = ad.getAngles()
for sif in self.sifs:
ax = self.getAxes(i, sif)
res = ad.getResults()[sif]
ax.plot(angles, res, 'lime', lw=1,
label='optSim')
def createDataPointsPlot(self):
for n in range(len(self.dataStr)):
i = self.getItemKey(n)
for sif in self.sifs:
angles = self.dataDicts[n][0]
ax = self.getAxes(i, sif)
for dt in self.opts['data']:
dInd, color = self.getDataIndAndColor(dt)
data = self.dataDicts[n][dInd][sif]
alpha = self.calcAlphaValRP(n)
ax.plot(angles, data,
linestyle='-', marker='.',
color=color, alpha=alpha,
label=dt)
def calcAlphaValRP(self, n):
vals = len(self.dataDicts[n][0])
if vals > 1000:
return 0.05
else:
return 0.3
def createAnSolPlot(self):
for n in range(len(self.items.keys())):
i = self.getItemKey(n)
for sif in self.sifs:
ax = self.getAxes(i, sif)
angles = self.dataDicts[n][0]
anSol = self.dataDicts[n][2][sif]
ax.plot(angles, anSol, 'k', lw=2,
label='analytical')
def getAxes(self, item, sif):
itemInd = sorted(self.items.keys()).index(item)
itemLen = len(self.items)
ax = self.axes[itemLen * self.sifs.index(sif) + itemInd]
return ax
def getItemKey(self, n):
return sorted(self.items.keys())[n]
def plotRangeArea(self):
for n in range(len(self.items)):
i = self.getItemKey(n)
for sif in self.sifs:
axes = self.getAxes(i, sif)
self.plotRangeAreaPerAxes(axes, n, sif)
def getDataIndAndColor(self, dataType):
dataInds = {'results': 1, 'errors': 3}
colors = {'results': 'b', 'errors': 'r'}
return dataInds[dataType], colors[dataType]
def createVerts(self, slices, angles, values, func):
x, y, verts = [], [], []
valsl = [values[s[0] - 1 if s[0] > 0 else 0:s[1]] for s in slices]
angsl = [angles[s[0] - 1 if s[0] > 0 else 0:s[1]] for s in slices]
for a in angsl:
x.append(a[0])
x.append(a[-1])
for v in valsl:
y.append(func(v))
y.append(func(v))
verts = [[xi, yi] for xi, yi in zip(x, y)]
return verts
def createVerts2(self, slices, angles, values, func):
x, y, verts = [], [], []
valsl = [values[s[0]:s[1]] for s in slices]
angsl = [angles[s[0]:s[1]] for s in slices]
for an, va in zip(angsl, valsl):
y.append(func(va))
print va, y
print np.where(va == y[-1])
ind = np.where(va == y[-1])[0][0]
x.append(an[ind])
x.append(angles[-1])
x.insert(0, angles[0])
yavg = 0.5 * (y[0] + y[-1])
y.append(yavg)
y.insert(0, yavg)
verts = [[xi, yi] for xi, yi in zip(x, y)]
return verts
def plotRangeAreaPerAxes(self, axes, itemInd, sif):
vertMethods = {1: self.createVerts, 2: self.createVerts2}
vertFunc = vertMethods[self.opts['rangeType']]
slices = self.slices[itemInd]
angles = self.dataDicts[itemInd][0]
for dt in self.opts['data']:
dInd, color = self.getDataIndAndColor(dt)
values = self.dataDicts[itemInd][dInd][sif]
verts1 = vertFunc(slices, angles, values, min)
verts2 = vertFunc(slices, angles, values, max)[::-1]
verts = verts1 + verts2 + [verts2[-1]]
codes = self.createClosedPathCodes(verts)
p = Path(verts, codes)
patch = mpl.patches.PathPatch(
p,
facecolor=color,
edgecolor='none',
alpha=0.2,
label=dt +
' range')
axes.add_patch(patch)
patch = mpl.patches.PathPatch(p, edgecolor=color,
fill=False, lw=0.75, alpha=0.6)
axes.add_patch(patch)
def createClosedPathCodes(self, verts):
codes = [Path.MOVETO]
for i in range(len(verts) - 2):
codes.append(Path.LINETO)
codes.append(Path.CLOSEPOLY)
return codes
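# A minimal, self-contained sketch of the verts/codes pattern used above (the
# helper name and the unit-square vertices are illustrative only, not part of
# the original analysis code):
def _demo_closed_path_patch():
    from matplotlib.path import Path
    from matplotlib.patches import PathPatch
    verts = [[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]
    codes = [Path.MOVETO] + [Path.LINETO] * (len(verts) - 2) + [Path.CLOSEPOLY]
    return PathPatch(Path(verts, codes), facecolor='b', edgecolor='none', alpha=0.2)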
class BoundsCompPlot(SIMCompAnalysis):
def createBoundsPlot(self, items, targets, fig, tol=0.1, iterLim=100):
self.items = items
self.targets = targets
self.fig = fig
self.iterLim = iterLim
self.tol = tol
self.createDataStr()
self.createDataDicts()
self.printStats()
self.createFigure()
def createDataStr(self):
self.dataStr = []
qdict = self.queue.getQueueDict()
for i in self.items:
dd = [{i: qdict[i]}, None, 'angles',
self.getSubplotTitle(qdict[i])]
self.dataStr.append(dd)
def createDataDicts(self):
self.dataDicts = []
for n in range(len(self.items)):
i = self.items[n]
log = {s: {t: {'sigma': [], 'pip': []}
for t in self.targets.keys()}
for s in self.sifs}
node = self.dataStr[n][0][i]
adn = dp.AnalysisNodeData(node, self.sifs)
adn.performOperations()
sigmaUp = 2 * adn.getAnSolParams()['sigma']
sigmaLow = 0
for s in self.sifs:
for t in self.targets.keys():
log[s][t] = self.findSigmaBound(
adn, sigmaUp, sigmaLow, s,
self.targets[t], log[s][t])
self.dataDicts.append([adn, log])
def printStats(self):
for n in range(len(self.dataStr)):
i = self.items[n]
print self.dataStr[n][3]
log = self.dataDicts[n][1]
for s in self.sifs:
sigmas, bounds, its = [], [], []
for t in log[s].keys():
u = log[s][t]
sigmas.append(u['sigma'][-1])
bounds.append(u['pip'][-1])
its.append(len(u['sigma']))
info = '{0}sigma=[{1:.4}, {2:.4}] | bounds=[{3:.4}%, {4:.4}%] | iterations=[{5}, {6}]'.format(
' {0} '.format(s), sigmas[0], sigmas[1], bounds[0], bounds[1], its[0], its[1])
print info
def createFigure(self):
self.axes = []
self.createFigureAxes()
self.createPlot()
self.setXLimits()
self.setYlimits()
self.setSubplotTitles()
def setXLimits(self):
for n in range(len(self.dataStr)):
i = self.items[n]
adn = self.dataDicts[n][0]
a = adn.getAngles()
lims = (min(a), max(a))
for s in self.sifs:
ax = self.getAxes(i, s)
ax.set_xlim(lims)
def getAxes(self, item, sif):
itemLen = len(self.items)
itemInd = self.items.index(item)
ax = self.axes[itemLen * self.sifs.index(sif) + itemInd]
return ax
def getAlphaVal(self, item):
n = self.items.index(item)
adn = self.dataDicts[n][0]
if len(adn.getAngles()) > 1000:
return 0.1
else:
return 1
def createPlot(self):
for n in range(len(self.dataStr)):
i = self.items[n]
adn = self.dataDicts[n][0]
logs = self.dataDicts[n][1]
alpha = self.getAlphaVal(i)
for s in self.sifs:
ax = self.getAxes(i, s)
sigmaUpper = logs[s]['upper']['sigma'][-1]
sigmaLower = logs[s]['lower']['sigma'][-1]
ins, outs = self.getInOutPoints(adn,
sigmaLower, sigmaUpper, s)
ax.plot(ins[0], ins[1], 'b.',
label='inside bounds', alpha=alpha)
ax.plot(outs[0], outs[1], 'r.',
label='outside bounds', alpha=alpha)
angles = adn.getAngles()
anSol = adn.getAnSol()[s]
ax.plot(angles, anSol, 'k', lw=1.5,
label='analytical')
lowerBound = adn.calcSIFsForSigmaAndSIF(
sigmaLower, s)
upperBound = adn.calcSIFsForSigmaAndSIF(
sigmaUpper, s)
ax.plot(angles, upperBound, 'lime', lw=1.5,
label='bounds')
ax.plot(angles, lowerBound, 'lime', lw=1.5)
def findSigmaBound(self, adn, sigmaUp, sigmaLow,
sif, target, log):
sigma = 0.5 * (sigmaUp + sigmaLow)
pip = self.getPercentPointsInPoly(adn, sigma, sif)
log['pip'].append(pip)
log['sigma'].append(sigma)
if ((pip >= target - self.tol and pip <= target + self.tol) or
(len(log['sigma']) == self.iterLim)):
return log
elif pip < target - self.tol:
sigmaLow = sigma
return self.findSigmaBound(adn, sigmaUp, sigmaLow,
sif, target, log)
elif pip > target + self.tol:
sigmaUp = sigma
return self.findSigmaBound(adn, sigmaUp, sigmaLow,
sif, target, log)
else:
raise ValueError('unexpected condition reached')
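# findSigmaBound() above is a recursive bisection on sigma: the interval
# [sigmaLow, sigmaUp] is halved until the percentage of points inside the
# bound ("pip") reaches `target` within `tol`, or `iterLim` evaluations have
# been logged.  A compact iterative sketch of the same idea, assuming (as the
# method does) that the inside-percentage grows with sigma; `percent_inside`
# is a hypothetical stand-in for getPercentPointsInPoly():
def _bisect_sigma(percent_inside, sigma_low, sigma_up, target, tol=0.1, iter_lim=100):
    for _ in range(iter_lim):
        sigma = 0.5 * (sigma_low + sigma_up)
        pip = percent_inside(sigma)
        if abs(pip - target) <= tol:
            return sigma
        if pip < target:
            sigma_low = sigma   # too few points inside: sigma must grow
        else:
            sigma_up = sigma    # too many points inside: sigma must shrink
    return 0.5 * (sigma_low + sigma_up)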
def getPercentPointsInPoly(self, adn, sigma, sif):
allnum, numin, numout = self.countPointInOutOfContour(
adn, sigma, sif)
assert abs(numin + numout - allnum) < 10e-8
return float(numin) / float(allnum) * 100
def countPointInOutOfContour(self, adn, sigma, sif):
tfl = self.getInOutOfContour(adn, sigma, sif)
numin = np.sum(tfl)
allnum = len(tfl)
numout = allnum - numin
return allnum, numin, numout
def getInOutOfContour(self, adn, sigma, sif):
angles = adn.getAngles()
results = abs(adn.getResults()[sif])
points = [[xi, yi] for xi, yi in zip(angles, results)]
yVals = abs(np.array(adn.calcSIFsForSigmaAndSIF(sigma, sif)))
return self.getInOutPointsArray(angles, yVals, points)
def getInOutPointsArray(self, angles, yVals, points):
path = Path(self.createVertsForPolyPath(angles, yVals))
return path.contains_points(points, radius=0)
def getInOutPoints(self, adn, sigmaLow, sigmaUp, sif):
inoutLow = self.getInOutOfContour(adn, sigmaLow, sif)
inoutUp = self.getInOutOfContour(adn, sigmaUp, sif)
angles = adn.getAngles()
res = adn.getResults()[sif]
inAngles, inVals = [], []
outAngles, outVals = [], []
for i in range(len(inoutUp)):
if inoutLow[i] or not inoutUp[i]:
outAngles.append(angles[i])
outVals.append(res[i])
else:
inAngles.append(angles[i])
inVals.append(res[i])
return [[inAngles, inVals], [outAngles, outVals]]
def createVertsForPolyPath(self, x, y):
verts = [[xi, yi] for xi, yi in zip(x, y)]
verts.insert(0, [verts[0][0], -10e16])
verts.append([verts[-1][0], -10e16])
return verts
| mit |
slarosa/QGIS | python/plugins/sextante/algs/MeanAndStdDevPlot.py | 3 | 3304 | # -*- coding: utf-8 -*-
"""
***************************************************************************
MeanAndStdDevPlot.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
import numpy as np
from PyQt4.QtCore import *
from qgis.core import *
from sextante.parameters.ParameterTable import ParameterTable
from sextante.parameters.ParameterTableField import ParameterTableField
from sextante.core.GeoAlgorithm import GeoAlgorithm
from sextante.outputs.OutputHTML import OutputHTML
from sextante.tools import *
from sextante.core.QGisLayers import QGisLayers
class MeanAndStdDevPlot(GeoAlgorithm):
INPUT = "INPUT"
OUTPUT = "OUTPUT"
NAME_FIELD = "NAME_FIELD"
MEAN_FIELD = "MEAN_FIELD"
STDDEV_FIELD = "STDDEV_FIELD"
def processAlgorithm(self, progress):
uri = self.getParameterValue(self.INPUT)
layer = QGisLayers.getObjectFromUri(uri)
namefieldname = self.getParameterValue(self.NAME_FIELD)
meanfieldname = self.getParameterValue(self.MEAN_FIELD)
stddevfieldname = self.getParameterValue(self.STDDEV_FIELD)
output = self.getOutputValue(self.OUTPUT)
values = vector.getAttributeValues(layer, namefieldname, meanfieldname, stddevfieldname)
plt.close()
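# Bar chart of the per-category means with the standard deviations drawn as
# error bars; the figure is then saved as <output>.png and wrapped in a
# minimal HTML page that embeds the image.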
ind = np.arange(len(values[namefieldname]))
width = 0.8
plt.bar(ind, values[meanfieldname], width,
color='r',
yerr=values[stddevfieldname],
error_kw=dict(ecolor='yellow'))
plt.xticks(ind, values[namefieldname], rotation = 45)
plotFilename = output +".png"
lab.savefig(plotFilename)
f = open(output, "w")
f.write("<img src=\"" + plotFilename + "\"/>")
f.close()
def defineCharacteristics(self):
self.name = "Mean and standard deviation plot"
self.group = "Graphics"
self.addParameter(ParameterTable(self.INPUT, "Input table"))
self.addParameter(ParameterTableField(self.NAME_FIELD, "Category name field", self.INPUT,ParameterTableField.DATA_TYPE_ANY))
self.addParameter(ParameterTableField(self.MEAN_FIELD, "Mean field", self.INPUT))
self.addParameter(ParameterTableField(self.STDDEV_FIELD, "StdDev field", self.INPUT))
self.addOutput(OutputHTML(self.OUTPUT, "Output"))
| gpl-2.0 |
mne-tools/mne-tools.github.io | 0.14/_downloads/plot_topo_compare_conditions.py | 3 | 2175 | """
=================================================
Compare evoked responses for different conditions
=================================================
In this example, an Epochs object for visual and
auditory responses is created. Both conditions
are then accessed by their respective names to
create a sensor layout plot of the related
evoked responses.
"""
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.viz import plot_evoked_topo
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = 1
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: MEG + STI 014 - bad channels (modify to your needs)
include = [] # or stim channels ['STI 014']
# bad channels in raw.info['bads'] will be automatically excluded
# Set up amplitude-peak rejection values for MEG channels
reject = dict(grad=4000e-13, mag=4e-12)
# pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
include=include, exclude='bads')
# Create epochs including different events
event_id = {'audio/left': 1, 'audio/right': 2,
'visual/left': 3, 'visual/right': 4}
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0), reject=reject)
# Generate list of evoked objects from conditions names
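# 'left' and 'right' are partial, tag-based condition names: with the
# '/'-separated keys of event_id above, epochs['left'] pools the
# 'audio/left' and 'visual/left' epochs before averaging.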
evokeds = [epochs[name].average() for name in ('left', 'right')]
###############################################################################
# Show topography for two different conditions
colors = 'yellow', 'green'
title = 'MNE sample data - left vs right (A/V combined)'
plot_evoked_topo(evokeds, color=colors, title=title)
plt.show()
| bsd-3-clause |
nesterione/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 30 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First, clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters to the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
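# include_self=False (below) keeps each sample out of its own neighborhood,
# so the connectivity matrix only links a point to its 30 nearest *other*
# samples.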
knn_graph = kneighbors_graph(X, 30, include_self=False)
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause |
kkouer/PcGcs | Lib/site-packages/numpy/core/code_generators/ufunc_docstrings.py | 57 | 85797 | # Docstrings for generated ufuncs
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
add_newdoc('numpy.core.umath', 'absolute',
"""
Calculate the absolute value element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
>>> np.absolute(1.2 + 1j)
1.5620499351813308
Plot the function over ``[-10, 10]``:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10, 101)
>>> plt.plot(x, np.absolute(x))
>>> plt.show()
Plot the function over the complex plane:
>>> xx = x + 1j * x[:, np.newaxis]
>>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10])
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'add',
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be added. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
y : ndarray or scalar
The sum of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` + `x2` in terms of array broadcasting.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
""")
add_newdoc('numpy.core.umath', 'arccos',
"""
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``.
Parameters
----------
x : array_like
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
out : ndarray, optional
Array of the same shape as `a`, to store results in. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`x`-coordinate in radians [0, pi]. If `x` is a scalar then a
scalar is returned, otherwise an array of the same shape as `x`
is returned.
See Also
--------
cos, arctan, arcsin, emath.arccos
Notes
-----
`arccos` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cos(z) = x`. The convention is to return
the angle `z` whose real part lies in `[0, pi]`.
For real-valued input data types, `arccos` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytic function that
has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse `cos` is also known as `acos` or cos^-1.
References
----------
M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
Plot arccos:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-1, 1, num=100)
>>> plt.plot(x, np.arccos(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arccosh',
"""
Inverse hyperbolic cosine, elementwise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array of the same shape as `x`, to store results in.
See `doc.ufuncs` (Section "Output arguments") for details.
Returns
-------
y : ndarray
Array of the same shape as `x`.
See Also
--------
cosh, arcsinh, sinh, arctanh, tanh
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]` and the real part in
``[0, inf]``.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccosh` is a complex analytical function that
has a branch cut `[-inf, 1]` and is continuous from above on it.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arccosh
Examples
--------
>>> np.arccosh([np.e, 10.0])
array([ 1.65745445, 2.99322285])
>>> np.arccosh(1)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsin',
"""
Inverse sine, element-wise.
Parameters
----------
x : array_like
`y`-coordinate on the unit circle.
out : ndarray, optional
Array of the same shape as `x`, in which to store the results.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``. If `x` is a scalar, a scalar
is returned, otherwise an array.
See Also
--------
sin, cos, arccos, tan, arctan, arctan2, emath.arcsin
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsin` is a complex analytic function that
has, by convention, the branch cuts [-inf, -1] and [1, inf] and is
continuous from above on the former and from below on the latter.
The inverse sine is also known as `asin` or sin^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsinh',
"""
Inverse hyperbolic sine elementwise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : ndarray
Array of the same shape as `x`.
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
returns ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsinh` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from
the right on the former and from the left on the latter.
The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arcsinh
Examples
--------
>>> np.arcsinh(np.array([np.e, 10.0]))
array([ 1.72538256, 2.99822295])
""")
add_newdoc('numpy.core.umath', 'arctan',
"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : array_like
Input values. `arctan` is applied to each element of `x`.
Returns
-------
out : ndarray
Out has the same shape as `x`. Its real part is in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
It is a scalar if `x` is a scalar.
See Also
--------
arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`)
and the positive `x`-axis.
angle : Argument of complex values.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctan` is a complex analytic function that
has [`1j, infj`] and [`-1j, -infj`] as branch cuts, and is continuous
from the left on the former and from the right on the latter.
The inverse tangent is also known as `atan` or tan^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be pi/4:
>>> np.arctan([0, 1])
array([ 0. , 0.78539816])
>>> np.pi/4
0.78539816339744828
Plot arctan:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10)
>>> plt.plot(x, np.arctan(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arctan2',
"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : array_like, real-valued
`y`-coordinates.
x2 : array_like, real-valued
`x`-coordinates. `x2` must be broadcastable to match the shape of
`x1` or vice versa.
Returns
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
See Also
--------
arctan, tan, angle
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
array([ 0. , 3.14159265, 0.78539816])
""")
add_newdoc('numpy.core.umath', '_arg',
"""
DO NOT USE, ONLY FOR TESTING
""")
add_newdoc('numpy.core.umath', 'arctanh',
"""
Inverse hyperbolic tangent elementwise.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Array of the same shape as `x`.
See Also
--------
emath.arctanh
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function that
has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> np.arctanh([0, -0.5])
array([ 0. , -0.54930614])
""")
add_newdoc('numpy.core.umath', 'bitwise_and',
"""
Compute the bit-wise AND of two arrays element-wise.
Computes the bit-wise AND of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``&``.
Parameters
----------
x1, x2 : array_like
Only integer types are handled (including booleans).
Returns
-------
out : array_like
Result.
See Also
--------
logical_and
bitwise_or
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise AND of 13 and 17 is
therefore ``00000001``, or 1:
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.binary_repr(12)
'1100'
>>> np.bitwise_and([14,3], 13)
array([12, 1])
>>> np.bitwise_and([11,7], [4,25])
array([0, 1])
>>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
array([ 2, 4, 16])
>>> np.bitwise_and([True, True], [False, True])
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_or',
"""
Compute the bit-wise OR of two arrays element-wise.
Computes the bit-wise OR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``|``.
Parameters
----------
x1, x2 : array_like
Only integer types are handled (including booleans).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
Result.
See Also
--------
logical_or
bitwise_and
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 has the binary representation ``00001101``. Likewise,
16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is
then ``00011101``, or 29:
>>> np.bitwise_or(13, 16)
29
>>> np.binary_repr(29)
'11101'
>>> np.bitwise_or(32, 2)
34
>>> np.bitwise_or([33, 4], 1)
array([33, 5])
>>> np.bitwise_or([33, 4], [1, 2])
array([33, 6])
>>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
array([ 6, 5, 255])
>>> np.array([2, 5, 255]) | np.array([4, 4, 4])
array([ 6, 5, 255])
>>> np.bitwise_or(np.array([2, 5, 255, 2147483647L], dtype=np.int32),
... np.array([4, 4, 4, 2147483647L], dtype=np.int32))
array([ 6, 5, 255, 2147483647])
>>> np.bitwise_or([True, True], [False, True])
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_xor',
"""
Compute the bit-wise XOR of two arrays element-wise.
Computes the bit-wise XOR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``^``.
Parameters
----------
x1, x2 : array_like
Only integer types are handled (including booleans).
Returns
-------
out : array_like
Result.
See Also
--------
logical_xor
bitwise_and
bitwise_or
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise XOR of 13 and 17 is
therefore ``00011100``, or 28:
>>> np.bitwise_xor(13, 17)
28
>>> np.binary_repr(28)
'11100'
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor([31,3], 5)
array([26, 6])
>>> np.bitwise_xor([31,3], [5,6])
array([26, 5])
>>> np.bitwise_xor([True, True], [False, True])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'ceil',
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The ceiling of each element in `x`, with `float` dtype.
See Also
--------
floor, trunc, rint
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'trunc',
"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The truncated value of each element in `x`.
See Also
--------
ceil, floor, rint
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'conjugate',
"""
Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The complex conjugate of `x`, with same dtype as `x`.
Examples
--------
>>> np.conjugate(1+2j)
(1-2j)
>>> x = np.eye(2) + 1j * np.eye(2)
>>> np.conjugate(x)
array([[ 1.-1.j, 0.-0.j],
[ 0.-0.j, 1.-1.j]])
""")
add_newdoc('numpy.core.umath', 'cos',
"""
Cosine elementwise.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding cosine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
>>> # Example of providing the optional output parameter
>>> out1 = np.empty(1)
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'cosh',
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Output array of same shape as `x`.
Examples
--------
>>> np.cosh(0)
1.0
The hyperbolic cosine describes the shape of a hanging cable:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 1000)
>>> plt.plot(x, np.cosh(x))
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'degrees',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as x.
Returns
-------
y : ndarray of floats
The corresponding degree values; if `out` was supplied this is a
reference to it.
See Also
--------
rad2deg : equivalent function
Examples
--------
Convert a radian array to degrees
>>> rad = np.arange(12.)*np.pi/6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240.,
270., 300., 330.])
>>> out = np.zeros((rad.shape))
>>> r = np.degrees(rad, out)
>>> np.all(r == out)
True
""")
add_newdoc('numpy.core.umath', 'rad2deg',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Angle in radians.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The corresponding angle in degrees.
See Also
--------
deg2rad : Convert angles from degrees to radians.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
rad2deg(x) is ``180 * x / pi``.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
""")
add_newdoc('numpy.core.umath', 'divide',
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : {ndarray, scalar}
The quotient `x1/x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
See Also
--------
seterr : Set whether to raise or warn on overflow, underflow and division
by zero.
Notes
-----
Equivalent to `x1` / `x2` in terms of array-broadcasting.
Behavior on division by zero can be changed using `seterr`.
When both `x1` and `x2` are of an integer type, `divide` will return
integers and throw away the fractional part. Moreover, division by zero
always yields zero in integer arithmetic.
Examples
--------
>>> np.divide(2.0, 4.0)
0.5
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types:
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
Division by zero always yields zero in integer arithmetic, and does not
raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
Division by zero can, however, be caught using `seterr`:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
""")
add_newdoc('numpy.core.umath', 'equal',
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays of the same shape.
Returns
-------
out : {ndarray, bool}
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal([0, 1, 3], np.arange(3))
array([ True, True, False], dtype=bool)
What is compared are values, not types. So an int (1) and an array of
length one can evaluate as True:
>>> np.equal(1, np.ones(1))
array([ True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'exp',
"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Output array, element-wise exponential of `x`.
See Also
--------
expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
exp2 : Calculate ``2**x`` for all elements in the array.
Notes
-----
The irrational number ``e`` is also known as Euler's number. It is
approximately 2.718281, and is the base of the natural logarithm,
``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`,
then :math:`e^x = y`). For real input, ``exp(x)`` is always positive.
For complex arguments, ``x = a + ib``, we can write
:math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already
known (it is the real argument, described above). The second term,
:math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with magnitude
1 and a periodic phase.
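For example, Euler's identity :math:`e^{i\\pi} = -1` can be checked
numerically:
>>> np.allclose(np.exp(1j * np.pi), -1.0)
True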
References
----------
.. [1] Wikipedia, "Exponential function",
http://en.wikipedia.org/wiki/Exponential_function
.. [2] M. Abramovitz and I. A. Stegun, "Handbook of Mathematical Functions
with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
http://www.math.sfu.ca/~cbm/aands/page_69.htm
Examples
--------
Plot the magnitude and phase of ``exp(x)`` in the complex plane:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2*np.pi, 2*np.pi, 100)
>>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane
>>> out = np.exp(xx)
>>> plt.subplot(121)
>>> plt.imshow(np.abs(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Magnitude of exp(x)')
>>> plt.subplot(122)
>>> plt.imshow(np.angle(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Phase (angle) of exp(x)')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'exp2',
"""
Calculate `2**p` for all `p` in the input array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array to insert results into.
Returns
-------
out : ndarray
Element-wise 2 to the power `x`.
See Also
--------
exp : Calculate the exponential (``e**x``) of all elements in the array.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> np.exp2([2, 3])
array([ 4., 8.])
""")
add_newdoc('numpy.core.umath', 'expm1',
"""
Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Element-wise exponential minus one: ``out = exp(x) - 1``.
See Also
--------
log1p : ``log(1 + x)``, the inverse of expm1.
Notes
-----
This function provides greater precision than the formula ``exp(x) - 1``
for small values of ``x``.
Examples
--------
The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to
about 32 significant digits. This example shows the superiority of
expm1 in this case.
>>> np.expm1(1e-10)
1.00000000005e-10
>>> np.exp(1e-10) - 1
1.000000082740371e-10
""")
add_newdoc('numpy.core.umath', 'fabs',
"""
Compute the absolute values elementwise.
This function returns the absolute values (positive magnitude) of the data
in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : array_like
The array of numbers for which the absolute values are required. If
`x` is a scalar, the result `y` will also be a scalar.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : {ndarray, scalar}
The absolute values of `x`, the returned values are always floats.
See Also
--------
absolute : Absolute values including `complex` types.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs([-1.2, 1.2])
array([ 1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'floor',
"""
Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The floor of each element in `x`.
See Also
--------
ceil, trunc, rint
Notes
-----
Some spreadsheet programs calculate the "floor-towards-zero", in other
words ``floor(-2.5) == -2``. NumPy, however, uses a definition of
`floor` such that `floor(-2.5) == -3`.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'floor_divide',
"""
Return the largest integer smaller or equal to the division of the inputs.
Parameters
----------
x1 : array_like
Numerator.
x2 : array_like
Denominator.
Returns
-------
y : ndarray
y = floor(`x1`/`x2`)
See Also
--------
divide : Standard division.
floor : Round a number to the nearest integer toward minus infinity.
ceil : Round a number to the nearest integer toward infinity.
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'fmod',
"""
Return the element-wise remainder of division.
This is the NumPy implementation of the C library function ``fmod``; the
result has the same sign as the dividend `x1` (see Notes) and should not be
confused with the Python modulo operator ``%``.
Parameters
----------
x1 : array_like
Dividend.
x2 : array_like
Divisor.
Returns
-------
y : array_like
The remainder of the division of `x1` by `x2`.
See Also
--------
remainder : Modulo operation where the quotient is `floor(x1/x2)`.
divide
Notes
-----
The result of the modulo operation for negative dividend and divisors is
bound by conventions. In `fmod`, the sign of the remainder is the sign of
the dividend. In `remainder`, the sign of the divisor does not affect the
sign of the result.
Examples
--------
>>> np.fmod([-3, -2, -1, 1, 2, 3], 2)
array([-1, 0, -1, 1, 0, 1])
>>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
array([1, 0, 1, 1, 0, 1])
>>> np.fmod([5, 3], [2, 2.])
array([ 1., 1.])
>>> a = np.arange(-3, 3).reshape(3, 2)
>>> a
array([[-3, -2],
[-1, 0],
[ 1, 2]])
>>> np.fmod(a, [2,2])
array([[-1, 0],
[-1, 0],
[ 1, 0]])
""")
add_newdoc('numpy.core.umath', 'greater',
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater_equal, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater([4,2],[2,2])
array([ True, False], dtype=bool)
If the inputs are ndarrays, then np.greater is equivalent to '>'.
>>> a = np.array([4,2])
>>> b = np.array([2,2])
>>> a > b
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'greater_equal',
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater_equal([4, 2, 1], [2, 2, 2])
array([ True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'hypot',
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
""")
add_newdoc('numpy.core.umath', 'invert',
"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
For signed integer inputs, the two's complement is returned.
In a two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. An N-bit two's-complement
system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
x1 : array_like
Only integer types are handled (including booleans).
Returns
-------
out : array_like
Result.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
References
----------
.. [1] Wikipedia, "Two's complement",
http://en.wikipedia.org/wiki/Two's_complement
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> np.invert(np.array([13], dtype=uint8))
array([242], dtype=uint8)
>>> np.binary_repr(13, width=8)
'00001101'
>>> np.binary_repr(242, width=8)
'11110010'
The result depends on the bit-width:
>>> np.invert(np.array([13], dtype=uint16))
array([65522], dtype=uint16)
>>> np.binary_repr(13, width=16)
'0000000000001101'
>>> np.binary_repr(65522, width=16)
'1111111111110010'
When using signed integer types the result is the two's complement of
the result for the unsigned type:
>>> np.invert(np.array([13], dtype=int8))
array([-14], dtype=int8)
>>> np.binary_repr(-14, width=8)
'11110010'
Booleans are accepted as well:
>>> np.invert(np.array([True, False]))
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'isfinite',
"""
Test element-wise for finite-ness (not infinity or not Not a Number).
The result is returned as a boolean array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
y : ndarray, bool
For scalar input, the result is a new boolean with value True
if the input is finite; otherwise the value is False (input is
either positive infinity, negative infinity or Not a Number).
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the corresponding
element of the input is finite; otherwise the values are False (element
is either positive infinity, negative infinity or Not a Number).
See Also
--------
isinf, isneginf, isposinf, isnan
Notes
-----
Not a Number, positive infinity and negative infinity are considered
to be non-finite.
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity,
that positive infinity is not equivalent to negative infinity, and that
plain infinity is treated as positive infinity.
Errors result if the second argument is also supplied when `x` is a scalar
input, or if first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(np.NINF)
False
>>> np.isfinite([np.log(-1.),1.,np.log(0)])
array([False, True, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isfinite(x, y)
array([0, 1, 0])
>>> y
array([0, 1, 0])
""")
add_newdoc('numpy.core.umath', 'isinf',
"""
Test element-wise for positive or negative infinity.
Return a bool-type array, the same shape as `x`, True where ``x ==
+/-inf``, False everywhere else.
Parameters
----------
x : array_like
Input values
out : array_like, optional
An array with the same shape as `x` to store the result.
Returns
-------
y : bool (scalar) or bool-type ndarray
For scalar input, the result is a new boolean with value True
if the input is positive or negative infinity; otherwise the value
is False.
For array input, the result is a boolean array with the same
shape as the input and the values are True where the
corresponding element of the input is positive or negative
infinity; elsewhere the values are False. If a second argument
was supplied the result is stored there. If the type of that array
is a numeric type the result is represented as zeros and ones, if
the type is boolean then as False and True, respectively.
The return value `y` is then a reference to that array.
See Also
--------
isneginf, isposinf, isnan, isfinite
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
Errors result if the second argument is supplied when the first
argument is a scalar, or if the first and second arguments have
different shapes.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.NINF)
True
>>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
array([ True, True, False, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isinf(x, y)
array([1, 0, 1])
>>> y
array([1, 0, 1])
""")
add_newdoc('numpy.core.umath', 'isnan',
"""
Test element-wise for Not a Number (NaN), return result as a bool array.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : {ndarray, bool}
For scalar input, the result is a new boolean with value True
if the input is NaN; otherwise the value is False.
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the corresponding
element of the input is NaN; otherwise the values are False.
See Also
--------
isinf, isneginf, isposinf, isfinite
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan([np.log(-1.),1.,np.log(0)])
array([ True, False, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'left_shift',
"""
Shift the bits of an integer to the left.
Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x1` by ``2**x2``.
Parameters
----------
x1 : array_like of integer type
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
Returns
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
See Also
--------
right_shift : Shift the bits of an integer to the right.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(5)
'101'
>>> np.left_shift(5, 2)
20
>>> np.binary_repr(20)
'10100'
>>> np.left_shift(5, [1,2,3])
array([10, 20, 40])
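The equivalence with multiplication by ``2**x2`` can be checked directly:
>>> np.left_shift(5, 2) == 5 * 2**2
True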
""")
add_newdoc('numpy.core.umath', 'less',
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less_equal, greater_equal, equal, not_equal
Examples
--------
>>> np.less([1, 2], [2, 2])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'less_equal',
"""
Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, greater_equal, equal, not_equal
Examples
--------
>>> np.less_equal([4, 2, 1], [2, 2, 2])
array([False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'log',
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base `e`.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
See Also
--------
log10, log2, log1p, emath.log
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log([1, np.e, np.e**2, 0])
array([ 0., 1., 2., -Inf])
""")
add_newdoc('numpy.core.umath', 'log10',
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
See Also
--------
emath.log10
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `10**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log10` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log10` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log10`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log10([1e-15, -3.])
array([-15., NaN])
""")
add_newdoc('numpy.core.umath', 'log2',
"""
Base-2 logarithm of `x`.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Base-2 logarithm of `x`.
See Also
--------
log, log10, log1p, emath.log2
Notes
-----
.. versionadded:: 1.3.0
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `2**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log2` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log2` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log2`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-Inf, 0., 1., 4.])
>>> xi = np.array([0+1.j, 1, 2+0.j, 4.j])
>>> np.log2(xi)
array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j])
""")
add_newdoc('numpy.core.umath', 'logaddexp',
"""
Logarithm of the sum of exponentiations of the inputs.
Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
statistics where the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the logarithm of the calculated probability is stored. This function
allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
Returns
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
See Also
--------
logaddexp2: Logarithm of the sum of exponentiations of inputs in base-2.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log(1e-50)
>>> prob2 = np.log(2.5e-50)
>>> prob12 = np.logaddexp(prob1, prob2)
>>> prob12
-113.87649168120691
>>> np.exp(prob12)
3.5000000000000057e-50
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
learning when the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the base-2 logarithm of the calculated probability can be used instead.
This function allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
out : ndarray, optional
Array to store results in.
Returns
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
See Also
--------
logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log2(1e-50)
>>> prob2 = np.log2(2.5e-50)
>>> prob12 = np.logaddexp2(prob1, prob2)
>>> prob1, prob2, prob12
(-166.09640474436813, -164.77447664948076, -164.28904982231052)
>>> 2**prob12
3.4999999999999914e-50
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
See Also
--------
expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log1p` is a complex analytical function that
has a branch cut `[-inf, -1]` and is continuous from above on it. `log1p`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> np.log(1 + 1e-99)
0.0
""")
add_newdoc('numpy.core.umath', 'logical_and',
"""
Compute the truth value of x1 AND x2 elementwise.
Parameters
----------
x1, x2 : array_like
Input arrays. `x1` and `x2` must be of the same shape.
Returns
-------
y : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
AND operation on corresponding elements of `x1` and `x2`.
See Also
--------
logical_or, logical_not, logical_xor
bitwise_and
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and([True, False], [False, False])
array([False, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_and(x>1, x<4)
array([False, False, True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_not',
"""
Compute the truth value of NOT x elementwise.
Parameters
----------
x : array_like
Logical NOT is applied to the elements of `x`.
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
See Also
--------
logical_and, logical_or, logical_xor
Examples
--------
>>> np.logical_not(3)
False
>>> np.logical_not([True, False, 0, 1])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_or',
"""
Compute the truth value of x1 OR x2 elementwise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
They have to be of the same shape.
Returns
-------
y : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
OR operation on elements of `x1` and `x2`.
See Also
--------
logical_and, logical_not, logical_xor
bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or([True, False], [False, False])
array([ True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_or(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_xor',
"""
Compute the truth value of x1 XOR x2, element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`. They must
be broadcastable to the same shape.
Returns
-------
y : bool or ndarray of bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by whether or not
broadcasting of one or both arrays was required.
See Also
--------
logical_and, logical_or, logical_not, bitwise_xor
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor([True, True, False, False], [True, False, True, False])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_xor(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
Simple example showing support of broadcasting
>>> np.logical_xor(0, np.eye(2))
array([[ True, False],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing
the element-wise maxima. If one of the elements being
compared is a nan, then that element is returned. If
both elements are nans then the first is returned. The
latter distinction is important for complex nans,
which are defined as at least one of the real or
imaginary parts being a nan. The net effect is that
nans are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : {ndarray, scalar}
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
minimum :
element-wise minimum
fmax :
element-wise maximum that ignores nans unless both inputs are nans.
fmin :
element-wise minimum that ignores nans unless both inputs are nans.
Notes
-----
The maximum is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.maximum(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.maximum(np.Inf, 1)
inf
""")
add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a nan, then that element
is returned. If both elements are nans then the first is returned. The
latter distinction is important for complex nans, which are defined as at
least one of the real or imaginary parts being a nan. The net effect is
that nans are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : {ndarray, scalar}
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
maximum :
element-wise maximum that propagates nans.
fmax :
element-wise maximum that ignores nans unless both inputs are nans.
fmin :
element-wise minimum that ignores nans unless both inputs are nans.
Notes
-----
The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.minimum([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ NaN, NaN, NaN])
""")
add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a nan, then the non-nan
element is returned. If both elements are nans then the first is returned.
The latter distinction is important for complex nans, which are defined as
at least one of the real or imaginary parts being a nan. The net effect is
that nans are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : {ndarray, scalar}
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmin :
element-wise minimum that ignores nans unless both inputs are nans.
maximum :
element-wise maximum that propagates nans.
minimum :
element-wise minimum that propagates nans.
Notes
-----
.. versionadded:: 1.3.0
The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmax([2, 3, 4], [1, 5, 2])
array([ 2., 5., 4.])
>>> np.fmax(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'fmin',
"""
fmin(x1, x2[, out])
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a nan, then the non-nan
element is returned. If both elements are nans then the first is returned.
The latter distinction is important for complex nans, which are defined as
at least one of the real or imaginary parts being a nan. The net effect is
that nans are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : {ndarray, scalar}
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmax :
element-wise maximum that ignores nans unless both inputs are nans.
maximum :
element-wise maximum that propagates nans.
minimum :
element-wise minimum that propagates nans.
Notes
-----
.. versionadded:: 1.3.0
The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmin([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.fmin(np.eye(2), [0.5, 2])
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
Parameters
----------
x : array_like
Input array.
Returns
-------
y1 : ndarray
Fractional part of `x`.
y2 : ndarray
Integral part of `x`.
Notes
-----
For integer input the return values are floats.
Examples
--------
>>> np.modf([0, 3.5])
(array([ 0. , 0.5]), array([ 0., 3.]))
>>> np.modf(-0.5)
(-0.5, -0.0)
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays to be multiplied.
Returns
-------
y : ndarray
The product of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
""")
add_newdoc('numpy.core.umath', 'negative',
"""
Returns an array with the negative of each element of the original array.
Parameters
----------
x : array_like or scalar
Input array.
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = -x`.
Examples
--------
>>> np.negative([1.,-1.])
array([-1., 1.])
""")
add_newdoc('numpy.core.umath', 'not_equal',
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
out : ndarray, optional
A placeholder the same shape as `x1` to store the result.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
not_equal : ndarray bool, scalar bool
For each element in `x1, x2`, return True if `x1` is not equal
to `x2` and False otherwise.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal([1.,2.], [1., 3.])
array([False, True], dtype=bool)
>>> np.not_equal([1, 2], [[1, 3],[1, 4]])
array([[False, True],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'ones_like',
"""
Returns an array of ones with the same shape and type as a given array.
Equivalent to ``a.copy().fill(1)``.
Please refer to the documentation for `zeros_like` for further details.
See Also
--------
zeros_like, ones
Examples
--------
>>> a = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.ones_like(a)
array([[1, 1, 1],
[1, 1, 1]])
""")
add_newdoc('numpy.core.umath', 'power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in
`x2`. `x1` and `x2` must be broadcastable to the same shape.
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.power(x1, 3)
array([ 0, 1, 8, 27, 64, 125])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0, 1, 8, 27, 16, 5],
[ 0, 1, 8, 27, 16, 5]])
""")
add_newdoc('numpy.core.umath', 'radians',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Input array in degrees.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding radian values.
See Also
--------
deg2rad : equivalent function
Examples
--------
Convert a degree array to radians
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,
2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,
5.23598776, 5.75958653])
>>> out = np.zeros((deg.shape))
>>> ret = np.radians(deg, out)
>>> ret is out
True
""")
add_newdoc('numpy.core.umath', 'deg2rad',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Angles in degrees.
Returns
-------
y : ndarray
The corresponding angle in radians.
See Also
--------
rad2deg : Convert angles from radians to degrees.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
``deg2rad(x)`` is ``x * pi / 180``.
Examples
--------
>>> np.deg2rad(180)
3.1415926535897931
""")
add_newdoc('numpy.core.umath', 'reciprocal',
"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray
Return array.
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division.
For integer zero the result is an overflow.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
""")
add_newdoc('numpy.core.umath', 'remainder',
"""
Return element-wise remainder of division.
Computes ``x1 - floor(x1 / x2) * x2``.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The remainder of the quotient ``x1/x2``, element-wise. Returns a scalar
if both `x1` and `x2` are scalars.
See Also
--------
divide, floor
Notes
-----
Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of) integers.
Examples
--------
>>> np.remainder([4, 7], [2, 3])
array([0, 1])
>>> np.remainder(np.arange(7), 5)
array([0, 1, 2, 3, 4, 0, 1])
""")
add_newdoc('numpy.core.umath', 'right_shift',
"""
Shift the bits of an integer to the right.
Bits are shifted to the right by removing `x2` bits at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to dividing `x1` by ``2**x2``.
Parameters
----------
x1 : array_like, int
Input values.
x2 : array_like, int
Number of bits to remove at the right of `x1`.
Returns
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
See Also
--------
left_shift : Shift the bits of an integer to the left.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(10)
'1010'
>>> np.right_shift(10, 1)
5
>>> np.binary_repr(5)
'101'
>>> np.right_shift(10, [1,2,3])
array([5, 2, 1])
""")
add_newdoc('numpy.core.umath', 'rint',
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : {ndarray, scalar}
Output array is same shape and type as `x`.
See Also
--------
ceil, floor, trunc
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'sign',
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The sign of `x`.
Examples
--------
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
""")
add_newdoc('numpy.core.umath', 'signbit',
"""
Returns element-wise True where signbit is set (less than zero).
Parameters
----------
x: array_like
The input value(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved
and it must be of the right shape to hold the output.
See `doc.ufuncs`.
Returns
-------
result : ndarray of bool
Output array, or reference to `out` if that was supplied.
Examples
--------
>>> np.signbit(-1.2)
True
>>> np.signbit(np.array([1, -2.3, 2.1]))
array([False, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
If both arguments are arrays or sequences, they have to be of the same
length. If `x2` is a scalar, its sign will be copied to all elements of
`x1`.
Parameters
----------
x1: array_like
Values to change the sign of.
x2: array_like
The sign of `x2` is copied to `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
The values of `x1` with the sign of `x2`.
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> np.copysign([-1, 0, 1], -1.1)
array([-1., -0., -1.])
>>> np.copysign([-1, 0, 1], np.arange(3)-1)
array([-1., 0., 1.])
""")
add_newdoc('numpy.core.umath', 'nextafter',
"""
Return the next representable floating-point value after x1 in the direction
of x2 element-wise.
Parameters
----------
x1 : array_like
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : array_like
The next representable values of `x1` in the direction of `x2`.
Examples
--------
>>> eps = np.finfo(np.float64).eps
>>> np.nextafter(1, 2) == eps + 1
True
>>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps]
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'spacing',
"""
Return the distance between x and the nearest adjacent number.
Parameters
----------
x1: array_like
Values to find the spacing of.
Returns
-------
out : array_like
The spacing of values of `x1`.
Notes
-----
It can be considered as a generalization of EPS:
``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there
should not be any representable number between ``x + spacing(x)`` and
x for any finite x.
Spacing of +- inf and nan is nan.
Examples
--------
>>> np.spacing(1) == np.finfo(np.float64).eps
True
""")
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
Parameters
----------
x : array_like
Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
Returns
-------
y : array_like
The sine of each element of x.
See Also
--------
arcsin, sinh, cos
Notes
-----
The sine is one of the fundamental functions of trigonometry
(the mathematical study of triangles). Consider a circle of radius
1 centered on the origin. A ray comes in from the :math:`+x` axis,
makes an angle at the origin (measured counter-clockwise from that
axis), and departs from the origin. The :math:`y` coordinate of
the outgoing ray's intersection with the unit circle is the sine
of that angle. It ranges from -1 for :math:`x=3\\pi / 2` to
+1 for :math:`\\pi / 2.` The function has zeroes where the angle is
a multiple of :math:`\\pi`. Sines of angles between :math:`\\pi` and
:math:`2\\pi` are negative. The numerous properties of the sine and
related functions are included in any standard trigonometry text.
Examples
--------
Print sine of one angle:
>>> np.sin(np.pi/2.)
1.0
Print sines of an array of angles given in degrees:
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ])
Plot the sine function:
>>> import matplotlib.pylab as plt
>>> x = np.linspace(-np.pi, np.pi, 201)
>>> plt.plot(x, np.sin(x))
>>> plt.xlabel('Angle [rad]')
>>> plt.ylabel('sin(x)')
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'sinh',
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
``-1j * np.sin(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic sine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
Examples
--------
>>> np.sinh(0)
0.0
>>> np.sinh(np.pi*1j/2)
1j
>>> np.sinh(np.pi*1j) # (exact value is 0)
1.2246063538223773e-016j
>>> # Discrepancy due to vagaries of floating point arithmetic.
>>> # Example of providing the optional output parameter
>>> out1 = np.empty(1)
>>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'sqrt',
"""
Return the positive square-root of an array, element-wise.
Parameters
----------
x : array_like
The values whose square-roots are required.
out : ndarray, optional
Alternate array object in which to put the result; if provided, it
must have the same shape as `x`
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. If any element in `x` is
complex, a complex array is returned (and the square-roots of
negative reals are calculated). If all of the elements in `x`
are real, so is `y`, with negative elements returning ``nan``.
If `out` was provided, `y` is a reference to it.
See Also
--------
lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
Notes
-----
*sqrt* has--consistent with common convention--as its branch cut the
real "interval" [`-inf`, 0), and is continuous from above on it.
(A branch cut is a curve in the complex plane across which a given
complex function fails to be continuous.)
Examples
--------
>>> np.sqrt([1,4,9])
array([ 1., 2., 3.])
>>> np.sqrt([4, -1, -3+4J])
array([ 2.+0.j, 0.+1.j, 1.+2.j])
>>> np.sqrt([4, -1, np.inf])
array([ 2., NaN, Inf])
""")
add_newdoc('numpy.core.umath', 'square',
"""
Return the element-wise square of the input.
Parameters
----------
x : array_like
Input data.
Returns
-------
out : ndarray
Element-wise `x*x`, of the same shape and dtype as `x`.
Returns scalar if `x` is a scalar.
See Also
--------
numpy.linalg.matrix_power
sqrt
power
Examples
--------
>>> np.square([-1j, 1])
array([-1.-0.j, 1.+0.j])
""")
add_newdoc('numpy.core.umath', 'subtract',
"""
Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be subtracted from each other.
Returns
-------
y : ndarray
The difference of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[ 0., 0., 0.],
[ 3., 3., 3.],
[ 6., 6., 6.]])
""")
add_newdoc('numpy.core.umath', 'tan',
"""
Compute tangent element-wise.
Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> from math import pi
>>> np.tan(np.array([-pi,pi/2,pi]))
array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
>>>
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.empty(1)
>>> out2 = np.tan([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tan(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'tanh',
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)`` or
``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
.. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
http://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
>>> np.tanh((0, np.pi*1j, np.pi*1j/2))
array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.empty(1)
>>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
Returns
-------
out : ndarray
Result is scalar if both inputs are scalar, ndarray otherwise.
Notes
-----
The floor division operator ``//`` was added in Python 2.2 making ``//``
and ``/`` equivalent operators. The default floor division operation of
``/`` can be replaced by true division with
``from __future__ import division``.
In Python 3.0, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x/4
array([0, 0, 0, 0, 1])
>>> x//4
array([0, 0, 0, 0, 1])
>>> from __future__ import division
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
""")
| gpl-3.0 |
bdestombe/flopy-1 | flopy/utils/mflistfile.py | 1 | 25207 | """
This is a set of classes for reading budget information out of MODFLOW-style
listing files. Cumulative and incremental budgets are returned as numpy
recarrays, which can then be easily plotted.
"""
import collections
import os
import re
import sys
from datetime import timedelta
import numpy as np
from ..utils.utils_def import totim_to_datetime
class ListBudget(object):
"""
MODFLOW family list file handling
Parameters
----------
file_name : str
the list file name
budgetkey : str
the text string identifying the budget table. (default is None)
timeunit : str
the time unit to return in the recarray. (default is 'days')
Notes
-----
The ListBudget class should not be instantiated directly. Access is
through derived classes: MfListBudget (MODFLOW), SwtListBudget (SEAWAT)
and SwrListBudget (MODFLOW with the SWR process)
Examples
--------
>>> mf_list = MfListBudget("my_model.list")
>>> incremental, cumulative = mf_list.get_budget()
>>> df_in, df_out = mf_list.get_dataframes(start_datetime="10-21-2015")
"""
def __init__(self, file_name, budgetkey=None, timeunit='days'):
# Set up file reading
assert os.path.exists(file_name)
self.file_name = file_name
if sys.version_info[0] == 2:
self.f = open(file_name, 'r')
elif sys.version_info[0] == 3:
self.f = open(file_name, 'r', encoding='ascii', errors='replace')
self.tssp_lines = 0
# Assign the budgetkey, which should have been overriden
if budgetkey is None:
self.set_budget_key()
else:
self.budgetkey = budgetkey
self.totim = []
self.timeunit = timeunit
self.idx_map = []
self.entries = []
self.null_entries = []
self.time_line_idx = 20
if timeunit.upper() == 'SECONDS':
self.timeunit = 'S'
self.time_idx = 0
elif timeunit.upper() == 'MINUTES':
self.timeunit = 'M'
self.time_idx = 1
elif timeunit.upper() == 'HOURS':
self.timeunit = 'H'
self.time_idx = 2
elif timeunit.upper() == 'DAYS':
self.timeunit = 'D'
self.time_idx = 3
elif timeunit.upper() == 'YEARS':
self.timeunit = 'Y'
self.time_idx = 4
else:
raise Exception('need to reset time_idxs attribute to '
'use units other than days and check usage of '
'timedelta')
# Fill budget recarrays
self._load()
self._isvalid = False
if len(self.idx_map) > 0:
self._isvalid = True
# Close the open file
self.f.close()
# return
return
def set_budget_key(self):
raise Exception('Must be overridden...')
def isvalid(self):
"""
Get a boolean indicating if budget data are available in the file.
Returns
-------
out : boolean
Boolean indicating if budget data are available in the file.
Examples
--------
>>> mf_list = MfListBudget('my_model.list')
>>> valid = mf_list.isvalid()
"""
return self._isvalid
def get_record_names(self):
"""
Get a list of water budget record names in the file.
Returns
-------
out : list of strings
List of unique text names in the binary file.
Examples
--------
>>> mf_list = MfListBudget('my_model.list')
>>> names = mf_list.get_record_names()
"""
if not self._isvalid:
return None
return self.inc.dtype.names
def get_times(self):
"""
Get a list of unique water budget times in the list file.
Returns
-------
out : list of floats
List contains unique water budget simulation times (totim) in list file.
Examples
--------
>>> mf_list = MfListBudget('my_model.list')
>>> times = mf_list.get_times()
"""
if not self._isvalid:
return None
return self.inc['totim'].tolist()
def get_kstpkper(self):
"""
Get a list of unique stress periods and time steps in the list file
water budgets.
Returns
----------
out : list of (kstp, kper) tuples
List of unique kstp, kper combinations in list file. kstp and
kper values are zero-based.
Examples
--------
>>> mf_list = MfListBudget("my_model.list")
>>> kstpkper = mf_list.get_kstpkper()
"""
if not self._isvalid:
return None
kstpkper = []
for kstp, kper in zip(self.inc['time_step'],
self.inc['stress_period']):
kstpkper.append((kstp, kper))
return kstpkper
def get_incremental(self, names=None):
"""
Get a recarray with the incremental water budget items in the list file.
Parameters
----------
names : str or list of strings
Selection of column names to return. If names is not None then
totim, time_step, stress_period, and selection(s) will be returned.
(default is None).
Returns
-------
out : recarray
Numpy recarray with the water budget items in list file. The
recarray also includes totim, time_step, and stress_period.
Examples
--------
>>> mf_list = MfListBudget("my_model.list")
>>> incremental = mf_list.get_incremental()
"""
if not self._isvalid:
return None
if names is None:
return self.inc
else:
if not isinstance(names, list):
names = [names]
names.insert(0, 'stress_period')
names.insert(0, 'time_step')
names.insert(0, 'totim')
return self.inc[names].view(np.recarray)
def get_cumulative(self, names=None):
"""
Get a recarray with the cumulative water budget items in the list file.
Parameters
----------
names : str or list of strings
Selection of column names to return. If names is not None then
totim, time_step, stress_period, and selection(s) will be returned.
(default is None).
Returns
-------
out : recarray
Numpy recarray with the water budget items in list file. The
recarray also includes totim, time_step, and stress_period.
Examples
--------
>>> mf_list = MfListBudget("my_model.list")
>>> cumulative = mf_list.get_cumulative()
"""
if not self._isvalid:
return None
if names is None:
return self.cum
else:
if not isinstance(names, list):
names = [names]
names.insert(0, 'stress_period')
names.insert(0, 'time_step')
names.insert(0, 'totim')
return self.cum[names].view(np.recarray)
def get_budget(self, names=None):
"""
Get the recarrays with the incremental and cumulative water budget items
in the list file.
Parameters
----------
names : str or list of strings
Selection of column names to return. If names is not None then
totim, time_step, stress_period, and selection(s) will be returned.
(default is None).
Returns
-------
out : recarrays
Numpy recarrays with the water budget items in list file. The
recarray also includes totim, time_step, and stress_period. A
separate recarray is returned for the incremental and cumulative
water budget entries.
Examples
--------
>>> mf_list = MfListBudget("my_model.list")
>>> budget = mf_list.get_budget()
"""
if not self._isvalid:
return None
if names is None:
return self.inc, self.cum
else:
if not isinstance(names, list):
names = [names]
names.insert(0, 'stress_period')
names.insert(0, 'time_step')
names.insert(0, 'totim')
return self.inc[names].view(np.recarray), self.cum[names].view(
np.recarray)
def get_data(self, kstpkper=None, idx=None, totim=None, incremental=False):
"""
Get water budget data from the list file for the specified conditions.
Parameters
----------
idx : int
The zero-based record number. The first record is record 0.
(default is None).
kstpkper : tuple of ints
A tuple containing the time step and stress period (kstp, kper).
These are zero-based kstp and kper values. (default is None).
totim : float
The simulation time. (default is None).
incremental : bool
Boolean flag used to determine if incremental or cumulative water
budget data for the specified conditions will be returned. If
incremental=True, incremental water budget data will be returned.
If incremental=False, cumulative water budget data will be
returned. (default is False).
Returns
-------
data : numpy recarray
Array has size (number of budget items, 3). Recarray names are 'index',
'value', 'name'.
See Also
--------
Notes
-----
if both kstpkper and totim are None, will return the last entry
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import flopy
>>> mf_list = flopy.utils.MfListBudget("my_model.list")
>>> data = mf_list.get_data(kstpkper=(0,0))
>>> plt.bar(data['index'], data['value'])
>>> plt.xticks(data['index'], data['name'], rotation=45, size=6)
>>> plt.show()
"""
if not self._isvalid:
return None
ipos = None
if kstpkper is not None:
try:
ipos = self.get_kstpkper().index(kstpkper)
except:
pass
elif totim is not None:
try:
ipos = self.get_times().index(totim)
except:
pass
elif idx is not None:
ipos = idx
else:
ipos = -1
if ipos is None:
print('Could not find specified condition.')
print(' kstpkper = {}'.format(kstpkper))
print(' totim = {}'.format(totim))
return None
if incremental:
t = self.inc[ipos]
else:
t = self.cum[ipos]
dtype = np.dtype(
[('index', np.int32), ('value', np.float32), ('name', '|S25')])
v = np.recarray(shape=(len(self.inc.dtype.names[3:])), dtype=dtype)
for i, name in enumerate(self.inc.dtype.names[3:]):
mult = 1.
if '_OUT' in name:
mult = -1.
v[i]['index'] = i
v[i]['value'] = mult * t[name]
v[i]['name'] = name
return v
def get_dataframes(self, start_datetime='1-1-1970', diff=False):
"""
Get pandas dataframes with the incremental and cumulative water budget
items in the list file.
Parameters
----------
start_datetime : str
If start_datetime is passed as None, the rows are indexed on totim.
Otherwise, a DatetimeIndex is set. (default is 1-1-1970).
diff : bool
If True, the separate _IN and _OUT columns for each budget entry are
replaced by a single net (in minus out) column. (default is False).
Returns
-------
out : pandas dataframes
Pandas dataframes with the incremental and cumulative water budget
items in list file. A separate pandas dataframe is returned for the
incremental and cumulative water budget entries.
Examples
--------
>>> mf_list = MfListBudget("my_model.list")
>>> incrementaldf, cumulativedf = mf_list.get_dataframes()
"""
try:
import pandas as pd
except Exception as e:
raise Exception(
"ListBudget.get_dataframe() error import pandas: " + \
str(e))
if not self._isvalid:
return None
totim = self.get_times()
if start_datetime is not None:
totim = totim_to_datetime(totim,
start=pd.to_datetime(start_datetime),
timeunit=self.timeunit)
df_flux = pd.DataFrame(self.inc, index=totim).loc[:, self.entries]
df_vol = pd.DataFrame(self.cum, index=totim).loc[:, self.entries]
if not diff:
return df_flux, df_vol
else:
in_names = [col for col in df_flux.columns if col.endswith("_IN")]
out_names = [col for col in df_flux.columns if col.endswith("_OUT")]
#print(in_names,out_names)
#print(df_flux.columns)
base_names = [name.replace("_IN",'') for name in in_names]
for name in base_names:
in_name = name + "_IN"
out_name = name + "_OUT"
df_flux.loc[:,name.lower()] = df_flux.loc[:,in_name] - df_flux.loc[:,out_name]
df_flux.pop(in_name)
df_flux.pop(out_name)
df_vol.loc[:,name.lower()] = df_vol.loc[:,in_name] - df_vol.loc[:,out_name]
df_vol.pop(in_name)
df_vol.pop(out_name)
cols = list(df_flux.columns)
cols.sort()
cols = [col.lower() for col in cols]
df_flux.columns = cols
df_vol.columns = cols
return df_flux, df_vol
def _build_index(self, maxentries):
self.idx_map = self._get_index(maxentries)
return
def _get_index(self, maxentries):
# --parse through the file looking for matches and parsing ts and sp
idxs = []
l_count = 1
while True:
seekpoint = self.f.tell()
line = self.f.readline()
if line == '':
break
if self.budgetkey in line:
for l in range(self.tssp_lines):
line = self.f.readline()
try:
ts, sp = self._get_ts_sp(line)
except:
print('unable to cast ts,sp on line number', l_count,
' line: ', line)
break
# print('info found for timestep stress period',ts,sp)
idxs.append([ts, sp, seekpoint])
if maxentries and len(idxs) >= maxentries:
break
return idxs
def _seek_to_string(self, s):
"""
Parameters
----------
s : str
Seek through the file to the next occurrence of s. Return the
seek location when found.
Returns
-------
seekpoint : int
Next location of the string
"""
while True:
seekpoint = self.f.tell()
line = self.f.readline()
if line == '':
break
if s in line:
break
return seekpoint
def _get_ts_sp(self, line):
"""
From the line string, extract the time step and stress period numbers.
"""
# Old method. Was not generic enough.
# ts = int(line[self.ts_idxs[0]:self.ts_idxs[1]])
# sp = int(line[self.sp_idxs[0]:self.sp_idxs[1]])
# Get rid of nasty things
line = line.replace(',', '')
searchstring = 'TIME STEP'
idx = line.index(searchstring) + len(searchstring)
ll = line[idx:].strip().split()
ts = int(ll[0])
searchstring = 'STRESS PERIOD'
idx = line.index(searchstring) + len(searchstring)
ll = line[idx:].strip().split()
sp = int(ll[0])
return ts, sp
def _set_entries(self):
if len(self.idx_map) < 1:
return None, None
if len(self.entries) > 0:
raise Exception('entries already set:' + str(self.entries))
if not self.idx_map:
raise Exception('must call build_index before call set_entries')
try:
incdict, cumdict = self._get_sp(self.idx_map[0][0],
self.idx_map[0][1],
self.idx_map[0][2])
except:
raise Exception('unable to read budget information from first '
'entry in list file')
self.entries = incdict.keys()
null_entries = collections.OrderedDict()
incdict = collections.OrderedDict()
cumdict = collections.OrderedDict()
for entry in self.entries:
incdict[entry] = []
cumdict[entry] = []
null_entries[entry] = np.NaN
self.null_entries = [null_entries, null_entries]
return incdict, cumdict
def _load(self, maxentries=None):
self._build_index(maxentries)
incdict, cumdict = self._set_entries()
if incdict is None and cumdict is None:
return
totim = []
for ts, sp, seekpoint in self.idx_map:
tinc, tcum = self._get_sp(ts, sp, seekpoint)
for entry in self.entries:
incdict[entry].append(tinc[entry])
cumdict[entry].append(tcum[entry])
# Get the time for this record
seekpoint = self._seek_to_string('TIME SUMMARY AT END')
tslen, sptim, tt = self._get_totim(ts, sp, seekpoint)
totim.append(tt)
# get kstp and kper
idx_array = np.array(self.idx_map)
# build dtype for recarray
dtype_tups = [('totim', np.float32), ("time_step", np.int32),
("stress_period", np.int32)]
for entry in self.entries:
dtype_tups.append((entry, np.float32))
dtype = np.dtype(dtype_tups)
# create recarray
nentries = len(incdict[entry])
self.inc = np.recarray(shape=(nentries,), dtype=dtype)
self.cum = np.recarray(shape=(nentries,), dtype=dtype)
# fill each column of the recarray
for entry in self.entries:
self.inc[entry] = incdict[entry]
self.cum[entry] = cumdict[entry]
# file the totim, time_step, and stress_period columns for the
# incremental and cumulative recarrays (zero-based kstp,kper)
self.inc['totim'] = np.array(totim)[:]
self.inc["time_step"] = idx_array[:, 0] - 1
self.inc["stress_period"] = idx_array[:, 1] - 1
self.cum['totim'] = np.array(totim)[:]
self.cum["time_step"] = idx_array[:, 0] - 1
self.cum["stress_period"] = idx_array[:, 1] - 1
return
def _get_sp(self, ts, sp, seekpoint):
self.f.seek(seekpoint)
# --read to the start of the "in" budget information
while True:
line = self.f.readline()
if line == '':
print(
'end of file found while seeking budget information for ts,sp',
ts, sp)
return self.null_entries
# --if there are two '=' in this line, then it is a budget line
if len(re.findall('=', line)) == 2:
break
tag = 'IN'
incdict = collections.OrderedDict()
cumdict = collections.OrderedDict()
while True:
if line == '':
# raise Exception('end of file found while seeking budget information')
print(
'end of file found while seeking budget information for ts,sp',
ts, sp)
return self.null_entries
if len(re.findall('=', line)) == 2:
try:
entry, flux, cumu = self._parse_budget_line(line)
except Exception:
print('error parsing budget line in ts,sp', ts, sp)
return self.null_entries
if flux is None:
print(
'error casting in flux for', entry,
' to float in ts,sp',
ts, sp)
return self.null_entries
if cumu is None:
print(
'error casting in cumu for', entry,
' to float in ts,sp',
ts, sp)
return self.null_entries
if entry.endswith(tag.upper()):
if ' - ' in entry.upper():
key = entry.replace(' ', '')
else:
key = entry.replace(' ', '_')
elif 'PERCENT DISCREPANCY' in entry.upper():
key = entry.replace(' ', '_')
else:
key = '{}_{}'.format(entry.replace(' ', '_'), tag)
incdict[key] = flux
cumdict[key] = cumu
else:
if 'OUT:' in line.upper():
tag = 'OUT'
line = self.f.readline()
if entry.upper() == 'PERCENT DISCREPANCY':
break
return incdict, cumdict
def _parse_budget_line(self, line):
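# A budget line contains two '=' signs and has the general form
#   <ENTRY NAME> = <cumulative volume>   <ENTRY NAME> = <rate this time step>
# The text before the first '=' is the entry name, the first number is the
# cumulative value, and the second number is the incremental flux.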
# get the budget item name
entry = line.strip().split('=')[0].strip()
# get the cumulative string
idx = line.index('=') + 1
line2 = line[idx:]
ll = line2.strip().split()
cu_str = ll[0]
idx = line2.index('=') + 1
fx_str = line2[idx:].strip()
#
# cu_str = line[self.cumu_idxs[0]:self.cumu_idxs[1]]
# fx_str = line[self.flux_idxs[0]:self.flux_idxs[1]]
flux, cumu = None, None
try:
cumu = float(cu_str)
except:
if 'NAN' in cu_str.strip().upper():
cumu = np.NaN
try:
flux = float(fx_str)
except:
if 'NAN' in fx_str.strip().upper():
flux = np.NaN
return entry, flux, cumu
def _get_totim(self, ts, sp, seekpoint):
self.f.seek(seekpoint)
# --read header lines
ihead = 0
while True:
line = self.f.readline()
ihead += 1
if line == '':
print(
'end of file found while seeking time information for ts,sp',
ts, sp)
return np.NaN, np.NaN, np.NaN
elif ihead == 2 and 'SECONDS MINUTES HOURS DAYS YEARS' not in line:
break
elif '-----------------------------------------------------------' in line:
line = self.f.readline()
break
tslen = self._parse_time_line(line)
if tslen is None:
print('error parsing tslen for ts,sp', ts, sp)
return np.NaN, np.NaN, np.NaN
sptim = self._parse_time_line(self.f.readline())
if sptim is None:
print('error parsing sptim for ts,sp', ts, sp)
return np.NaN, np.NaN, np.NaN
totim = self._parse_time_line(self.f.readline())
if totim is None:
print('error parsing totim for ts,sp', ts, sp)
return np.NaN, np.NaN, np.NaN
return tslen, sptim, totim
def _parse_time_line(self, line):
if line == '':
print('end of file found while parsing time information')
return None
try:
time_str = line[self.time_line_idx:]
raw = time_str.split()
idx = self.time_idx
# catch case where itmuni is undefined
# in this case, the table format is different
try:
v = float(raw[0])
except:
time_str = line[45:]
raw = time_str.split()
idx = 0
tval = float(raw[idx])
except:
print('error parsing tslen information', time_str)
return None
return tval
class SwtListBudget(ListBudget):
"""
"""
def set_budget_key(self):
self.budgetkey = 'MASS BUDGET FOR ENTIRE MODEL'
return
class MfListBudget(ListBudget):
"""
"""
def set_budget_key(self):
self.budgetkey = 'VOLUMETRIC BUDGET FOR ENTIRE MODEL'
return
class MfusgListBudget(ListBudget):
"""
"""
def set_budget_key(self):
self.budgetkey = 'VOLUMETRIC BUDGET FOR ENTIRE MODEL'
return
class SwrListBudget(ListBudget):
"""
"""
def set_budget_key(self):
self.budgetkey = 'VOLUMETRIC SURFACE WATER BUDGET FOR ENTIRE MODEL'
self.tssp_lines = 1
return
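# A minimal usage sketch, assuming a MODFLOW listing file named
# 'my_model.list' exists (the name is a placeholder): a new list-file
# variant only needs to override set_budget_key() with the text that marks
# its budget table; ListBudget handles all of the parsing.
class CustomListBudget(ListBudget):
    def set_budget_key(self):
        self.budgetkey = 'VOLUMETRIC BUDGET FOR ENTIRE MODEL'
if __name__ == '__main__':
    mf_list = MfListBudget('my_model.list')
    if mf_list.isvalid():
        incremental, cumulative = mf_list.get_budget()
        print(incremental.dtype.names)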
| bsd-3-clause |
vermouthmjl/scikit-learn | examples/gaussian_process/plot_gpr_co2.py | 131 | 5705 | """
========================================================
Gaussian process regression (GPR) on Mauna Loa CO2 data.
========================================================
This example is based on Section 5.4.3 of "Gaussian Processes for Machine
Learning" [RW2006]. It illustrates an example of complex kernel engineering and
hyperparameter optimization using gradient ascent on the
log-marginal-likelihood. The data consists of the monthly average atmospheric
CO2 concentrations (in parts per million by volume (ppmv)) collected at the
Mauna Loa Observatory in Hawaii, between 1958 and 1997. The objective is to
model the CO2 concentration as a function of the time t.
The kernel is composed of several terms that are responsible for explaining
different properties of the signal:
- a long term, smooth rising trend is to be explained by an RBF kernel. The
RBF kernel with a large length-scale enforces this component to be smooth;
it is not enforced that the trend is rising which leaves this choice to the
GP. The specific length-scale and the amplitude are free hyperparameters.
- a seasonal component, which is to be explained by the periodic
ExpSineSquared kernel with a fixed periodicity of 1 year. The length-scale
of this periodic component, controlling its smoothness, is a free parameter.
In order to allow decaying away from exact periodicity, the product with an
RBF kernel is taken. The length-scale of this RBF component controls the
decay time and is a further free parameter.
- smaller, medium term irregularities are to be explained by a
RationalQuadratic kernel component, whose length-scale and alpha parameter,
which determines the diffuseness of the length-scales, are to be determined.
According to [RW2006], these irregularities can better be explained by
a RationalQuadratic than an RBF kernel component, probably because it can
accommodate several length-scales.
- a "noise" term, consisting of an RBF kernel contribution, which shall
explain the correlated noise components such as local weather phenomena,
and a WhiteKernel contribution for the white noise. The relative amplitudes
and the RBF's length scale are further free parameters.
Maximizing the log-marginal-likelihood after subtracting the target's mean
yields the following kernel with an LML of -83.214::
34.4**2 * RBF(length_scale=41.8)
+ 3.27**2 * RBF(length_scale=180) * ExpSineSquared(length_scale=1.44,
periodicity=1)
+ 0.446**2 * RationalQuadratic(alpha=17.7, length_scale=0.957)
+ 0.197**2 * RBF(length_scale=0.138) + WhiteKernel(noise_level=0.0336)
Thus, most of the target signal (34.4ppm) is explained by a long-term rising
trend (length-scale 41.8 years). The periodic component has an amplitude of
3.27ppm, a decay time of 180 years and a length-scale of 1.44. The long decay
time indicates that we have a locally very close to periodic seasonal
component. The correlated noise has an amplitude of 0.197ppm with a length
scale of 0.138 years and a white-noise contribution of 0.197ppm. Thus, the
overall noise level is very small, indicating that the data can be very well
explained by the model. The figure shows also that the model makes very
confident predictions until around 2015.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, WhiteKernel, RationalQuadratic, ExpSineSquared
from sklearn.datasets import fetch_mldata
data = fetch_mldata('mauna-loa-atmospheric-co2').data
X = data[:, [1]]
y = data[:, 0]
# Kernel with parameters given in GPML book
k1 = 66.0**2 * RBF(length_scale=67.0) # long term smooth rising trend
k2 = 2.4**2 * RBF(length_scale=90.0) \
* ExpSineSquared(length_scale=1.3, periodicity=1.0) # seasonal component
# medium term irregularity
k3 = 0.66**2 \
* RationalQuadratic(length_scale=1.2, alpha=0.78)
k4 = 0.18**2 * RBF(length_scale=0.134) \
+ WhiteKernel(noise_level=0.19**2) # noise terms
kernel_gpml = k1 + k2 + k3 + k4
gp = GaussianProcessRegressor(kernel=kernel_gpml, alpha=0,
optimizer=None, normalize_y=True)
gp.fit(X, y)
print("GPML kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp.log_marginal_likelihood(gp.kernel_.theta))
# Kernel with optimized parameters
k1 = 50.0**2 * RBF(length_scale=50.0) # long term smooth rising trend
k2 = 2.0**2 * RBF(length_scale=100.0) \
* ExpSineSquared(length_scale=1.0, periodicity=1.0,
periodicity_bounds="fixed") # seasonal component
# medium term irregularities
k3 = 0.5**2 * RationalQuadratic(length_scale=1.0, alpha=1.0)
k4 = 0.1**2 * RBF(length_scale=0.1) \
+ WhiteKernel(noise_level=0.1**2,
noise_level_bounds=(1e-3, np.inf)) # noise terms
kernel = k1 + k2 + k3 + k4
gp = GaussianProcessRegressor(kernel=kernel, alpha=0,
normalize_y=True)
gp.fit(X, y)
print("\nLearned kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp.log_marginal_likelihood(gp.kernel_.theta))
X_ = np.linspace(X.min(), X.max() + 30, 1000)[:, np.newaxis]
y_pred, y_std = gp.predict(X_, return_std=True)
# Illustration
plt.scatter(X, y, c='k')
plt.plot(X_, y_pred)
plt.fill_between(X_[:, 0], y_pred - y_std, y_pred + y_std,
alpha=0.5, color='k')
plt.xlim(X_.min(), X_.max())
plt.xlabel("Year")
plt.ylabel(r"CO$_2$ in ppm")
plt.title(r"Atmospheric CO$_2$ concentration at Mauna Loa")
plt.tight_layout()
plt.show()
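# A quick numerical check (minimal sketch): predict the concentration one
# decade beyond the last plotted date, using the same gp.predict call that
# produced the curve above.
X_future = np.array([[X_.max() + 10.0]])
y_future, y_future_std = gp.predict(X_future, return_std=True)
print("Predicted CO2 at year %.1f: %.1f +/- %.1f ppm"
      % (X_future[0, 0], y_future[0], y_future_std[0]))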
| bsd-3-clause |
arahuja/scikit-learn | examples/calibration/plot_calibration_multiclass.py | 272 | 6972 | """
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot modifications of the calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
| bsd-3-clause |
xzh86/scikit-learn | examples/ensemble/plot_bias_variance.py | 357 | 7324 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due to the variability in the data.
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves in the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
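# For a fixed test point x, the decomposition illustrated below reads
#     E_LS[(y - y_hat(x))^2] = noise(x) + bias(x)^2 + variance(x)
# with
#     noise(x)    = Var[y | x]               (irreducible error),
#     bias(x)     = f(x) - E_LS[y_hat(x)]    (systematic error),
#     variance(x) = Var_LS[y_hat(x)]         (sensitivity to the training set),
# where the expectations are taken over training sets LS. The script estimates
# each term empirically by averaging over n_repeat independently drawn sets.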
# Author: Gilles Louppe <g.louppe@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
x = x.ravel()
return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
X = np.random.rand(n_samples) * 10 - 5
X = np.sort(X)
if n_repeat == 1:
y = f(X) + np.random.normal(0.0, noise, n_samples)
else:
y = np.zeros((n_samples, n_repeat))
for i in range(n_repeat):
y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)
X = X.reshape((n_samples, 1))
return X, y
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
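    # The double loop below averages (y_test[:, j] - y_predict[:, i]) ** 2 over
    # every pairing of a noise realization j of the test targets with a model
    # fit on training set i, giving a Monte Carlo estimate of the expected
    # squared error at each test point.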
y_error = np.zeros(n_test)
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
plt.plot(X_test, np.mean(y_predict, axis=1), "c",
label="$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
plt.plot(X_test, y_var, "g", label="$variance(x)$"),
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.show()
| bsd-3-clause |
google-research/google-research | learn_to_infer/run_ring.py | 1 | 10211 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runner for transformer experiments.
"""
import os
from . import metrics
from . import plotting
from . import ring_dist
from . import ring_models
from . import train
from absl import app
from absl import flags
import jax
from jax.config import config
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as onp
flags.DEFINE_integer("num_encoders", 6,
"Number of encoder modules in the transformer.")
flags.DEFINE_integer("num_decoders", 6,
"Number of decoder modules in the transformer.")
flags.DEFINE_integer("num_heads", 8,
"Number of attention heads in the transformer.")
flags.DEFINE_integer("key_dim", 32,
"The dimension of the keys in the transformer.")
flags.DEFINE_integer("value_dim_per_head", 32,
"The dimension of the values in the transformer for each head.")
flags.DEFINE_integer("k", 2,
"The number of modes in the data.")
flags.DEFINE_integer("data_points_per_mode", 25,
"Number of data points to include per mode in the data.")
flags.DEFINE_boolean("parallel", True,
"If possible, train in parallel across devices.")
flags.DEFINE_integer("batch_size", 64,
"The batch size.")
flags.DEFINE_integer("eval_batch_size", 256,
"The batch size for evaluation.")
flags.DEFINE_integer("num_steps", int(1e6),
"The number of steps to train for.")
flags.DEFINE_float("lr", 1e-3,
"The learning rate for ADAM.")
flags.DEFINE_integer("summarize_every", 100,
"Number of steps between summaries.")
flags.DEFINE_integer("checkpoint_every", 5000,
"Number of steps between checkpoints.")
flags.DEFINE_boolean("clobber_checkpoint", False,
"If true, remove any existing summaries and checkpoints in logdir.")
flags.DEFINE_string("logdir", "/tmp/transformer",
"The directory to put summaries and checkpoints.")
flags.DEFINE_boolean("debug_nans", False,
"If true, run in debug mode and fail on nans.")
FLAGS = flags.FLAGS
def make_model(key,
num_encoders=4,
num_decoders=4,
num_heads=8,
value_dim=128,
data_points_per_mode=25,
k=10):
model = ring_models.RingInferenceMachine(
max_k=k,
max_num_data_points=k*data_points_per_mode, num_heads=num_heads,
num_encoders=num_encoders, num_decoders=num_decoders, qkv_dim=value_dim)
params = model.init_params(key)
return model, params
def sample_batch(key, batch_size, k, data_points_per_mode):
keys = jax.random.split(key, num=batch_size)
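  # vmap maps ring_dist.sample_params_and_points over the batch of PRNG keys
  # (axis 0) while broadcasting every other argument (in_axes=None), so one
  # ring mixture is sampled per key.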
xs, cs, params = jax.vmap(
ring_dist.sample_params_and_points,
in_axes=(0, None, None, None, None, None, None, None, None,
None))(keys, k * data_points_per_mode, k, 1., 0.5, 2, .02,
jnp.zeros([2]), jnp.eye(2), 0.1)
return xs, cs, params
def make_loss(model,
k=2,
data_points_per_mode=25,
batch_size=128):
def sample_train_batch(key):
xs, _, params = sample_batch(key, batch_size, k, data_points_per_mode)
return xs, params
def loss(params, key):
key, subkey = jax.random.split(key)
xs, ring_params = sample_train_batch(key)
ks = jnp.full([batch_size], k)
losses = model.loss(
params, xs, ks*data_points_per_mode, ring_params, ks, subkey)
return jnp.mean(losses)
return jax.jit(loss)
def make_summarize(
model,
k=2,
data_points_per_mode=25,
eval_batch_size=256):
def sample_eval_batch(key):
return sample_batch(key, eval_batch_size, k, data_points_per_mode)
sample_eval_batch = jax.jit(sample_eval_batch)
def sample_single(key):
xs, cs, params = sample_batch(key, 1, k, data_points_per_mode)
return xs[0], cs[0], (params[0][0], params[1][0], params[2][0],
params[3][0])
def model_classify(params, inputs, batch_size):
return model.classify(params, inputs,
jnp.full([batch_size], k*data_points_per_mode),
jnp.full([batch_size], k))
def sample_and_classify_eval_batch(key, params):
xs, cs, true_ring_params = sample_eval_batch(key)
tfmr_cs, tfmr_ring_params = model_classify(params, xs, eval_batch_size)
return xs, cs, true_ring_params, tfmr_cs, tfmr_ring_params
def sample_and_classify_single_mm(key, params):
xs, cs, ring_params = sample_single(key)
tfmr_cs, tfmr_ring_params = model_classify(params, xs[jnp.newaxis], 1)
return xs, cs, ring_params, tfmr_cs, tfmr_ring_params
sample_and_classify_eval_batch = jax.jit(sample_and_classify_eval_batch)
  sample_and_classify_single_mm = jax.jit(sample_and_classify_single_mm)
def summarize_baselines(writer, step, key):
key, subkey = jax.random.split(key)
xs, cs, _ = sample_eval_batch(subkey)
ks = onp.full([eval_batch_size], k)
baseline_metrics = metrics.compute_masked_baseline_metrics(
xs, cs, ks, ks*data_points_per_mode)
for method_name, method_metrics in baseline_metrics.items():
for metric_name, metric_val in method_metrics.items():
writer.scalar("%s/%s" % (method_name, metric_name),
metric_val, step=step)
print("%s %s: %0.3f" % (method_name, metric_name, metric_val))
def plot_params(num_data_points, writer, step, params, key):
outs = sample_and_classify_single_mm(key, params)
xs, true_cs, true_params, pred_cs, pred_params = outs
pred_cs = pred_cs[0]
pred_params = (pred_params[0][0], pred_params[1][0],
pred_params[2][0], pred_params[3][0])
fig = plotting.plot_rings(
xs, k, true_cs, true_params, pred_cs, pred_params)
plot_image = plotting.plot_to_numpy_image(plt)
writer.image(
"%d_modes_%d_points" % (k, num_data_points), plot_image, step=step)
plt.close(fig)
def comparison_inference(params):
rings_inputs, true_cs = plotting.make_comparison_rings()
rings_inputs = rings_inputs[jnp.newaxis, Ellipsis]
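    # A fresh RingInferenceMachine with larger size limits is instantiated so
    # the trained parameters can be reused on the 1500-point comparison rings,
    # which exceed the training-time maximum of k * data_points_per_mode.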
new_model = ring_models.RingInferenceMachine(
max_k=2, max_num_data_points=1500, num_heads=FLAGS.num_heads,
num_encoders=FLAGS.num_encoders, num_decoders=FLAGS.num_decoders,
qkv_dim=FLAGS.value_dim_per_head*FLAGS.num_heads)
pred_cs, pred_params = new_model.classify(
params, rings_inputs, jnp.array([1500]), jnp.array([2]))
pred_cs = pred_cs[0]
pred_params = (pred_params[0][0], pred_params[1][0],
pred_params[2][0], pred_params[3][0])
return rings_inputs[0], true_cs, pred_cs, pred_params
comparison_inference = jax.jit(comparison_inference)
def plot_sklearn_comparison(writer, step, params):
ring_xs, true_cs, pred_cs, pred_params = comparison_inference(params)
fig = plotting.plot_comparison_rings(ring_xs, true_cs, pred_cs, pred_params)
writer.image(
"sklearn_comparison", plotting.plot_to_numpy_image(plt), step=step)
plt.close(fig)
def summarize(writer, step, params, key):
k1, k2, k3 = jax.random.split(key, num=3)
_, cs, _, tfmr_cs, _ = sample_and_classify_eval_batch(k1, params)
ks = onp.full([eval_batch_size], k)
tfmr_metrics = metrics.compute_masked_metrics(
cs, tfmr_cs, ks, ks*data_points_per_mode,
metrics=["pairwise_accuracy", "pairwise_f1",
"pairwise_macro_f1", "pairwise_micro_f1"])
for metric_name, metric_val in tfmr_metrics.items():
writer.scalar("transformer/%s" % metric_name,
metric_val, step=step)
print("Transformer %s: %0.3f" % (metric_name, metric_val))
plot_params(k*data_points_per_mode, writer, step, params, k2)
plot_sklearn_comparison(writer, step, params)
if step == 0:
summarize_baselines(writer, step, k3)
return summarize
def make_logdir(config):
basedir = config.logdir
exp_dir = (
"ring_nheads_%d_nencoders_%d_ndecoders_%d_num_modes_%d"
% (config.num_heads, config.num_encoders, config.num_decoders, config.k))
return os.path.join(basedir, exp_dir)
def main(unused_argv):
if FLAGS.debug_nans:
config.update("jax_debug_nans", True)
if FLAGS.parallel and train.can_train_parallel():
assert FLAGS.batch_size % jax.local_device_count(
) == 0, "Device count must evenly divide batch_size"
FLAGS.batch_size = int(FLAGS.batch_size / jax.local_device_count())
key = jax.random.PRNGKey(0)
key, subkey = jax.random.split(key)
model, init_params = make_model(
key,
num_encoders=FLAGS.num_encoders,
num_decoders=FLAGS.num_decoders,
num_heads=FLAGS.num_heads,
value_dim=FLAGS.value_dim_per_head*FLAGS.num_heads,
data_points_per_mode=FLAGS.data_points_per_mode,
k=FLAGS.k)
loss_fn = make_loss(
model,
k=FLAGS.k,
data_points_per_mode=FLAGS.data_points_per_mode,
batch_size=FLAGS.batch_size)
summarize_fn = make_summarize(
model,
k=FLAGS.k,
data_points_per_mode=FLAGS.data_points_per_mode,
eval_batch_size=FLAGS.eval_batch_size)
train.train_loop(
subkey,
init_params,
loss_fn,
parallel=FLAGS.parallel,
lr=FLAGS.lr,
num_steps=FLAGS.num_steps,
summarize_fn=summarize_fn,
summarize_every=FLAGS.summarize_every,
checkpoint_every=FLAGS.checkpoint_every,
clobber_checkpoint=FLAGS.clobber_checkpoint,
logdir=make_logdir(FLAGS))
if __name__ == "__main__":
app.run(main)
| apache-2.0 |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/numpy/lib/twodim_base.py | 83 | 26903 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
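    # Walking the flattened array with a stride of M+1 visits consecutive
    # entries of a diagonal; the start offset i places the ones on the k-th
    # diagonal (column k of row 0 for k >= 0, row -k of column 0 otherwise),
    # and the m[:M-k] slice stops the fill once the diagonal leaves the array
    # instead of wrapping onto the next row.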
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
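    # The outer comparison builds the boolean mask row_index >= col_index - k,
    # i.e. ones at and below the k-th diagonal; _min_int keeps the index
    # dtypes as small as possible to limit memory use for large N and M.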
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
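    # tmp views v with its columns in increasing-power order; column 0 is set
    # to x**0 == 1 and the remaining columns are filled with x and turned into
    # cumulative products along each row, so tmp[:, j] becomes x**j.  Writing
    # through the reversed view when increasing=False produces the decreasing
    # powers without an extra copy.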
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2D-histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 1.5, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(3, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
>>> print H[::-1] # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
[ 1. 2. 3. 4.]]
Imshow can only do an equidistant representation of bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131)
>>> ax.set_title('imshow: equidistant')
    >>> im = plt.imshow(H, interpolation='nearest', origin='lower',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
pcolormesh can display exact bin edges:
>>> ax = fig.add_subplot(132)
>>> ax.set_title('pcolormesh: exact bin edges')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
>>> ax.set_aspect('equal')
NonUniformImage displays exact bin edges with interpolation:
>>> ax = fig.add_subplot(133)
>>> ax.set_title('NonUniformImage: interpolated')
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
>>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> ax.set_xlim(xedges[0], xedges[-1])
>>> ax.set_ylim(yedges[0], yedges[-1])
>>> ax.set_aspect('equal')
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| artistic-2.0 |
edonyM/emthesis | code/3point2plane.py | 1 | 3545 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
r"""
# .---. .-----------
# / \ __ / ------
# / / \( )/ ----- (`-') _ _(`-') <-. (`-')_
# ////// '\/ ` --- ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // : : --- (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / / / `\/ '-- | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# // //..\\ (| '--. | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
# '//||\\` | `---. | '-' / ' '-' ' | | \ | `-/ /`
# ''`` `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - edonyzpc@gmail.com
#
# twitter : @edonyzpc
#
# Last modified: 2015-11-30 16:04
#
# Filename: 3point2plane.py
#
# Description: All Rights Are Reserved
#
"""
#import scipy as sp
#import math as m
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D as Ax3
#from scipy import stats as st
#from matplotlib import cm
import numpy as np
class PyColor(object):
""" This class is for colored print in the python interpreter!
"F3" call Addpy() function to add this class which is defined
in the .vimrc for vim Editor."""
def __init__(self):
self.self_doc = r"""
        STYLE: \033['display mode';'foreground';'background'm
DETAILS:
        FOREGROUND BACKGROUND COLOR
---------------------------------------
30 40 black
31 41 red
32 42 green
33 43 yellow
34 44 blue
35 45 purple
36 46 cyan
37 47 white
        DISPLAY MODE DETAILS
-------------------------
0 default
1 highlight
4 underline
5 flicker
7 reverse
        8 non-visible
e.g:
\033[1;31;40m <!--1-highlight;31-foreground red;40-background black-->
\033[0m <!--set all into default-->
"""
self.warningcolor = '\033[0;31m'
self.tipcolor = '\033[0;32m'
self.endcolor = '\033[0m'
self._newcolor = ''
@property
def new(self):
"""
Customized Python Print Color.
"""
return self._newcolor
@new.setter
def new(self, color_str):
"""
New Color.
"""
self._newcolor = color_str
def disable(self):
"""
Disable Color Print.
"""
self.warningcolor = ''
self.endcolor = ''
fig = plt.figure('3 point into plane')
ax = fig.gca(projection='3d')
X = np.arange(0, 10, 0.1)
Y = np.arange(0, 10, 0.1)
X, Y = np.meshgrid(X, Y)
Z = 5 - 0.3*X + 0.48*Y
p1 = [5.3, 0.1, 5-0.3*5.3+0.48*0.1]
p2 = [2.3, 0.7, 5-0.3*2.3+0.48*0.7]
p3 = [8.3, 3.1, 5-0.3*8.3+0.48*3.1]
ax.plot_surface(X, Y, Z, rstride=100, cstride=100, alpha=0.3)
ax.scatter(p1[0], p1[1], p1[2])
ax.scatter(p2[0], p2[1], p2[2])
ax.scatter(p3[0], p3[1], p3[2])
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
| mit |
lgeiger/ide-python | lib/debugger/VendorLib/vs-py-debugger/pythonFiles/experimental/ptvsd/ptvsd/_vendored/pydevd/pydev_ipython/matplotlibtools.py | 8 | 5428 |
import sys
backends = {'tk': 'TkAgg',
'gtk': 'GTKAgg',
'wx': 'WXAgg',
'qt': 'Qt4Agg', # qt3 not supported
'qt4': 'Qt4Agg',
'qt5': 'Qt5Agg',
'osx': 'MacOSX'}
# We also need a reverse backends2guis mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
backend2gui = dict(zip(backends.values(), backends.keys()))
backend2gui['Qt4Agg'] = 'qt4'
backend2gui['Qt5Agg'] = 'qt5'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
def do_enable_gui(guiname):
from _pydev_bundle.pydev_versioncheck import versionok_for_gui
if versionok_for_gui():
try:
from pydev_ipython.inputhook import enable_gui
enable_gui(guiname)
except:
sys.stderr.write("Failed to enable GUI event loop integration for '%s'\n" % guiname)
import traceback
traceback.print_exc()
elif guiname not in ['none', '', None]:
# Only print a warning if the guiname was going to do something
sys.stderr.write("Debug console: Python version does not support GUI event loop integration for '%s'\n" % guiname)
# Return value does not matter, so return back what was sent
return guiname
def find_gui_and_backend():
"""Return the gui and mpl backend."""
matplotlib = sys.modules['matplotlib']
# WARNING: this assumes matplotlib 1.1 or newer!!
backend = matplotlib.rcParams['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
return gui, backend
def is_interactive_backend(backend):
""" Check if backend is interactive """
matplotlib = sys.modules['matplotlib']
from matplotlib.rcsetup import interactive_bk, non_interactive_bk # @UnresolvedImport
if backend in interactive_bk:
return True
elif backend in non_interactive_bk:
return False
else:
return matplotlib.is_interactive()
def patch_use(enable_gui_function):
""" Patch matplotlib function 'use' """
matplotlib = sys.modules['matplotlib']
def patched_use(*args, **kwargs):
matplotlib.real_use(*args, **kwargs)
gui, backend = find_gui_and_backend()
enable_gui_function(gui)
matplotlib.real_use = matplotlib.use
matplotlib.use = patched_use
def patch_is_interactive():
""" Patch matplotlib function 'use' """
matplotlib = sys.modules['matplotlib']
def patched_is_interactive():
return matplotlib.rcParams['interactive']
matplotlib.real_is_interactive = matplotlib.is_interactive
matplotlib.is_interactive = patched_is_interactive
def activate_matplotlib(enable_gui_function):
"""Set interactive to True for interactive backends.
enable_gui_function - Function which enables gui, should be run in the main thread.
"""
matplotlib = sys.modules['matplotlib']
gui, backend = find_gui_and_backend()
is_interactive = is_interactive_backend(backend)
if is_interactive:
enable_gui_function(gui)
if not matplotlib.is_interactive():
sys.stdout.write("Backend %s is interactive backend. Turning interactive mode on.\n" % backend)
matplotlib.interactive(True)
else:
if matplotlib.is_interactive():
sys.stdout.write("Backend %s is non-interactive backend. Turning interactive mode off.\n" % backend)
matplotlib.interactive(False)
patch_use(enable_gui_function)
patch_is_interactive()
def flag_calls(func):
"""Wrap a function to detect and flag when it gets called.
This is a decorator which takes a function and wraps it in a function with
a 'called' attribute. wrapper.called is initialized to False.
The wrapper.called attribute is set to False right before each call to the
wrapped function, so if the call fails it remains False. After the call
completes, wrapper.called is set to True and the output is returned.
Testing for truth in wrapper.called allows you to determine if a call to
func() was attempted and succeeded."""
# don't wrap twice
if hasattr(func, 'called'):
return func
def wrapper(*args,**kw):
wrapper.called = False
out = func(*args,**kw)
wrapper.called = True
return out
wrapper.called = False
wrapper.__doc__ = func.__doc__
return wrapper
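# Illustrative use of the flag (a sketch, not part of the original module):
#
#     def draw():
#         pass
#     draw = flag_calls(draw)
#     assert draw.called is False
#     draw()
#     assert draw.called is True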
def activate_pylab():
pylab = sys.modules['pylab']
pylab.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pylab.draw_if_interactive = flag_calls(pylab.draw_if_interactive)
def activate_pyplot():
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pyplot.draw_if_interactive = flag_calls(pyplot.draw_if_interactive)
| mit |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py | 18 | 26105 | """
Experimental support for curvilinear grids.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
from itertools import chain
from .grid_finder import GridFinder
from .axislines import AxisArtistHelper, GridHelperBase
from .axis_artist import AxisArtist
from matplotlib.transforms import Affine2D, IdentityTransform
import numpy as np
from matplotlib.path import Path
class FixedAxisArtistHelper(AxisArtistHelper.Fixed):
"""
Helper class for a fixed axis.
"""
def __init__(self, grid_helper, side, nth_coord_ticks=None):
"""
nth_coord = along which coordinate value varies.
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(FixedAxisArtistHelper, self).__init__( \
loc=side,
)
self.grid_helper = grid_helper
if nth_coord_ticks is None:
nth_coord_ticks = self.nth_coord
self.nth_coord_ticks = nth_coord_ticks
self.side = side
self._limits_inverted = False
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
if self.nth_coord == 0:
xy1, xy2 = axes.get_ylim()
else:
xy1, xy2 = axes.get_xlim()
if xy1 > xy2:
self._limits_inverted = True
else:
self._limits_inverted = False
def change_tick_coord(self, coord_number=None):
if coord_number is None:
self.nth_coord_ticks = 1 - self.nth_coord_ticks
elif coord_number in [0, 1]:
self.nth_coord_ticks = coord_number
else:
raise Exception("wrong coord number")
def get_tick_transform(self, axes):
return axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label"""
g = self.grid_helper
if self._limits_inverted:
side = {"left":"right","right":"left",
"top":"bottom", "bottom":"top"}[self.side]
else:
side = self.side
ti1 = g.get_tick_iterator(self.nth_coord_ticks, side)
ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, side, minor=True)
#ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, self.side, minor=True)
return chain(ti1, ti2), iter([])
class FloatingAxisArtistHelper(AxisArtistHelper.Floating):
def __init__(self, grid_helper, nth_coord, value, axis_direction=None):
"""
nth_coord = along which coordinate value varies.
nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
"""
super(FloatingAxisArtistHelper, self).__init__(nth_coord,
value,
)
self.value = value
self.grid_helper = grid_helper
self._extremes = None, None
self._get_line_path = None # a method that returns a Path.
self._line_num_points = 100 # number of points to create a line
def set_extremes(self, e1, e2):
self._extremes = e1, e2
def update_lim(self, axes):
self.grid_helper.update_lim(axes)
x1, x2 = axes.get_xlim()
y1, y2 = axes.get_ylim()
grid_finder = self.grid_helper.grid_finder
extremes = grid_finder.extreme_finder(grid_finder.inv_transform_xy,
x1, y1, x2, y2)
extremes = list(extremes)
e1, e2 = self._extremes # ranges of other coordinates
if self.nth_coord == 0:
if e1 is not None:
extremes[2] = max(e1, extremes[2])
if e2 is not None:
extremes[3] = min(e2, extremes[3])
elif self.nth_coord == 1:
if e1 is not None:
extremes[0] = max(e1, extremes[0])
if e2 is not None:
extremes[1] = min(e2, extremes[1])
grid_info = dict()
lon_min, lon_max, lat_min, lat_max = extremes
lon_levs, lon_n, lon_factor = \
grid_finder.grid_locator1(lon_min, lon_max)
lat_levs, lat_n, lat_factor = \
grid_finder.grid_locator2(lat_min, lat_max)
grid_info["extremes"] = extremes
grid_info["lon_info"] = lon_levs, lon_n, lon_factor
grid_info["lat_info"] = lat_levs, lat_n, lat_factor
grid_info["lon_labels"] = grid_finder.tick_formatter1("bottom",
lon_factor,
lon_levs)
grid_info["lat_labels"] = grid_finder.tick_formatter2("bottom",
lat_factor,
lat_levs)
grid_finder = self.grid_helper.grid_finder
#e1, e2 = self._extremes # ranges of other coordinates
if self.nth_coord == 0:
xx0 = np.linspace(self.value, self.value, self._line_num_points)
yy0 = np.linspace(extremes[2], extremes[3], self._line_num_points)
xx, yy = grid_finder.transform_xy(xx0, yy0)
elif self.nth_coord == 1:
xx0 = np.linspace(extremes[0], extremes[1], self._line_num_points)
yy0 = np.linspace(self.value, self.value, self._line_num_points)
xx, yy = grid_finder.transform_xy(xx0, yy0)
grid_info["line_xy"] = xx, yy
self.grid_info = grid_info
def get_axislabel_transform(self, axes):
return Affine2D() #axes.transData
def get_axislabel_pos_angle(self, axes):
extremes = self.grid_info["extremes"]
if self.nth_coord == 0:
xx0 = self.value
yy0 = (extremes[2]+extremes[3])/2.
dxx, dyy = 0., abs(extremes[2]-extremes[3])/1000.
elif self.nth_coord == 1:
xx0 = (extremes[0]+extremes[1])/2.
yy0 = self.value
dxx, dyy = abs(extremes[0]-extremes[1])/1000., 0.
grid_finder = self.grid_helper.grid_finder
xx1, yy1 = grid_finder.transform_xy([xx0], [yy0])
trans_passingthrough_point = axes.transData + axes.transAxes.inverted()
p = trans_passingthrough_point.transform_point([xx1[0], yy1[0]])
if (0. <= p[0] <= 1.) and (0. <= p[1] <= 1.):
xx1c, yy1c = axes.transData.transform_point([xx1[0], yy1[0]])
xx2, yy2 = grid_finder.transform_xy([xx0+dxx], [yy0+dyy])
xx2c, yy2c = axes.transData.transform_point([xx2[0], yy2[0]])
return (xx1c, yy1c), np.arctan2(yy2c-yy1c, xx2c-xx1c)/np.pi*180.
else:
return None, None
def get_tick_transform(self, axes):
return IdentityTransform() #axes.transData
def get_tick_iterators(self, axes):
"""tick_loc, tick_angle, tick_label, (optionally) tick_label"""
grid_finder = self.grid_helper.grid_finder
lat_levs, lat_n, lat_factor = self.grid_info["lat_info"]
lat_levs = np.asarray(lat_levs)
if lat_factor is not None:
yy0 = lat_levs / lat_factor
dy = 0.01 / lat_factor
else:
yy0 = lat_levs
dy = 0.01
lon_levs, lon_n, lon_factor = self.grid_info["lon_info"]
lon_levs = np.asarray(lon_levs)
if lon_factor is not None:
xx0 = lon_levs / lon_factor
dx = 0.01 / lon_factor
else:
xx0 = lon_levs
dx = 0.01
if None in self._extremes:
e0, e1 = self._extremes
else:
e0, e1 = sorted(self._extremes)
if e0 is None:
e0 = -np.inf
if e1 is None:
e1 = np.inf
if self.nth_coord == 0:
mask = (e0 <= yy0) & (yy0 <= e1)
#xx0, yy0 = xx0[mask], yy0[mask]
yy0 = yy0[mask]
elif self.nth_coord == 1:
mask = (e0 <= xx0) & (xx0 <= e1)
#xx0, yy0 = xx0[mask], yy0[mask]
xx0 = xx0[mask]
def transform_xy(x, y):
x1, y1 = grid_finder.transform_xy(x, y)
x2y2 = axes.transData.transform(np.array([x1, y1]).transpose())
x2, y2 = x2y2.transpose()
return x2, y2
# find angles
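        # The angles are estimated by finite differences: each tick location
        # is shifted by a small step (dx or dy) in the source coordinates,
        # both points are mapped to display coordinates, and arctan2 of the
        # resulting displacement gives the local normal/tangent direction of
        # the grid line.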
if self.nth_coord == 0:
xx0 = np.empty_like(yy0)
xx0.fill(self.value)
xx1, yy1 = transform_xy(xx0, yy0)
xx00 = xx0.copy()
xx00[xx0+dx>e1] -= dx
xx1a, yy1a = transform_xy(xx00, yy0)
xx1b, yy1b = transform_xy(xx00+dx, yy0)
xx2a, yy2a = transform_xy(xx0, yy0)
xx2b, yy2b = transform_xy(xx0, yy0+dy)
labels = self.grid_info["lat_labels"]
labels = [l for l, m in zip(labels, mask) if m]
elif self.nth_coord == 1:
yy0 = np.empty_like(xx0)
yy0.fill(self.value)
xx1, yy1 = transform_xy(xx0, yy0)
xx1a, yy1a = transform_xy(xx0, yy0)
xx1b, yy1b = transform_xy(xx0, yy0+dy)
xx00 = xx0.copy()
xx00[xx0+dx>e1] -= dx
xx2a, yy2a = transform_xy(xx00, yy0)
xx2b, yy2b = transform_xy(xx00+dx, yy0)
labels = self.grid_info["lon_labels"]
labels = [l for l, m in zip(labels, mask) if m]
def f1():
dd = np.arctan2(yy1b-yy1a, xx1b-xx1a) # angle normal
dd2 = np.arctan2(yy2b-yy2a, xx2b-xx2a) # angle tangent
mm = ((yy1b-yy1a)==0.) & ((xx1b-xx1a)==0.) # mask where dd1 is not defined
dd[mm] = dd2[mm]+3.14159/2.
#dd = np.arctan2(yy2-yy1, xx2-xx1) # angle normal
#dd2 = np.arctan2(yy3-yy1, xx3-xx1) # angle tangent
#mm = ((yy2-yy1)==0.) & ((xx2-xx1)==0.) # mask where dd1 is not defined
#dd[mm] = dd2[mm]+3.14159/2.
#dd += 3.14159
#dd = np.arctan2(xx2-xx1, angle_tangent-yy1)
trans_tick = self.get_tick_transform(axes)
tr2ax = trans_tick + axes.transAxes.inverted()
for x, y, d, d2, lab in zip(xx1, yy1, dd, dd2, labels):
c2 = tr2ax.transform_point((x, y))
delta=0.00001
if (0. -delta<= c2[0] <= 1.+delta) and \
(0. -delta<= c2[1] <= 1.+delta):
d1 = d/3.14159*180.
d2 = d2/3.14159*180.
yield [x, y], d1, d2, lab
return f1(), iter([])
def get_line_transform(self, axes):
return axes.transData
def get_line(self, axes):
self.update_lim(axes)
x, y = self.grid_info["line_xy"]
if self._get_line_path is None:
return Path(list(zip(x, y)))
else:
return self._get_line_path(axes, x, y)
class GridHelperCurveLinear(GridHelperBase):
def __init__(self, aux_trans,
extreme_finder=None,
grid_locator1=None,
grid_locator2=None,
tick_formatter1=None,
tick_formatter2=None):
"""
aux_trans : a transform from the source (curved) coordinate to
target (rectilinear) coordinate. An instance of MPL's Transform
        (inverse transform should be defined) or a tuple of two callable
        objects which define the transform and its inverse. The callables
        should take two arrays of source coordinates and return two arrays
        of target coordinates:
        e.g., x2, y2 = trans(x1, y1)
"""
super(GridHelperCurveLinear, self).__init__()
self.grid_info = None
self._old_values = None
#self._grid_params = dict()
self._aux_trans = aux_trans
self.grid_finder = GridFinder(aux_trans,
extreme_finder,
grid_locator1,
grid_locator2,
tick_formatter1,
tick_formatter2)
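    # Illustrative usage sketch (added commentary, not part of the original
    # source): aux_trans may also be given as a pair of callables defining
    # the forward and inverse mappings, e.g.
    #   def tr(x, y):     return x, y - x
    #   def inv_tr(x, y): return x, y + x
    #   grid_helper = GridHelperCurveLinear((tr, inv_tr))
    # which mirrors the shear transform exercised by test3() below.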
def update_grid_finder(self, aux_trans=None, **kw):
if aux_trans is not None:
self.grid_finder.update_transform(aux_trans)
self.grid_finder.update(**kw)
self.invalidate()
def _update(self, x1, x2, y1, y2):
"bbox in 0-based image coordinates"
# update wcsgrid
if self.valid() and self._old_values == (x1, x2, y1, y2):
return
self._update_grid(x1, y1, x2, y2)
self._old_values = (x1, x2, y1, y2)
self._force_update = False
def new_fixed_axis(self, loc,
nth_coord=None,
axis_direction=None,
offset=None,
axes=None):
if axes is None:
axes = self.axes
if axis_direction is None:
axis_direction = loc
_helper = FixedAxisArtistHelper(self, loc,
#nth_coord,
nth_coord_ticks=nth_coord,
)
axisline = AxisArtist(axes, _helper, axis_direction=axis_direction)
return axisline
def new_floating_axis(self, nth_coord,
value,
axes=None,
axis_direction="bottom"
):
if axes is None:
axes = self.axes
_helper = FloatingAxisArtistHelper( \
self, nth_coord, value, axis_direction)
axisline = AxisArtist(axes, _helper)
#_helper = FloatingAxisArtistHelper(self, nth_coord,
# value,
# label_direction=label_direction,
# )
#axisline = AxisArtistFloating(axes, _helper,
# axis_direction=axis_direction)
axisline.line.set_clip_on(True)
axisline.line.set_clip_box(axisline.axes.bbox)
#axisline.major_ticklabels.set_visible(True)
#axisline.minor_ticklabels.set_visible(False)
#axisline.major_ticklabels.set_rotate_along_line(True)
#axisline.set_rotate_label_along_line(True)
return axisline
def _update_grid(self, x1, y1, x2, y2):
self.grid_info = self.grid_finder.get_grid_info(x1, y1, x2, y2)
def get_gridlines(self, which="major", axis="both"):
grid_lines = []
if axis in ["both", "x"]:
for gl in self.grid_info["lon"]["lines"]:
grid_lines.extend(gl)
if axis in ["both", "y"]:
for gl in self.grid_info["lat"]["lines"]:
grid_lines.extend(gl)
return grid_lines
def get_tick_iterator(self, nth_coord, axis_side, minor=False):
#axisnr = dict(left=0, bottom=1, right=2, top=3)[axis_side]
angle_tangent = dict(left=90, right=90, bottom=0, top=0)[axis_side]
#angle = [0, 90, 180, 270][axisnr]
lon_or_lat = ["lon", "lat"][nth_coord]
if not minor: # major ticks
def f():
for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
angle_normal = a
yield xy, angle_normal, angle_tangent, l
else:
def f():
for (xy, a), l in zip(self.grid_info[lon_or_lat]["tick_locs"][axis_side],
self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
angle_normal = a
yield xy, angle_normal, angle_tangent, ""
#for xy, a, l in self.grid_info[lon_or_lat]["ticks"][axis_side]:
# yield xy, a, ""
return f()
def test3():
import numpy as np
from matplotlib.transforms import Transform
from matplotlib.path import Path
class MyTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
            Create a new transform.  Resolution is the number of steps
            to interpolate between each input line segment to approximate its
            path in the curved target space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
x = ll[:, 0:1]
y = ll[:, 1:2]
return np.concatenate((x, y-x), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return MyTransformInv(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class MyTransformInv(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
x = ll[:, 0:1]
y = ll[:, 1:2]
return np.concatenate((x, y+x), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return MyTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
import matplotlib.pyplot as plt
fig = plt.figure(1)
fig.clf()
tr = MyTransform(1)
grid_helper = GridHelperCurveLinear(tr)
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot_class_factory
from .axislines import Axes
SubplotHost = host_subplot_class_factory(Axes)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
fig.add_subplot(ax1)
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
ax1.parasites.append(ax2)
ax2.plot([3, 6], [5.0, 10.])
ax1.set_aspect(1.)
ax1.set_xlim(0, 10)
ax1.set_ylim(0, 10)
ax1.grid(True)
plt.draw()
def curvelinear_test2(fig):
"""
polar projection, but in a rectangular box.
"""
global ax1
import numpy as np
from . import angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost, \
ParasiteAxesAuxTrans
import matplotlib.cbook as cbook
    # PolarAxes.PolarTransform takes radians. However, we want our coordinate
    # system in degrees
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
    # polar projection, which involves a cycle, and also has limits in
# its coordinates, needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(5)
    # Find grid values appropriate for the coordinate (degree,
    # minute, second).
tick_formatter1 = angle_helper.FormatterDMS()
    # And also use an appropriate formatter.  Note that the
    # acceptable Locator and Formatter classes are slightly different from
    # mpl's, and you cannot directly use mpl's Locator and Formatter here
    # (but that may become possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
# make ticklabels of right and top axis visible.
ax1.axis["right"].major_ticklabels.set_visible(True)
ax1.axis["top"].major_ticklabels.set_visible(True)
# let right axis shows ticklabels for 1st coordinate (angle)
ax1.axis["right"].get_helper().nth_coord_ticks=0
# let bottom axis shows ticklabels for 2nd coordinate (radius)
ax1.axis["bottom"].get_helper().nth_coord_ticks=1
fig.add_subplot(ax1)
grid_helper = ax1.get_grid_helper()
ax1.axis["lat"] = axis = grid_helper.new_floating_axis(0, 60, axes=ax1)
axis.label.set_text("Test")
axis.label.set_visible(True)
#axis._extremes = 2, 10
#axis.label.set_text("Test")
#axis.major_ticklabels.set_visible(False)
#axis.major_ticks.set_visible(False)
axis.get_helper()._extremes=2, 10
ax1.axis["lon"] = axis = grid_helper.new_floating_axis(1, 6, axes=ax1)
#axis.major_ticklabels.set_visible(False)
#axis.major_ticks.set_visible(False)
axis.label.set_text("Test 2")
axis.get_helper()._extremes=-180, 90
# A parasite axes with given transform
ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# note that ax2.transData == tr + ax1.transData
    # Anything you draw in ax2 will match the ticks and grids of ax1.
ax1.parasites.append(ax2)
intp = cbook.simple_linear_interpolation
ax2.plot(intp(np.array([0, 30]), 50),
intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
def curvelinear_test3(fig):
"""
polar projection, but in a rectangular box.
"""
global ax1, axis
import numpy as np
from . import angle_helper
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost
    # PolarAxes.PolarTransform takes radians. However, we want our coordinate
    # system in degrees
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
    # polar projection, which involves a cycle, and also has limits in
# its coordinates, needs a special method to find the extremes
# (min, max of the coordinate within the view).
# 20, 20 : number of sampling points along x, y direction
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(12)
    # Find grid values appropriate for the coordinate (degree,
    # minute, second).
tick_formatter1 = angle_helper.FormatterDMS()
    # And also use an appropriate formatter.  Note that the
    # acceptable Locator and Formatter classes are slightly different from
    # mpl's, and you cannot directly use mpl's Locator and Formatter here
    # (but that may become possible in the future).
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
tick_formatter1=tick_formatter1
)
ax1 = SubplotHost(fig, 1, 1, 1, grid_helper=grid_helper)
for axis in list(six.itervalues(ax1.axis)):
axis.set_visible(False)
fig.add_subplot(ax1)
grid_helper = ax1.get_grid_helper()
ax1.axis["lat1"] = axis = grid_helper.new_floating_axis(0, 130,
axes=ax1,
axis_direction="left"
)
axis.label.set_text("Test")
axis.label.set_visible(True)
axis.get_helper()._extremes=0.001, 10
grid_helper = ax1.get_grid_helper()
ax1.axis["lat2"] = axis = grid_helper.new_floating_axis(0, 50, axes=ax1,
axis_direction="right")
axis.label.set_text("Test")
axis.label.set_visible(True)
axis.get_helper()._extremes=0.001, 10
ax1.axis["lon"] = axis = grid_helper.new_floating_axis(1, 10,
axes=ax1,
axis_direction="bottom")
axis.label.set_text("Test 2")
axis.get_helper()._extremes= 50, 130
axis.major_ticklabels.set_axis_direction("top")
axis.label.set_axis_direction("top")
grid_helper.grid_finder.grid_locator1.den = 5
grid_helper.grid_finder.grid_locator2._nbins = 5
# # A parasite axes with given transform
# ax2 = ParasiteAxesAuxTrans(ax1, tr, "equal")
# # note that ax2.transData == tr + ax1.transData
    # # Anything you draw in ax2 will match the ticks and grids of ax1.
# ax1.parasites.append(ax2)
# intp = cbook.simple_linear_interpolation
# ax2.plot(intp(np.array([0, 30]), 50),
# intp(np.array([10., 10.]), 50))
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
ax1.grid(True)
if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure(1, figsize=(5, 5))
fig.clf()
#test3()
#curvelinear_test2(fig)
curvelinear_test3(fig)
#plt.draw()
plt.show()
| gpl-2.0 |
michaelhuang/QuantSoftwareToolkit | Examples/Basic/tutorial3.py | 4 | 3612 | '''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on January, 24, 2013
@author: Sourabh Bajaj
@contact: sourabhbajaj@gatech.edu
@summary: Example tutorial code.
'''
# QSTK Imports
import QSTK.qstkutil.qsdateutil as du
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkutil.DataAccess as da
# Third Party Imports
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
def main():
''' Main Function'''
# Reading the portfolio
na_portfolio = np.loadtxt('tutorial3portfolio.csv', dtype='S5,f4',
delimiter=',', comments="#", skiprows=1)
print na_portfolio
# Sorting the portfolio by symbol name
na_portfolio = sorted(na_portfolio, key=lambda x: x[0])
print na_portfolio
# Create two list for symbol names and allocation
ls_port_syms = []
lf_port_alloc = []
for port in na_portfolio:
ls_port_syms.append(port[0])
lf_port_alloc.append(port[1])
# Creating an object of the dataaccess class with Yahoo as the source.
c_dataobj = da.DataAccess('Yahoo')
ls_all_syms = c_dataobj.get_all_symbols()
# Bad symbols are symbols present in portfolio but not in all syms
ls_bad_syms = list(set(ls_port_syms) - set(ls_all_syms))
if len(ls_bad_syms) != 0:
print "Portfolio contains bad symbols : ", ls_bad_syms
for s_sym in ls_bad_syms:
i_index = ls_port_syms.index(s_sym)
ls_port_syms.pop(i_index)
lf_port_alloc.pop(i_index)
# Reading the historical data.
dt_end = dt.datetime(2011, 1, 1)
dt_start = dt_end - dt.timedelta(days=1095) # Three years
# We need closing prices so the timestamp should be hours=16.
dt_timeofday = dt.timedelta(hours=16)
# Get a list of trading days between the start and the end.
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt_timeofday)
# Keys to be read from the data, it is good to read everything in one go.
ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
# Reading the data, now d_data is a dictionary with the keys above.
# Timestamps and symbols are the ones that were specified before.
ldf_data = c_dataobj.get_data(ldt_timestamps, ls_port_syms, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
# Copying close price into separate dataframe to find rets
df_rets = d_data['close'].copy()
# Filling the data.
df_rets = df_rets.fillna(method='ffill')
df_rets = df_rets.fillna(method='bfill')
df_rets = df_rets.fillna(1.0)
# Numpy matrix of filled data values
na_rets = df_rets.values
# returnize0 works on ndarray and not dataframes.
tsu.returnize0(na_rets)
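    # returnize0 converts the price matrix into stepwise (daily) returns in
    # place, roughly r_t = p_t / p_{t-1} - 1 with the first row set to zero,
    # hence the trailing "0" in the name.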
# Estimate portfolio returns
na_portrets = np.sum(na_rets * lf_port_alloc, axis=1)
na_port_total = np.cumprod(na_portrets + 1)
na_component_total = np.cumprod(na_rets + 1, axis=0)
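    # Compounding (1 + r) across days turns the daily returns back into a
    # normalised cumulative-value series starting at 1.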
# Plotting the results
plt.clf()
fig = plt.figure()
fig.add_subplot(111)
plt.plot(ldt_timestamps, na_component_total, alpha=0.4)
plt.plot(ldt_timestamps, na_port_total)
ls_names = ls_port_syms
ls_names.append('Portfolio')
plt.legend(ls_names)
plt.ylabel('Cumulative Returns')
plt.xlabel('Date')
fig.autofmt_xdate(rotation=45)
plt.savefig('tutorial3.pdf', format='pdf')
if __name__ == '__main__':
main()
| bsd-3-clause |
LeSam/avoplot | src/avoplot/gui/analysis_tools.py | 3 | 4491 | #Copyright (C) Nial Peters 2013
#
#This file is part of AvoPlot.
#
#AvoPlot is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#AvoPlot is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with AvoPlot. If not, see <http://www.gnu.org/licenses/>.
"""
This module is still under construction! Eventually, it will contain a set of
data analysis tools for working with data.
"""
#The DataFollower class is still under construction - come back soon!
#class DataFollower:
# def __init__(self):
# self.line = None
#
# def connect(self, axes):
# self.axes = axes
#
# self.cid = self.axes.figure.canvas.mpl_connect('motion_notify_event', self.on_motion)
#
# def on_motion(self, event):
# if event.inaxes != self.axes: return
# if self.line is None:
# self.line, = self.axes.plot([event.xdata] * 2, self.axes.get_ylim(), 'k-')
# self.line.set_animated(True)
#
#
# trans = self.line.get_transform()
# inv_trans = trans.inverted()
#
# x0, y0 = inv_trans.transform_point([event.xdata - 10, self.axes.get_ylim()[1]])
# x1, y1 = inv_trans.transform_point([event.xdata + 10, self.axes.get_ylim()[0]])
# print "untransformed 0 = ", x0, y0
# print "untransformed 0 = ", x1, y1
# #add the line width to it
# #x0,y0 = trans.transform_point([x0-(self.line.get_linewidth()/2.0),y0])
# #x1,y1 = trans.transform_point([x1+(self.line.get_linewidth()/2.0),y1])
#
# #print "transformed 0 = ",x0,y0
# #print "transformed 0 = ",x1,y1
# bbox = matplotlib.transforms.Bbox([[x0, y0], [x1, y1]])
# #bbox.update_from_data_xy([[x0,y0],[x1,y1]])
# self.background = self.axes.figure.canvas.copy_from_bbox(self.line.axes.bbox)
# print self.background
# self.region_to_restore = bbox
# print bbox
#
# #print self.line.axes.bbox
# #print self.line.axes.bbox.bbox.update_from_data(numpy.array([[event.xdata-10, event.xdata+10],[event.xdata+10, self.axes.get_ylim()[1]]]))
# #print self.line.axes.bbox
# else:
# self.line.set_xdata([event.xdata] * 2,)
# self.line.set_ydata(self.line.axes.get_ylim())
#
## x0, xpress, ypress = self.press
## dx = event.xdata - xpress
## dy = event.ydata - ypress
##
## self.line.set_xdata([x0[0] + dx]*2)
## self.line.set_ydata(self.line.axes.get_ylim())
## self.line.set_linestyle('--')
##
# canvas = self.line.figure.canvas
# axes = self.line.axes
## # restore the background region
# self.axes.figure.canvas.restore_region(self.background, bbox=self.region_to_restore)
##
## # redraw just the current rectangle
# axes.draw_artist(self.line)
##
## # blit just the redrawn area
# canvas.blit(self.axes.bbox)
#
# trans = self.line.get_transform()
# inv_trans = trans.inverted()
#
# x0, y0 = self.axes.transData.transform([event.xdata - 10, self.axes.get_ylim()[1]])
# x1, y1 = self.axes.transData.transform([event.xdata + 10, self.axes.get_ylim()[0]])
# print "untransformed 0 = ", x0, y0
# print "untransformed 0 = ", x1, y1
# #add the line width to it
# #x0,y0 = trans.transform_point([x0-(self.line.get_linewidth()/2.0),y0])
# #x1,y1 = trans.transform_point([x1+(self.line.get_linewidth()/2.0),y1])
#
# #print "transformed 0 = ",x0,y0
# #print "transformed 0 = ",x1,y1
# bbox = matplotlib.transforms.Bbox([[x0, y0], [x1, y1]])
# #bbox.update_from_data_xy([[x0,y0],[x1,y1]])
# self.region_to_restore = bbox
#
#
# def disconnect(self):
# self.axes.figure.canvas.mpl_disconnect(self.cid)
# self.axes.figure.canvas.restore_region(self.background)
# self.axes.figure.canvas.blit(self.axes.bbox)
# pass | gpl-3.0 |
OTAkeys/RIOT | tests/pkg_utensor/generate_digit.py | 19 | 1149 | #!/usr/bin/env python3
"""Generate a binary file from a sample image of the MNIST dataset.
Pixels of the sample are stored as float32; images have size 28x28.
"""
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def main(args):
_, (mnist_test, _) = tf.keras.datasets.mnist.load_data()
data = mnist_test[args.index]
output_path = os.path.join(SCRIPT_DIR, args.output)
np.ndarray.tofile(data.astype('float32'), output_path)
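    # The output file holds 28*28 float32 values (3136 bytes) in native byte
    # order and can be read back with
    # np.fromfile(output_path, dtype='float32').reshape(28, 28).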
if args.no_plot is False:
plt.gray()
plt.imshow(data.reshape(28, 28))
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--index", type=int, default=0,
help="Image index in MNIST test dataset")
parser.add_argument("-o", "--output", type=str, default='digit',
help="Output filename")
parser.add_argument("--no-plot", default=False, action='store_true',
help="Disable image display in matplotlib")
main(parser.parse_args())
| lgpl-2.1 |
abyssxsy/gnuradio | gr-utils/python/utils/plot_fft_base.py | 53 | 10449 | #!/usr/bin/env python
#
# Copyright 2007,2008,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
try:
import scipy
from scipy import fftpack
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
class plot_fft_base:
def __init__(self, datatype, filename, options):
self.hfile = open(filename, "r")
self.block_length = options.block
self.start = options.start
self.sample_rate = options.sample_rate
self.datatype = getattr(scipy, datatype)
self.sizeof_data = self.datatype().nbytes # number of bytes per sample in file
self.axis_font_size = 16
self.label_font_size = 18
self.title_font_size = 20
self.text_size = 22
# Setup PLOT
self.fig = figure(1, figsize=(16, 12), facecolor='w')
rcParams['xtick.labelsize'] = self.axis_font_size
rcParams['ytick.labelsize'] = self.axis_font_size
self.text_file = figtext(0.10, 0.94, ("File: %s" % filename), weight="heavy", size=self.text_size)
self.text_file_pos = figtext(0.10, 0.88, "File Position: ", weight="heavy", size=self.text_size)
self.text_block = figtext(0.35, 0.88, ("Block Size: %d" % self.block_length),
weight="heavy", size=self.text_size)
self.text_sr = figtext(0.60, 0.88, ("Sample Rate: %.2f" % self.sample_rate),
weight="heavy", size=self.text_size)
self.make_plots()
self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
self.button_left = Button(self.button_left_axes, "<")
self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
self.button_right = Button(self.button_right_axes, ">")
self.button_right_callback = self.button_right.on_clicked(self.button_right_click)
self.xlim = self.sp_iq.get_xlim()
self.manager = get_current_fig_manager()
connect('draw_event', self.zoom)
connect('key_press_event', self.click)
show()
def get_data(self):
self.position = self.hfile.tell()/self.sizeof_data
self.text_file_pos.set_text("File Position: %d" % (self.position))
try:
self.iq = scipy.fromfile(self.hfile, dtype=self.datatype, count=self.block_length)
except MemoryError:
print "End of File"
else:
self.iq_fft = self.dofft(self.iq)
tstep = 1.0 / self.sample_rate
#self.time = scipy.array([tstep*(self.position + i) for i in xrange(len(self.iq))])
self.time = scipy.array([tstep*(i) for i in xrange(len(self.iq))])
self.freq = self.calc_freq(self.time, self.sample_rate)
def dofft(self, iq):
N = len(iq)
iq_fft = scipy.fftpack.fftshift(scipy.fft(iq)) # fft and shift axis
iq_fft = 20*scipy.log10(abs((iq_fft+1e-15)/N)) # convert to decibels, adjust power
# adding 1e-15 (-300 dB) to protect against value errors if an item in iq_fft is 0
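        # 20*log10(|X|/N) expresses the magnitude of the length-N FFT in dB;
        # dividing by N undoes the length-N scaling of the unnormalised FFT
        # so levels are comparable across block sizes.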
return iq_fft
def calc_freq(self, time, sample_rate):
N = len(time)
Fs = 1.0 / (time.max() - time.min())
Fn = 0.5 * sample_rate
freq = scipy.array([-Fn + i*Fs for i in xrange(N)])
return freq
def make_plots(self):
# if specified on the command-line, set file pointer
self.hfile.seek(self.sizeof_data*self.start, 1)
# Subplot for real and imaginary parts of signal
self.sp_iq = self.fig.add_subplot(2,2,1, position=[0.075, 0.2, 0.4, 0.6])
self.sp_iq.set_title(("I&Q"), fontsize=self.title_font_size, fontweight="bold")
self.sp_iq.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_iq.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
# Subplot for FFT plot
self.sp_fft = self.fig.add_subplot(2,2,2, position=[0.575, 0.2, 0.4, 0.6])
self.sp_fft.set_title(("FFT"), fontsize=self.title_font_size, fontweight="bold")
self.sp_fft.set_xlabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold")
self.sp_fft.set_ylabel("Power Spectrum (dBm)", fontsize=self.label_font_size, fontweight="bold")
self.get_data()
self.plot_iq = self.sp_iq.plot([], 'bo-') # make plot for reals
self.plot_iq += self.sp_iq.plot([], 'ro-') # make plot for imags
self.draw_time() # draw the plot
self.plot_fft = self.sp_fft.plot([], 'bo-') # make plot for FFT
self.draw_fft() # draw the plot
draw()
def draw_time(self):
reals = self.iq.real
imags = self.iq.imag
self.plot_iq[0].set_data([self.time, reals])
self.plot_iq[1].set_data([self.time, imags])
self.sp_iq.set_xlim(self.time.min(), self.time.max())
self.sp_iq.set_ylim([1.5*min([reals.min(), imags.min()]),
1.5*max([reals.max(), imags.max()])])
def draw_fft(self):
self.plot_fft[0].set_data([self.freq, self.iq_fft])
self.sp_fft.set_xlim(self.freq.min(), self.freq.max())
self.sp_fft.set_ylim([self.iq_fft.min()-10, self.iq_fft.max()+10])
def update_plots(self):
self.draw_time()
self.draw_fft()
self.xlim = self.sp_iq.get_xlim()
draw()
def zoom(self, event):
newxlim = scipy.array(self.sp_iq.get_xlim())
curxlim = scipy.array(self.xlim)
if(newxlim[0] != curxlim[0] or newxlim[1] != curxlim[1]):
self.xlim = newxlim
#xmin = max(0, int(ceil(self.sample_rate*(self.xlim[0] - self.position))))
#xmax = min(int(ceil(self.sample_rate*(self.xlim[1] - self.position))), len(self.iq))
xmin = max(0, int(ceil(self.sample_rate*(self.xlim[0]))))
xmax = min(int(ceil(self.sample_rate*(self.xlim[1]))), len(self.iq))
iq = self.iq[xmin : xmax]
time = self.time[xmin : xmax]
iq_fft = self.dofft(iq)
freq = self.calc_freq(time, self.sample_rate)
self.plot_fft[0].set_data(freq, iq_fft)
self.sp_fft.axis([freq.min(), freq.max(),
iq_fft.min()-10, iq_fft.max()+10])
draw()
def click(self, event):
forward_valid_keys = [" ", "down", "right"]
backward_valid_keys = ["up", "left"]
if(find(event.key, forward_valid_keys)):
self.step_forward()
elif(find(event.key, backward_valid_keys)):
self.step_backward()
def button_left_click(self, event):
self.step_backward()
def button_right_click(self, event):
self.step_forward()
def step_forward(self):
self.get_data()
self.update_plots()
def step_backward(self):
# Step back in file position
if(self.hfile.tell() >= 2*self.sizeof_data*self.block_length ):
self.hfile.seek(-2*self.sizeof_data*self.block_length, 1)
else:
self.hfile.seek(-self.hfile.tell(),1)
self.get_data()
self.update_plots()
@staticmethod
def setup_options():
usage="%prog: [options] input_filename"
description = "Takes a GNU Radio complex binary file and displays the I&Q data versus time as well as the frequency domain (FFT) plot. The y-axis values are plotted assuming volts as the amplitude of the I&Q streams and converted into dBm in the frequency domain (the 1/N power adjustment out of the FFT is performed internally). The script plots a certain block of data at a time, specified on the command line as -B or --block. This value defaults to 1000. The start position in the file can be set by specifying -s or --start and defaults to 0 (the start of the file). By default, the system assumes a sample rate of 1, so in time, each sample is plotted versus the sample number. To set a true time and frequency axis, set the sample rate (-R or --sample-rate) to the sample rate used when capturing the samples."
parser = OptionParser(conflict_handler="resolve", usage=usage, description=description)
parser.add_option("-d", "--data-type", type="string", default="complex64",
help="Specify the data type (complex64, float32, (u)int32, (u)int16, (u)int8) [default=%default]")
parser.add_option("-B", "--block", type="int", default=1000,
help="Specify the block size [default=%default]")
parser.add_option("-s", "--start", type="int", default=0,
help="Specify where to start in the file [default=%default]")
parser.add_option("-R", "--sample-rate", type="float", default=1.0,
help="Set the sampler rate of the data [default=%default]")
return parser
def find(item_in, list_search):
try:
return list_search.index(item_in) != None
except ValueError:
return False
def main():
parser = plot_fft_base.setup_options()
(options, args) = parser.parse_args ()
if len(args) != 1:
parser.print_help()
raise SystemExit, 1
filename = args[0]
dc = plot_fft_base(options.data_type, filename, options)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
rfriesen/DR1_analysis | property_histograms.py | 2 | 7527 | from astropy.io import fits
import aplpy
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import astropy.units as u
import astropy.constants as c
import warnings
import numpy as np
from astropy.visualization import hist
from config import plottingDictionary
"""
Make histogram plots of NH3-derived properties for DR1 regions
"""
def mask_hist(par_data,epar_data,epar_lim,par_max,par_min=0):
mask1 = np.isfinite(epar_data)
mask2 = epar_data < epar_lim
mask3 = epar_data > 0
mask4 = par_data < par_max
mask5 = par_data > par_min
return par_data * mask1 * mask2 * mask3 * mask4 * mask5
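# Note (added commentary): mask_hist multiplies the data by boolean masks, so
# pixels failing any cut are set to 0 rather than removed; the code below
# therefore selects par_masked[par_masked != 0] before histogramming.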
region_list = ['B18','NGC1333','L1688','OrionA']
par_list = ['Vlsr','Sigma','Tkin','Tex','N_NH3']
epar_ext = [10,9,6,7,8]
epar_limits = [0.05,0.1,1,2,0.5]
extension = 'DR1_rebase3'
label_list=['$v_{LSR}$ (km s$^{-1}$)','$\sigma_v$ (km s$^{-1}$)','$T_K$ (K)','$T_{ex}$ (K)','log N(para-NH$_3$) (cm$^{-2}$)']
label_short = ['$v_{LSR}$','$\sigma_v$','$T_K$','$T_{ex}$','log N(para-NH$_3$)']
plot_colours = ['black','blue','green','orange']
#plot_colours = ['black','darkblue','blue','cornflowerblue']
#plot_colours = ['#a6cee3', '#fdbf6f', '#33a02c', '#fb9a99']
hist_minx_list = [2,0,5,2.7,13]
hist_maxx_list = [13,1.5,37,12,15.7]
hist_maxy_list = [2.1,8,0.65,1.6,2]
ytick_int_maj = [0.4,2,0.2,0.4,0.5]
ytick_int_min = [0.1,0.5,0.025,0.1,0.25]
dataDir = ''
hist_kwds1 = dict(histtype='stepfilled',alpha=0.2,normed=True)
# Separate regions in plots to better show distributions
for par_i in range(len(par_list)):
fig,axes = plt.subplots(len(region_list),1,figsize=(4,5))
par = par_list[par_i]
label = label_list[par_i]
ylabel = label_short[par_i]
for i, ax in enumerate(fig.axes):
region_i = i
region = region_list[region_i]
plot_param=plottingDictionary[region]
par_file = dataDir + '{0}/parameterMaps/{0}_{1}_{2}_flag.fits'.format(region,par,extension)
epar_file = dataDir + '{0}/{0}_parameter_maps_{1}_trim.fits'.format(region,extension)
epar_hdu = fits.open(epar_file)
epar_data = epar_hdu[0].data[epar_ext[par_i],:,:]
epar_hdu.close()
par_hdu = fits.open(par_file)
par_data = par_hdu[0].data
par_hdu.close()
pmin_list = plot_param['pmin_list']
pmax_list = plot_param['pmax_list']
pmin = np.max([pmin_list[par_i],np.nanmin(par_data)])
pmax = np.min([pmax_list[par_i],np.nanmax(par_data)])
par_masked = mask_hist(par_data,epar_data,epar_limits[par_i],pmax,par_min=pmin)
par_masked = par_masked[np.isfinite(par_masked)]
if par == 'Vlsr':
bin_width = 0.3
            nbins = np.int((np.max(par_masked) - np.min(par_masked[par_masked != 0]))/bin_width)
hist(par_masked[par_masked !=0],bins=nbins,ax=ax,histtype='stepfilled',alpha=0.3,
color=plot_colours[region_i],label=region,normed=True)
else:
hist(par_masked[par_masked !=0],bins='knuth',ax=ax,histtype='stepfilled',alpha=0.3,
color=plot_colours[region_i],label=region,normed=True)
if (i+1) != len(region_list):
ax.set_xticklabels([])
ax.set_xlim(hist_minx_list[par_i],hist_maxx_list[par_i])
#ax.set_ylim(0,hist_maxy_list[par_i])
if par == 'Tkin':
if region == 'OrionA' or region == 'L1688' or region == 'NGC1333':
ax.set_ylim(0,0.25)
ax.yaxis.set_major_locator(ticker.MultipleLocator(ytick_int_maj[par_i]))
ax.yaxis.set_minor_locator(ticker.MultipleLocator(ytick_int_min[par_i]))
ax.annotate('{0}'.format(region),xy=(0.97,0.7),xycoords='axes fraction',horizontalalignment='right')
#ax.legend(frameon=False)
ax.set_xlabel(label)
fig.text(0.001,0.5,'P ({0})'.format(ylabel),va='center',rotation='vertical')
#fig.tight_layout()
fig.savefig('figures/{0}_histogram_separated.pdf'.format(par))
plt.close('all')
# Same plot for X(NH3)
# Need to apply Tex-based mask to get rid of some noisy data
fig,axes = plt.subplots(len(region_list),1,figsize=(4,5))
for i, ax in enumerate(fig.axes):
region_i = i
region = region_list[region_i]
plot_param=plottingDictionary[region]
par_file = dataDir + '{0}/parameterMaps/{0}_XNH3_{1}.fits'.format(region,extension)
epar_file = dataDir + '{0}/parameterMaps/{0}_eTex_{1}_flag.fits'.format(region,extension)
par_hdu = fits.open(par_file)
par_data = par_hdu[0].data
par_hdu.close()
epar_hdu = fits.open(epar_file)
epar_data = epar_hdu[0].data
epar_hdu.close()
pmin = -9.5
pmax = -6.5
#par_data[par_data == 0] = np.nan
par_masked = mask_hist(par_data,epar_data,epar_limits[3],pmax,par_min=pmin)
par_masked = par_masked[np.isfinite(par_masked)]
hist(par_masked[par_masked !=0],bins='knuth',ax=ax,histtype='stepfilled',alpha=0.3,
color=plot_colours[region_i],label=region,normed=True)
if (i+1) != len(region_list):
ax.set_xticklabels([])
ax.set_xlim(pmin,pmax)
#ax.set_ylim(0,hist_maxy_list[par_i])
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_minor_locator(ticker.MultipleLocator(0.5))
ax.annotate('{0}'.format(region),xy=(0.97,0.7),xycoords='axes fraction',horizontalalignment='right')
#ax.legend(frameon=False)
ax.set_xlabel('log $X$(NH$_3$)')
fig.text(0.01,0.5,'P($X$(NH$_3$))',va='center',rotation='vertical')
#fig.tight_layout()
fig.savefig('figures/XNH3_histogram_separated.pdf',bbox_inches='tight')
plt.close('all')
'''
# All together
fig = plt.figure(figsize=(6.5,8))
for par_i in range(len(par_list)):
par = par_list[par_i]
label = label_list[par_i]
#fig = plt.figure()
#ax = plt.gca()
ax = plt.subplot(3,2,par_i+1)
for region_i in range(len(region_list)):
region = region_list[region_i]
plot_param=plottingDictionary[region]
par_file = dataDir + '{0}/parameterMaps/{0}_{1}_{2}_flag.fits'.format(region,par,extension)
epar_file = dataDir + '{0}/{0}_parameter_maps_{1}_trim.fits'.format(region,extension)
epar_hdu = fits.open(epar_file)
epar_data = epar_hdu[0].data[epar_ext[par_i],:,:]
epar_hdu.close()
par_hdu = fits.open(par_file)
par_data = par_hdu[0].data
par_hdu.close()
pmin_list = plot_param['pmin_list']
pmax_list = plot_param['pmax_list']
pmin = np.max([pmin_list[par_i],np.nanmin(par_data)])
pmax = np.min([pmax_list[par_i],np.nanmax(par_data)])
par_masked = mask_hist(par_data,epar_data,epar_limits[par_i],pmax,par_min=pmin)
par_masked = par_masked[np.isfinite(par_masked)]
if par == 'Vlsr':
bin_width = 0.3
nbins = np.int((np.max(par_masked) - np.min(par_masked !=0))/bin_width)
hist(par_masked[par_masked !=0],bins=nbins,ax=ax,histtype='stepfilled',alpha=0.3,
normed=True,color=plot_colours[region_i],label=region)
else:
hist(par_masked[par_masked !=0],bins='knuth',ax=ax,histtype='stepfilled',alpha=0.3,
normed=True,color=plot_colours[region_i],label=region)
ax.set_xlabel(label)
ax.set_ylabel('P(t)')
#ax.set_ylabel('N')
ax.set_xlim(hist_minx_list[par_i],hist_maxx_list[par_i])
ax.set_ylim(0,hist_maxy_list[par_i])
#ax.legend(frameon=False)
#fig.savefig('figures/{0}_histogram_number.pdf'.format(par))
ax.legend(frameon=False,bbox_to_anchor=(2.0,1.0))
fig.tight_layout()
fig.savefig('figures/all_histograms.pdf')
plt.close('all')
'''
| mit |
bsipocz/statsmodels | statsmodels/graphics/plot_grids.py | 33 | 5711 | '''create scatterplot with confidence ellipsis
Author: Josef Perktold
License: BSD-3
TODO: update script to use sharex, sharey, and visible=False
see http://www.scipy.org/Cookbook/Matplotlib/Multiple_Subplots_with_One_Axis_Label
for sharex I need to have the ax of the last_row when editing the earlier
rows. Or you axes_grid1, imagegrid
http://matplotlib.sourceforge.net/mpl_toolkits/axes_grid/users/overview.html
'''
from statsmodels.compat.python import range
import numpy as np
from scipy import stats
from . import utils
__all__ = ['scatter_ellipse']
def _make_ellipse(mean, cov, ax, level=0.95, color=None):
"""Support function for scatter_ellipse."""
from matplotlib.patches import Ellipse
v, w = np.linalg.eigh(cov)
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan(u[1]/u[0])
angle = 180 * angle / np.pi # convert to degrees
v = 2 * np.sqrt(v * stats.chi2.ppf(level, 2)) #get size corresponding to level
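    # For a bivariate normal, the contour enclosing probability `level`
    # satisfies (x - mu)' C^-1 (x - mu) = chi2.ppf(level, 2), so the full
    # axis lengths of the ellipse are 2*sqrt(eigenvalue * chi2.ppf(level, 2)).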
ell = Ellipse(mean[:2], v[0], v[1], 180 + angle, facecolor='none',
edgecolor=color,
#ls='dashed', #for debugging
lw=1.5)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
def scatter_ellipse(data, level=0.9, varnames=None, ell_kwds=None,
plot_kwds=None, add_titles=False, keep_ticks=False,
fig=None):
"""Create a grid of scatter plots with confidence ellipses.
    ell_kwds and plot_kwds are merged into the default ellipse and point styles
looks ok with 5 or 6 variables, too crowded with 8, too empty with 1
Parameters
----------
data : array_like
Input data.
level : scalar, optional
Default is 0.9.
varnames : list of str, optional
Variable names. Used for y-axis labels, and if `add_titles` is True
also for titles. If not given, integers 1..data.shape[1] are used.
    ell_kwds : dict, optional
        Keyword arguments merged into the style of the confidence ellipses.
    plot_kwds : dict, optional
        Keyword arguments merged into the style of the scatter points.
add_titles : bool, optional
Whether or not to add titles to each subplot. Default is False.
Titles are constructed from `varnames`.
keep_ticks : bool, optional
If False (default), remove all axis ticks.
fig : Matplotlib figure instance, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
fig : Matplotlib figure instance
If `fig` is None, the created figure. Otherwise `fig` itself.
"""
fig = utils.create_mpl_fig(fig)
import matplotlib.ticker as mticker
data = np.asanyarray(data) #needs mean and cov
nvars = data.shape[1]
if varnames is None:
#assuming single digit, nvars<=10 else use 'var%2d'
varnames = ['var%d' % i for i in range(nvars)]
plot_kwds_ = dict(ls='none', marker='.', color='k', alpha=0.5)
if plot_kwds:
plot_kwds_.update(plot_kwds)
ell_kwds_= dict(color='k')
if ell_kwds:
ell_kwds_.update(ell_kwds)
dmean = data.mean(0)
dcov = np.cov(data, rowvar=0)
for i in range(1, nvars):
#print '---'
ax_last=None
for j in range(i):
#print i,j, i*(nvars-1)+j+1
ax = fig.add_subplot(nvars-1, nvars-1, (i-1)*(nvars-1)+j+1)
## #sharey=ax_last) #sharey doesn't allow empty ticks?
## if j == 0:
## print 'new ax_last', j
## ax_last = ax
## ax.set_ylabel(varnames[i])
#TODO: make sure we have same xlim and ylim
formatter = mticker.FormatStrFormatter('% 3.1f')
ax.yaxis.set_major_formatter(formatter)
ax.xaxis.set_major_formatter(formatter)
idx = np.array([j,i])
ax.plot(*data[:,idx].T, **plot_kwds_)
if np.isscalar(level):
level = [level]
for alpha in level:
_make_ellipse(dmean[idx], dcov[idx[:,None], idx], ax, level=alpha,
**ell_kwds_)
if add_titles:
ax.set_title('%s-%s' % (varnames[i], varnames[j]))
if not ax.is_first_col():
if not keep_ticks:
ax.set_yticks([])
else:
ax.yaxis.set_major_locator(mticker.MaxNLocator(3))
else:
ax.set_ylabel(varnames[i])
if ax.is_last_row():
ax.set_xlabel(varnames[j])
else:
if not keep_ticks:
ax.set_xticks([])
else:
ax.xaxis.set_major_locator(mticker.MaxNLocator(3))
dcorr = np.corrcoef(data, rowvar=0)
dc = dcorr[idx[:,None], idx]
xlim = ax.get_xlim()
ylim = ax.get_ylim()
## xt = xlim[0] + 0.1 * (xlim[1] - xlim[0])
## yt = ylim[0] + 0.1 * (ylim[1] - ylim[0])
## if dc[1,0] < 0 :
## yt = ylim[0] + 0.1 * (ylim[1] - ylim[0])
## else:
## yt = ylim[1] - 0.2 * (ylim[1] - ylim[0])
yrangeq = ylim[0] + 0.4 * (ylim[1] - ylim[0])
if dc[1,0] < -0.25 or (dc[1,0] < 0.25 and dmean[idx][1] > yrangeq):
yt = ylim[0] + 0.1 * (ylim[1] - ylim[0])
else:
yt = ylim[1] - 0.2 * (ylim[1] - ylim[0])
xt = xlim[0] + 0.1 * (xlim[1] - xlim[0])
ax.text(xt, yt, '$\\rho=%0.2f$'% dc[1,0])
for ax in fig.axes:
if ax.is_last_row(): # or ax.is_first_col():
ax.xaxis.set_major_locator(mticker.MaxNLocator(3))
if ax.is_first_col():
ax.yaxis.set_major_locator(mticker.MaxNLocator(3))
return fig
| bsd-3-clause |
jereze/scikit-learn | benchmarks/bench_rcv1_logreg_convergence.py | 149 | 7173 | # Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
import gc
import time
from sklearn.externals.joblib import Memory
from sklearn.linear_model import (LogisticRegression, SGDClassifier)
from sklearn.datasets import fetch_rcv1
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import get_max_squared_sum
try:
import lightning.classification as lightning_clf
except ImportError:
lightning_clf = None
m = Memory(cachedir='.', verbose=0)
# compute logistic loss
def get_loss(w, intercept, myX, myy, C):
n_samples = myX.shape[0]
w = w.ravel()
p = np.mean(np.log(1. + np.exp(-myy * (myX.dot(w) + intercept))))
print("%f + %f" % (p, w.dot(w) / 2. / C / n_samples))
p += w.dot(w) / 2. / C / n_samples
return p
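# Added note: this is the objective minimised by L2-penalised logistic
# regression, i.e. mean_i log(1 + exp(-y_i*(x_i.w + b))) + ||w||^2 / (2*C*n).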
# We use joblib to cache individual fits. Note that we do not pass the dataset
# as argument as the hashing would be too slow, so we assume that the dataset
# never changes.
@m.cache()
def bench_one(name, clf_type, clf_params, n_iter):
clf = clf_type(**clf_params)
try:
clf.set_params(max_iter=n_iter, random_state=42)
except:
clf.set_params(n_iter=n_iter, random_state=42)
st = time.time()
clf.fit(X, y)
end = time.time()
try:
C = 1.0 / clf.alpha / n_samples
except:
C = clf.C
try:
intercept = clf.intercept_
except:
intercept = 0.
train_loss = get_loss(clf.coef_, intercept, X, y, C)
train_score = clf.score(X, y)
test_score = clf.score(X_test, y_test)
duration = end - st
return train_loss, train_score, test_score, duration
def bench(clfs):
for (name, clf, iter_range, train_losses, train_scores,
test_scores, durations) in clfs:
print("training %s" % name)
clf_type = type(clf)
clf_params = clf.get_params()
for n_iter in iter_range:
gc.collect()
train_loss, train_score, test_score, duration = bench_one(
name, clf_type, clf_params, n_iter)
train_losses.append(train_loss)
train_scores.append(train_score)
test_scores.append(test_score)
durations.append(duration)
print("classifier: %s" % name)
print("train_loss: %.8f" % train_loss)
print("train_score: %.8f" % train_score)
print("test_score: %.8f" % test_score)
print("time for fit: %.8f seconds" % duration)
print("")
print("")
return clfs
def plot_train_losses(clfs):
plt.figure()
for (name, _, _, train_losses, _, _, durations) in clfs:
plt.plot(durations, train_losses, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train loss")
def plot_train_scores(clfs):
plt.figure()
for (name, _, _, _, train_scores, _, durations) in clfs:
plt.plot(durations, train_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train score")
plt.ylim((0.92, 0.96))
def plot_test_scores(clfs):
plt.figure()
for (name, _, _, _, _, test_scores, durations) in clfs:
plt.plot(durations, test_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("test score")
plt.ylim((0.92, 0.96))
def plot_dloss(clfs):
plt.figure()
pobj_final = []
for (name, _, _, train_losses, _, _, durations) in clfs:
pobj_final.append(train_losses[-1])
indices = np.argsort(pobj_final)
pobj_best = pobj_final[indices[0]]
for (name, _, _, train_losses, _, _, durations) in clfs:
log_pobj = np.log(abs(np.array(train_losses) - pobj_best)) / np.log(10)
plt.plot(durations, log_pobj, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("log(best - train_loss)")
rcv1 = fetch_rcv1()
X = rcv1.data
n_samples, n_features = X.shape
# consider the binary classification problem 'CCAT' vs the rest
ccat_idx = rcv1.target_names.tolist().index('CCAT')
y = rcv1.target.tocsc()[:, ccat_idx].toarray().ravel().astype(np.float64)
y[y == 0] = -1
# parameters
C = 1.
fit_intercept = True
tol = 1.0e-14
# max_iter range
sgd_iter_range = list(range(1, 121, 10))
newton_iter_range = list(range(1, 25, 3))
lbfgs_iter_range = list(range(1, 242, 12))
liblinear_iter_range = list(range(1, 37, 3))
liblinear_dual_iter_range = list(range(1, 85, 6))
sag_iter_range = list(range(1, 37, 3))
clfs = [
("LR-liblinear",
LogisticRegression(C=C, tol=tol,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_iter_range, [], [], [], []),
("LR-liblinear-dual",
LogisticRegression(C=C, tol=tol, dual=True,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_dual_iter_range, [], [], [], []),
("LR-SAG",
LogisticRegression(C=C, tol=tol,
solver="sag", fit_intercept=fit_intercept),
sag_iter_range, [], [], [], []),
("LR-newton-cg",
LogisticRegression(C=C, tol=tol, solver="newton-cg",
fit_intercept=fit_intercept),
newton_iter_range, [], [], [], []),
("LR-lbfgs",
LogisticRegression(C=C, tol=tol,
solver="lbfgs", fit_intercept=fit_intercept),
lbfgs_iter_range, [], [], [], []),
("SGD",
SGDClassifier(alpha=1.0 / C / n_samples, penalty='l2', loss='log',
fit_intercept=fit_intercept, verbose=0),
sgd_iter_range, [], [], [], [])]
if lightning_clf is not None and not fit_intercept:
alpha = 1. / C / n_samples
# compute the same step_size than in LR-sag
max_squared_sum = get_max_squared_sum(X)
step_size = get_auto_step_size(max_squared_sum, alpha, "log",
fit_intercept)
clfs.append(
("Lightning-SVRG",
lightning_clf.SVRGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
clfs.append(
("Lightning-SAG",
lightning_clf.SAGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
# We keep only 200 features, to have a dense dataset,
# and compare to lightning SAG, which seems incorrect in the sparse case.
X_csc = X.tocsc()
nnz_in_each_features = X_csc.indptr[1:] - X_csc.indptr[:-1]
X = X_csc[:, np.argsort(nnz_in_each_features)[-200:]]
X = X.toarray()
print("dataset: %.3f MB" % (X.nbytes / 1e6))
# Split training and testing. Switch train and test subset compared to
# LYRL2004 split, to have a larger training dataset.
n = 23149
X_test = X[:n, :]
y_test = y[:n]
X = X[n:, :]
y = y[n:]
clfs = bench(clfs)
plot_train_scores(clfs)
plot_test_scores(clfs)
plot_train_losses(clfs)
plot_dloss(clfs)
plt.show()
| bsd-3-clause |
nhmc/LAE | cloudy/find_par.py | 1 | 13374 | from __future__ import division
from math import log, sqrt, pi
from barak.utilities import adict
from barak.absorb import split_trans_name
from barak.io import parse_config, loadobj
from barak.interp import AkimaSpline, MapCoord_Interpolator
from cloudy.utils import read_observed
import numpy as np
import os
from glob import glob
from barak.plot import get_nrows_ncols, puttext
from matplotlib.ticker import AutoMinorLocator
import astropy.constants as c
import astropy.units as u
from astropy.table import Table
import pylab as plt
import sys
# dex 1 sigma error in UVB (and so nH)
Unorm_sig = 0.3
USE_HEXBIN = True
def make_cmap_red():
from matplotlib.colors import LinearSegmentedColormap
x = np.linspace(0,1,9)
cm = plt.cm.Reds(x)
r,g,b = cm[:,0], cm[:,1], cm[:,2]
g[0] = 1
b[0] = 1
cdict = dict(red=zip(x, r, r), green=zip(x, g, g), blue=zip(x, b, b))
return LinearSegmentedColormap('red_nhmc', cdict)
def make_cmap_blue():
from matplotlib.colors import LinearSegmentedColormap
x = np.linspace(0,1,15)
cm = plt.cm.Blues(x)
r,g,b = cm[:,0], cm[:,1], cm[:,2]
g[0] = 1
b[0] = 1
r[1:10] = r[4:13]
g[1:10] = g[4:13]
b[1:10] = b[4:13]
cdict = dict(red=zip(x, r, r), green=zip(x, g, g), blue=zip(x, b, b))
return LinearSegmentedColormap('blue_nhmc', cdict)
def find_min_interval(x, alpha):
""" Determine the minimum interval containing a given probability.
x is an array of parameter values (such as from an MCMC trace).
alpha (0 -> 1) is the desired probability encompassed by the
interval.
Inspired by the pymc function of the same name.
"""
assert len(x) > 1
x = np.sort(x)
# Initialize interval
min_int = None, None
# Number of elements in trace
n = len(x)
# Start at far left
end0 = int(n*alpha)
start, end = 0, end0
# Initialize minimum width to large value
min_width = np.inf
for i in xrange(n - end0):
hi, lo = x[end+i], x[start+i]
width = hi - lo
if width < min_width:
min_width = width
min_int = lo, hi
return min_int
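# Usage sketch (added commentary): find_min_interval returns the shortest
# (highest-density) interval covering a fraction alpha of the samples, e.g.
#   lo, hi = find_min_interval(np.random.randn(10000), 0.6827)
# should give roughly (-1, 1) for a standard normal trace.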
def make_interpolators_uvbtilt(trans, simnames):
""" Make interpolators including different UV slopes, given by the
simulation names.
simname naming scheme should be (uvb_k00, uvb_k01, uvb_k02, ...),
uvb k values must be sorted in ascending order!
"""
Models = []
aUV = []
for simname in simnames:
# need to define prefix, SIMNAME
gridname = os.path.join(simname, 'grid.cfg')
#print 'Reading', gridname
cfg = parse_config(gridname)
aUV.append(cfg.uvb_tilt)
name = os.path.join(simname, cfg.prefix + '_grid.sav.gz')
#print 'Reading', name
M = loadobj(name)
M = adict(M)
Uconst = (M.U + M.nH)[0]
#print 'Uconst', Uconst, cfg.uvb_tilt
assert np.allclose(Uconst, M.U + M.nH)
Models.append(M)
##########################################################################
# Interpolate cloudy grids onto a finer scale for plotting and
# likelihood calculation
##########################################################################
roman_map = {'I':0, 'II':1, 'III':2, 'IV':3, 'V':4, 'VI':5,
'VII':6, 'VIII':7, 'IX':8, 'X':9, '2':2}
Ncloudy = {}
Ncloudy_raw = {}
#print 'Interpolating...'
for tr in trans:
shape = len(M.NHI), len(M.nH), len(M.Z), len(aUV)
Nvals = np.zeros(shape)
if tr == 'Tgas':
for i,M in enumerate(Models):
Nvals[:,:,:,i] = M['Tgas'][:,:,:,0]
elif tr == 'NH':
for i,M in enumerate(Models):
logNHI = M.N['H'][:,:,:,0]
logNHII = M.N['H'][:,:,:,1]
logNHtot = np.log10(10**logNHI + 10**logNHII)
Nvals[:,:,:,i] = logNHtot
elif tr in ['CII*']:
for i,M in enumerate(Models):
Nvals[:,:,:,i] = M.Nex[tr][:,:,:]
else:
atom, stage = split_trans_name(tr)
ind = roman_map[stage]
for i,M in enumerate(Models):
Nvals[:,:,:,i] = M.N[atom][:,:,:,ind]
# use ndimage.map_coordinates (which is spline interpolation)
coord = M.NHI, M.nH, M.Z, aUV
try:
Ncloudy[tr] = MapCoord_Interpolator(Nvals, coord)
except:
import pdb; pdb.set_trace()
Ncloudy_raw[tr] = Nvals
#print 'done'
return Ncloudy, Ncloudy_raw, Models, np.array(aUV, np.float)
def triplot(names, vals, sigvals, fig, indirect={}, labels=None, fontsize=14):
from barak.plot import hist_yedge, hist_xedge, puttext
npar = len(names)
bins = {}
for n in names:
x0, x1 = vals[n].min(), vals[n].max()
dx = x1 - x0
lo = x0 - 0.1*dx
hi = x1 + 0.1*dx
bins[n] = np.linspace(lo, hi, 20)
axes = {}
for i0,n0 in enumerate(names):
for i1,n1 in enumerate(names):
if i0 == i1:# or i1 < i0: # uncomment to keep just one triangle.
continue
ax = fig.add_subplot(npar,npar, i0 * npar + i1 + 1)
ax.locator_params(tight=True, nbins=8)
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_minor_locator(AutoMinorLocator())
axes[(n0 + ' ' + n1)] = ax
y,x = vals[n0], vals[n1]
if USE_HEXBIN:
ax.hexbin(x,y,cmap=CM, gridsize=40,linewidths=0.1)
else:
ax.plot(x,y,'r.', ms=0.5, mew=0)#, alpha=0.5)
color = 'k' if n0 not in indirect else 'g'
text = labels[n0] if labels is not None else n0
puttext(0.05, 0.95, text, ax, color=color ,fontsize=fontsize, va='top')
color = 'k' if n1 not in indirect else 'g'
text = labels[n1] if labels is not None else n1
puttext(0.95, 0.08, text, ax, color=color ,fontsize=fontsize, ha='right')
# set limits
y0, y1 = np.percentile(vals[n0], [5, 95])
dy = y1 - y0
ax.set_ylim(y0 - dy, y1 + dy)
x0, x1 = np.percentile(vals[n1], [5, 95])
dx = x1 - x0
ax.set_xlim(x0 - dx, x1 + dx)
c = 'k'
if i0 == 0:
ax.xaxis.set_tick_params(labeltop='on')
ax.xaxis.set_tick_params(labelbottom='off')
for t in ax.get_xticklabels():
t.set_rotation(60)
elif i0 == npar-1 or (i0 == npar-2 and i1 == npar-1):
hist_xedge(vals[n1], ax, color='forestgreen',
fill=dict(color='forestgreen',alpha=0.3),
bins=bins[n1], loc='bottom')
ax.axvline(sigvals[n1][0], ymax=0.2, color=c, lw=0.5)
ax.axvline(sigvals[n1][1], ymax=0.2, color=c, lw=0.5)
cen = sum(sigvals[n1]) / 2.
ax.axvline(cen, ymax=0.2, color=c, lw=1.5)
for t in ax.get_xticklabels():
t.set_rotation(60)
else:
ax.set_xticklabels('')
if not (i1 == 0 or (i0 == 0 and i1 == 1) or i1 == npar-1):
ax.set_yticklabels('')
if (i0 == 0 and i1 == 1) or i1 == 0:
hist_yedge(vals[n0], ax, color='forestgreen',
fill=dict(color='forestgreen',alpha=0.3),
bins=bins[n0], loc='left')
ax.axhline(sigvals[n0][0], xmax=0.2, color=c, lw=0.5)
ax.axhline(sigvals[n0][1], xmax=0.2, color=c, lw=0.5)
cen = sum(sigvals[n0]) / 2.
ax.axhline(cen, xmax=0.2, color=c, lw=1.5)
if i1 == npar - 1:
ax.yaxis.set_tick_params(labelright='on')
ax.yaxis.set_tick_params(labelleft='off')
#ax.minorticks_on()
return axes
if 1:
print_header = False
if len(sys.argv[1:]) > 0 and sys.argv[1] == '--header':
print_header = True
if 1:
##################################################
# Read configuration file, set global variables
##################################################
testing = 0
cfgname = 'model.cfg'
# we only need the cfg file for the prefix of the cloudy runs and
# the name of the file with the observed column densities.
opt = parse_config(cfgname)
simnames = sorted(glob(opt['simname']))
#print opt['simname']
#print simnames
#CM = make_cmap_blue() # plt.cm.binary
#CM = make_cmap_red() # plt.cm.binary
CM = plt.cm.gist_heat_r # plt.cm.binary
#CM = plt.cm.afmhot_r # plt.cm.binary
#CM = plt.cm.bone_r # plt.cm.binary
#CM = plt.cm.terrain_r # plt.cm.binary
#CM = plt.cm.ocean_r # plt.cm.binary
trans = 'Tgas', 'NH'
if 1:
################################################################
# Read the cloudy grids and make the interpolators
################################################################
Ncloudy, Ncloudy_raw, Models, aUV = make_interpolators_uvbtilt(
trans, simnames)
M = Models[0]
#import pdb; pdb.set_trace()
Uconst_vals = []
for model in Models:
Uconst_vals.append((model['U'] + model['nH'])[0])
# note it's a function of aUV!
Uconst = AkimaSpline(aUV, Uconst_vals)
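    # Added note: U scales as the ionising photon density over nH, so for a
    # fixed UVB normalisation log U + log nH is constant; the spline stores
    # that constant as a function of the UV slope aUV, and logU is recovered
    # below as Uconst(aUV) - lognH.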
# Now find the parameter chains
samples = loadobj('samples_mcmc.sav.gz')
nwalkers, nsamples, npar = samples['chain'].shape
parvals = samples['chain'].reshape(-1, npar)
PAR = samples['par']
assert PAR['names'][-1] == 'aUV'
assert PAR['names'][-2] == 'Z'
assert PAR['names'][-3] == 'nH'
assert PAR['names'][-4] == 'NHI'
aUV = parvals[:,-1]
logZ = parvals[:,-2]
lognH = parvals[:,-3]
logNHI = parvals[:,-4]
logU = Uconst(aUV) - lognH
#import pdb; pdb.set_trace()
# call the interpolators with these parameter values.
logT = Ncloudy['Tgas'](parvals[:,-4:].T)
logNtot = Ncloudy['NH'](parvals[:,-4:].T)
# note this is log of D in kpc
logD = logNtot - lognH - np.log10(c.kpc.to(u.cm).value)
logP = logT + lognH
#import pdb; pdb.set_trace()
H_massfrac = 0.76 # (1 / mu)
# Joe's mass calculation
mass = 4./3. * pi * (3./4. * 10**logD * u.kpc)**3 * 10**lognH * \
u.cm**-3 * u.M_p / H_massfrac
# D = NH / nH
logM = np.log10(mass.to(u.M_sun).value)
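    # Added note: the cloud is treated as a sphere of radius (3/4)*D, with
    # D = NH / nH the absorption length, and dividing by the hydrogen mass
    # fraction (0.76) converts the hydrogen mass into a total gas mass.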
if 1:
# print out the results and uncertainties
vals = dict(U=logU, T=logT, N=logNtot, D=logD, P=logP, M=logM,
nH=lognH, aUV=aUV, NHI=logNHI, Z=logZ)
levels = 0.6827, 0.9545
sigvals = {}
for key in vals:
sigvals[key] = find_min_interval(vals[key], levels[0])
if print_header:
print r'$\log(Z/Z_\odot)$&$\alpha_{UV}$ & $\log \nH$ & $\log U$& $\log \NHI$ & $\log \NH$& $\log T$ & $\log (P/k)$& $\log D$ & $\log M$ \\'
print r' & & (\cmmm) & & (\cmm) & (\cmm) & (K) & (\cmmm K) & (kpc) & (\msun) \\'
s = ''
ALLPAR = 'Z aUV nH U NHI N T P D M'.split()
for key in ALLPAR:
sig = 0.5 * (sigvals[key][1] - sigvals[key][0])
val = 0.5 * (sigvals[key][1] + sigvals[key][0])
if key in {'nH', 'D', 'P'}:
sig1 = np.hypot(sig, Unorm_sig)
s += '$%.2f\\pm%.2f(%.2f)$ &' % (val, sig1, sig)
elif key == 'M':
sig1 = np.hypot(sig, 2*Unorm_sig)
s += '$%.2f\\pm%.2f(%.2f)$ &' % (val, sig1, sig)
else:
s += '$%.2f\\pm%.2f$ &' % (val, sig)
print s[:-1] + r'\\'
if 1:
labels = dict(U='$U$', Z='$Z$', NHI='$N_\mathrm{HI}$', aUV=r'$\alpha_\mathrm{UV}$',
T='$T$', P='$P$', N='$N_\mathrm{H}$', D='$D$', M='$Mass$')
if 0:
fig = plt.figure(figsize=(12,12))
fig.subplots_adjust(left=0.05, bottom=0.05, top=0.94,right=0.94, wspace=1e-4,hspace=1e-4)
plt.rc('xtick', labelsize=8)
plt.rc('ytick', labelsize=8)
names = 'U Z NHI aUV T P N D M'.split()
#direct = 'U Z NHI aUV'.split()
axes = triplot(names, vals, sigvals, fig, labels=labels)
plt.savefig('par.png', dpi=200)
if 1:
fig = plt.figure(figsize=(8,8))
fig.subplots_adjust(left=0.095, bottom=0.105, top=0.94,right=0.94, wspace=1e-4,hspace=1e-4)
plt.rc('xtick', labelsize=9.5)
plt.rc('ytick', labelsize=9.5)
names = 'U Z N aUV'.split()
axes = triplot(names, vals, sigvals, fig, labels=labels, fontsize=16)
axes['U Z'].set_ylabel('$\log_{10}U$')
axes['Z U'].set_ylabel('$\log_{10}[Z/Z_\odot]$')
axes['N U'].set_ylabel('$\log_{10}N_\mathrm{H}$')
axes['aUV U'].set_ylabel(r'$\log_{10}\alpha_\mathrm{UV}$')
axes['aUV U'].set_xlabel('$\log_{10}U$')
axes['aUV Z'].set_xlabel('$\log_{10}[Z/Z_\odot]$')
axes['aUV N'].set_xlabel('$\log_{10}N_\mathrm{H}$')
axes['N aUV'].set_xlabel(r'$\log_{10}\alpha_\mathrm{UV}$')
# special case:
if os.path.abspath('.') == '/Users/ncrighton/Projects/MPIA_QSO_LBG/Cloudy/J0004_NHI_2/comp1/final':
for k in ('N U', 'N Z', 'N aUV'):
axes[k].set_ylim(17.3, 19.2)
for k in ('U N', 'Z N', 'aUV N'):
axes[k].set_xlim(17.3, 19.2)
#plt.savefig('par2.pdf')
plt.savefig('par2.png',dpi=250)
| mit |
jswanljung/iris | docs/iris/example_code/General/inset_plot.py | 7 | 2357 | """
Test Data Showing Inset Plots
=============================
This example demonstrates the use of a single 3D data cube with time, latitude
and longitude dimensions to plot a temperature series for a single latitude
coordinate, with an inset plot of the data region.
"""
import matplotlib.pyplot as plt
import numpy as np
import iris
import cartopy.crs as ccrs
import iris.quickplot as qplt
import iris.plot as iplt
def main():
# Load the data
with iris.FUTURE.context(netcdf_promote=True):
cube1 = iris.load_cube(iris.sample_data_path('ostia_monthly.nc'))
# Slice into cube to retrieve data for the inset map showing the
# data region
region = cube1[-1, :, :]
# Average over latitude to reduce cube to 1 dimension
plot_line = region.collapsed('latitude', iris.analysis.MEAN)
# Open a window for plotting
fig = plt.figure()
# Add a single subplot (axes). Could also use "ax_main = plt.subplot()"
ax_main = fig.add_subplot(1, 1, 1)
# Produce a quick plot of the 1D cube
qplt.plot(plot_line)
# Set x limits to match the data
ax_main.set_xlim(0, plot_line.coord('longitude').points.max())
# Adjust the y limits so that the inset map won't clash with main plot
ax_main.set_ylim(294, 310)
ax_main.set_title('Meridional Mean Temperature')
# Add grid lines
ax_main.grid()
# Add a second set of axes specifying the fractional coordinates within
# the figure with bottom left corner at x=0.55, y=0.58 with width
# 0.3 and height 0.25.
# Also specify the projection
ax_sub = fig.add_axes([0.55, 0.58, 0.3, 0.25],
projection=ccrs.Mollweide(central_longitude=180))
# Use iris.plot (iplt) here so colour bar properties can be specified
# Also use a sequential colour scheme to reduce confusion for those with
# colour-blindness
iplt.pcolormesh(region, cmap='Blues')
# Manually set the orientation and tick marks on your colour bar
ticklist = np.linspace(np.min(region.data), np.max(region.data), 4)
plt.colorbar(orientation='horizontal', ticks=ticklist)
ax_sub.set_title('Data Region')
# Add coastlines
ax_sub.coastlines()
# request to show entire map, using the colour mesh on the data region only
ax_sub.set_global()
qplt.show()
if __name__ == '__main__':
main()
| lgpl-3.0 |
gviejo/ThalamusPhysio | python/main_make_MAPinfo.py | 1 | 14284 | #!/usr/bin/env python
'''
    File name: main_make_MAPinfo.py
Author: Guillaume Viejo
Date created: 09/10/2017
Python Version: 3.5.2
To make shank mapping
'''
import numpy as np
import pandas as pd
# from matplotlib.pyplot import plot,show,draw
import scipy.io
from functions import *
from pylab import *
from sklearn.decomposition import PCA
import _pickle as cPickle
import neuroseries as nts
import sys
sys.exit()
###############################################################################################################
# LOADING DATA
###############################################################################################################
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
theta_mod, theta_ses = loadThetaMod('/mnt/DataGuillaume/MergedData/THETA_THAL_mod.pickle', datasets, return_index=True)
swr_mod, swr_ses = loadSWRMod('/mnt/DataGuillaume/MergedData/SWR_THAL_corr.pickle', datasets, return_index=True)
spind_mod, spind_ses = loadSpindMod('/mnt/DataGuillaume/MergedData/SPINDLE_mod.pickle', datasets, return_index=True)
spike_spindle_phase = cPickle.load(open('/mnt/DataGuillaume/MergedData/SPIKE_SPINDLE_PHASE.pickle', 'rb'))
spike_theta_phase = cPickle.load(open('/mnt/DataGuillaume/MergedData/SPIKE_THETA_PHASE.pickle', 'rb'))
nbins = 400
binsize = 5
times = np.arange(0, binsize*(nbins+1), binsize) - (nbins*binsize)/2
theta = pd.DataFrame( index = theta_ses['rem'],
columns = ['phase', 'pvalue', 'kappa'],
data = theta_mod['rem'])
# filtering swr_mod
swr = pd.DataFrame( columns = swr_ses,
index = times,
data = gaussFilt(swr_mod, (10,)).transpose())
# Cut swr_mod from -500 to 500
swr = swr.loc[-500:500]
# CHECK FOR NAN
tmp1 = swr.columns[swr.isnull().any()].values
tmp2 = theta.index[theta.isnull().any(1)].values
# CHECK P-VALUE
tmp3 = theta.index[(theta['pvalue'] > 1).values].values
tmp = np.unique(np.concatenate([tmp1,tmp2,tmp3]))
# copy and delete
if len(tmp):
swr_modth = swr.drop(tmp, axis = 1)
theta_modth = theta.drop(tmp, axis = 0)
swr_modth_copy = swr_modth.copy()
neuron_index = swr_modth.columns
times = swr_modth.loc[-500:500].index.values
###############################################################################################################
# MOVIE + jPCA for each animal
###############################################################################################################
mouses = ['Mouse12', 'Mouse17', 'Mouse20', 'Mouse32']
# times = np.arange(0, 1005, 5) - 500 # BAD
interval_to_cut = { 'Mouse12':[89,128],
'Mouse17':[84,123],
'Mouse20':[92,131],
'Mouse32':[80,125]}
movies = dict.fromkeys(mouses)
rXX = dict.fromkeys(mouses)
maps = dict.fromkeys(mouses)
headdir = dict.fromkeys(mouses)
adnloc = dict.fromkeys(mouses)
xpos = dict.fromkeys(mouses)
ypos = dict.fromkeys(mouses)
xpos_shank = dict.fromkeys(mouses)
ypos_shank = dict.fromkeys(mouses)
xpos_phase = dict.fromkeys(mouses)
ypos_phase = dict.fromkeys(mouses)
theta_dens = dict.fromkeys(mouses)
hd_neurons_index = []
for m in mouses:
print(m)
depth = pd.DataFrame(index = np.genfromtxt(data_directory+m+"/"+m+".depth", dtype = 'str', usecols = 0),
data = np.genfromtxt(data_directory+m+"/"+m+".depth", usecols = 1),
columns = ['depth'])
neurons = np.array([n for n in neuron_index if m in n])
sessions = np.unique([n.split("_")[0] for n in neuron_index if m in n])
nb_bins = 201
swr_shank = np.zeros((len(sessions),8,nb_bins))
# nb_bins = interval_to_cut[m][1] - interval_to_cut[m][0]
theta_shank = np.zeros((len(sessions),8,30)) # that's radian bins here
spindle_shank = np.zeros((len(sessions),8,30)) # that's radian bins here
bins_phase = np.linspace(0.0, 2*np.pi+0.00001, 31)
count_total = np.zeros((len(sessions),8))
hd_neurons = np.zeros((len(sessions),8))
amplitute = np.zeros((len(sessions),8))
mod_theta = np.zeros((len(sessions),8))
###########################################################################################################
# JPCA
###########################################################################################################
rX,phi_swr,dynamical_system = jPCA(swr_modth[neurons].values.transpose(), times)
phi_swr = pd.DataFrame(index = neurons, data = phi_swr)
###########################################################################################################
# VARIOUS
###########################################################################################################
for s in sessions:
generalinfo = scipy.io.loadmat(data_directory+m+"/"+s+'/Analysis/GeneralInfo.mat')
shankStructure = loadShankStructure(generalinfo)
spikes,shank = loadSpikeData(data_directory+m+"/"+s+'/Analysis/SpikeData.mat', shankStructure['thalamus'])
hd_info = scipy.io.loadmat(data_directory+m+'/'+s+'/Analysis/HDCells.mat')['hdCellStats'][:,-1]
hd_info_neuron = np.array([hd_info[n] for n in spikes.keys()])
shankIndex = np.array([shank[n] for n in spikes.keys()]).flatten()
if np.max(shankIndex) > 8 : sys.exit("Invalid shank index for thalamus" + s)
shank_to_neurons = {k:np.array(list(spikes.keys()))[shankIndex == k] for k in np.unique(shankIndex)}
for k in shank_to_neurons.keys():
count_total[np.where(sessions== s)[0][0],k] = len(shank_to_neurons[k])
hd_neurons[np.where(sessions== s)[0][0],k] = np.sum(hd_info_neuron[shankIndex == k])
mod_theta[np.where(sessions== s)[0][0],k] = (theta.loc[[s+'_'+str(i) for i in shank_to_neurons[k]]]['pvalue'] < 0.05).sum()
# amplitute[np.where(sessions==s)[0][0],k] = (swr.loc[shank_to_neurons[k]].var(1)).mean()
###########################################################################################################
# SWR MOD
###########################################################################################################
neurons_mod_in_s = np.array([n for n in neurons if s in n])
shank_to_neurons = {k:np.array([n for n in neurons_mod_in_s if shankIndex[int(n.split("_")[1])] == k]) for k in np.unique(shankIndex)}
for k in shank_to_neurons.keys():
# if np.sum(hd_info_neuron[[int(n.split("_")[1]) for n in shank_to_neurons[k]]]):
# print(s, k, len(shank_to_neurons[k]))
# if s == 'Mouse17-130204': sys.exit()
if len(shank_to_neurons[k]):
swr_shank[np.where(sessions== s)[0][0],k] = swr_modth[shank_to_neurons[k]].mean(1).values
###########################################################################################################
# THETA MOD
###########################################################################################################
for k in shank_to_neurons.keys():
if len(shank_to_neurons[k]):
for n in shank_to_neurons[k]:
phi = spike_theta_phase['rem'][n]
phi[phi<0.0] += 2*np.pi
index = np.digitize(phi, bins_phase)-1
for t in index:
theta_shank[np.where(sessions == s)[0][0],k,t] += 1.0
###########################################################################################################
# SPIND HPC MOD
###########################################################################################################
for k in shank_to_neurons.keys():
if len(shank_to_neurons[k]):
for n in shank_to_neurons[k]:
if n in list(spike_spindle_phase.keys()):
phi = spike_spindle_phase['hpc'][n]
phi[phi<0.0] += 2*np.pi
index = np.digitize(phi, bins_phase)-1
for t in index:
spindle_shank[np.where(sessions == s)[0][0],k,t] += 1.0
for t in range(len(times)):
swr_shank[:,:,t] = np.flip(swr_shank[:,:,t], 1)
for t in range(theta_shank.shape[-1]):
theta_shank[:,:,t] = np.flip(theta_shank[:,:,t], 1)
spindle_shank[:,:,t] = np.flip(spindle_shank[:,:,t], 1)
# saving
movies[m] = { 'swr' : swr_shank ,
'theta' : theta_shank ,
'spindle': spindle_shank }
hd_neurons = hd_neurons/(count_total+1.0)
mod_theta = mod_theta/(count_total+1.0)
rXX[m] = rX
maps[m] = { 'total': np.flip(count_total,1),
'x' : np.arange(0.0, 8*0.2, 0.2),
'y' : depth.loc[sessions].values.flatten()
}
headdir[m] = np.flip(hd_neurons, 1)
theta_dens[m] = np.flip(mod_theta, 1)
for m in movies.keys():
datatosave = { 'movies':movies[m],
'total':maps[m]['total'],
'x':maps[m]['x'],
'y':maps[m]['y'],
'headdir':headdir[m],
'jpc':rXX[m],
'theta_dens':theta_dens[m]
}
cPickle.dump(datatosave, open("../data/maps/"+m+".pickle", 'wb'))
sys.exit()
m = 'Mouse12'
space = 0.01
thl_lines = np.load("../figures/thalamus_lines.mat.npy").sum(2)
xlines, ylines, thl_lines = interpolate(thl_lines, np.linspace(maps[m]['x'].min(), maps[m]['x'].max(), thl_lines.shape[1]),
np.linspace(maps[m]['y'].min(), maps[m]['y'].max(), thl_lines.shape[0]), 0.001)
thl_lines -= thl_lines.min()
thl_lines /= thl_lines.max()
thl_lines[thl_lines>0.6] = 1.0
thl_lines[thl_lines<=0.6] = 0.0
xnew, ynew, total = interpolate(maps[m]['total'].copy(), maps[m]['x'], maps[m]['y'], space)
# total -= total.min()
# total /= total.max()
total = softmax(total, 20.0, 0.2)
for k in movies[m].keys():
movies[m][k] = filter_(movies[m][k], (2,2,5))
filmov = dict.fromkeys(movies[m].keys())
for k in filmov:
tmp = []
for t in range(movies[m][k].shape[-1]):
# frame = movies[m][k][:,:,t] / (maps[m]['total']+1.0)
frame = movies[m][k][:,:,t]
xnew, ynew, frame = interpolate(frame, maps[m]['x'], maps[m]['y'], space)
tmp.append(frame)
tmp = np.array(tmp)
filmov[k] = filter_(tmp, 5)
filmov[k] = filmov[k] - np.min(filmov[k])
filmov[k] = filmov[k] / np.max(filmov[k] + 1e-8)
filmov[k] = softmax(filmov[k], 10, 0.5)
xnew, ynew, head = interpolate(headdir[m].copy(), maps[m]['x'], maps[m]['y'], space)
head[head < np.percentile(head, 90)] = 0.0
# sys.exit()
# figure()
# index = np.arange(0,20,1)+90
# for i in range(len(index)):
# subplot(4,5,i+1)
# # imshow(get_rgb(filmov['swr'][index[i]].copy(), total.copy(), np.ones_like(total), 0.83),
# imshow(filmov['swr'][index[i]].copy(),
# aspect = 'auto',
# origin = 'upper',
# cmap = 'jet', vmin = 0.0, vmax = 1.0)
# # extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]))
# title("t = "+str(times[index[i]])+" ms")
# # contour(head, aspect = 'equal',origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]))
# # contour(thl_lines, aspect = 'equal', origin = 'upper', extent = (xlines[0], xlines[-1], ylines[-1], ylines[0]), colors = 'white')
# # show(thl_lines, aspect = 'equal', origin = 'upper', extent = (xlines[0], xlines[-1], ylines[-1], ylines[0]))
# show()
from matplotlib import animation, rc
from IPython.display import HTML, Image
rc('animation', html='html5')
fig, axes = plt.subplots(1,1)
images = [axes.imshow(get_rgb(filmov['swr'][0].copy(), np.ones_like(total), total, 0.65), vmin = 0.0, vmax = 1.0, aspect = 'equal', origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]))]
# images = [axes.imshow(filmov['swr'][0], aspect = 'equal', origin = 'upper', cmap = 'jet', vmin = 0.0, vmax = 1.0, extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]))]
axes.contour(head, aspect = 'equal',origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]), cmap = 'gist_gray')
axes.contour(thl_lines, aspect = 'equal', origin = 'upper', extent = (xlines[0], xlines[-1], ylines[-1], ylines[0]), colors = 'white')
def init():
images[0].set_data(get_rgb(filmov['swr'][0].copy(), np.ones_like(total), total, 0.65))
# images[0].set_data(filmov['swr'][0])
return images
def animate(t):
images[0].set_data(get_rgb(filmov['swr'][t].copy(), np.ones_like(total), total, 0.65))
# images[0].set_data(filmov['swr'][t])
return images
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=range(len(times)), interval=0, blit=False, repeat_delay = 5000)
anim.save('../figures/swr_mod_'+m+'.gif', writer='imagemagick', fps=60)
show()
sys.exit()
sys.exit()
from matplotlib import animation, rc
from IPython.display import HTML, Image
rc('animation', html='html5')
fig, axes = plt.subplots(1,3)
images = []
for k, i in zip(['swr', 'theta', 'spindle'], range(3)):
images.append(axes[i].imshow(filmov[k][0], aspect = 'auto', cmap = 'jet', origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0])))
contour(head, aspect = 'equal',origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]))
def init():
for i in range(3): images[i].set_data(filmov[k][0])
contour(head, aspect = 'equal',origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]))
return images
def animate(t):
for i in range(3): images[i].set_data(filmov[k][t])
contour(head, aspect = 'equal',origin = 'upper', extent = (xnew[0], xnew[-1], ynew[-1], ynew[0]))
return images
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=range(len(times)), interval=0, blit=True, repeat_delay = 0)
sys.exit()
m = 'Mouse12'
images = []
# for i in range(len(mouses)):
# lines1.append(axes[0,i].plot([],[],'o-')[0])
# lines2.append(axes[0,i].plot([],[],'o-')[0])
# axes[0,i].set_xlim(-500, 500)
# axes[0,i].set_ylim(rXX[mouses[i]].min(), rXX[mouses[i]].max())
images.append(axes.imshow(movies[m]['spindle'][:,:,0], aspect = 'auto', cmap = 'jet'))
def init():
# for i, m in zip(range(len(mouses)), mouses):
# images[i].set_data(movies[m][0])
# lines1[i].set_data(times[0], rXX[m][0,0])
# lines2[i].set_data(times[0], rXX[m][0,1])
# return images+lines1+lines2
images[0].set_data(movies[m]['spindle'][:,:,0])
return images
def animate(t):
# for i, m in zip(range(len(mouses)), mouses):
# images[i].set_data(movies[m][t])
# lines1[i].set_data(times[0:t], rXX[m][0:t,0])
# lines2[i].set_data(times[0:t], rXX[m][0:t,1])
images[0].set_data(movies[m]['spindle'][:,:,t])
return images
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=movies[m]['spindle'].shape[-1], interval=0, blit=True, repeat_delay = 1)
show()
# anim.save('../figures/animation_swr_mod_jpca.gif', writer='imagemagick', fps=60)
| gpl-3.0 |
piyush0609/scipy | scipy/spatial/tests/test__plotutils.py | 71 | 1463 | from __future__ import division, print_function, absolute_import
from numpy.testing import dec, assert_, assert_array_equal
try:
import matplotlib
matplotlib.rcParams['backend'] = 'Agg'
import matplotlib.pyplot as plt
has_matplotlib = True
except:
has_matplotlib = False
from scipy.spatial import \
delaunay_plot_2d, voronoi_plot_2d, convex_hull_plot_2d, \
Delaunay, Voronoi, ConvexHull
class TestPlotting:
points = [(0,0), (0,1), (1,0), (1,1)]
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_delaunay(self):
# Smoke test
fig = plt.figure()
obj = Delaunay(self.points)
s_before = obj.simplices.copy()
r = delaunay_plot_2d(obj, ax=fig.gca())
assert_array_equal(obj.simplices, s_before) # shouldn't modify
assert_(r is fig)
delaunay_plot_2d(obj, ax=fig.gca())
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_voronoi(self):
# Smoke test
fig = plt.figure()
obj = Voronoi(self.points)
r = voronoi_plot_2d(obj, ax=fig.gca())
assert_(r is fig)
voronoi_plot_2d(obj)
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_convex_hull(self):
# Smoke test
fig = plt.figure()
tri = ConvexHull(self.points)
r = convex_hull_plot_2d(tri, ax=fig.gca())
assert_(r is fig)
convex_hull_plot_2d(tri)
| bsd-3-clause |
yongfuyang/vnpy | vn.how/tick2trade/vn.trader_t2t/ctaAlgo/tools/multiTimeFrame/strategyBreakOut.py | 22 | 11811 | # encoding: UTF-8
"""
This file tweaks ctaTemplate Module to suit multi-TimeFrame strategies.
"""
from ctaBase import *
from ctaTemplate import CtaTemplate
import numpy as np
########################################################################
class BreakOut(CtaTemplate):
"""
"infoArray" 字典是用来储存辅助品种信息的, 可以是同品种的不同分钟k线, 也可以是不同品种的价格。
调用的方法:
价格序列:
self.infoArray["数据库名 + 空格 + collection名"]["close"]
self.infoArray["数据库名 + 空格 + collection名"]["high"]
self.infoArray["数据库名 + 空格 + collection名"]["low"]
单个价格:
self.infoBar["数据库名 + 空格 + collection名"]
返回的值为一个ctaBarData 或 None
"""
#----------------------------------------------------------------------
def __init__(self, ctaEngine, setting):
"""日内突破交易策略, 出场方式非常多, 本文件使用指标出场"""
className = 'BreakOut'
author = 'Joe'
super(BreakOut, self).__init__(ctaEngine, setting)
        # Dictionaries holding the auxiliary symbol data
self.infoArray = {}
self.initInfobar = {}
self.infoBar = {}
        # Amount of cached data
self.bufferSize = 100
self.bufferCount = 0
self.initDays = 10
        # Strategy parameters
        self.pOBO_Mult = 0.5          # Multiplier used to compute the breakout levels
        # self.pProtMult = 2            # Stop-loss distance in ATR multiples
        # self.pProfitMult = 2          # Take-profit as a multiple of the stop-loss
        # self.SlTp_On = False          # Enable stop-loss / take-profit handling
        # self.EODTime = 15             # End-of-day flattening time (hour)
self.vOBO_stretch = EMPTY_FLOAT
self.vOBO_initialpoint = EMPTY_FLOAT
self.vOBO_level_L = EMPTY_FLOAT
self.vOBO_level_S = EMPTY_FLOAT
self.orderList = []
        # Parameter list, holds the parameter names
paramList = ['name',
'className',
'author',
'pOBO_Mult',
'pProtMult',
'pProfitMult',
'SlTp_On',
'EODTime']
        # Variable list, holds the variable names
varList = ['vOBO_stretch',
'vOBO_initialpoint',
'vOBO_level_L',
'vOBO_level_S']
# ----------------------------------------------------------------------
def onInit(self):
"""初始化策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略初始化' % self.name)
# 载入历史数据,并采用回放计算的方式初始化策略数值
initData = self.loadBar(self.initDays)
for bar in initData:
            # Push the new bar and check whether its time stamp matches any information bar
ibar = self.checkInfoBar(bar)
self.onBar(bar, infobar=ibar)
self.putEvent()
#----------------------------------------------------------------------
def onStart(self):
"""启动策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略启动' %self.name)
self.putEvent()
#----------------------------------------------------------------------
def onStop(self):
"""停止策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略停止' %self.name)
self.putEvent()
# ----------------------------------------------------------------------
def checkInfoBar(self, bar):
"""在初始化时, 检查辅助品种数据的推送(初始化结束后, 回测时不会调用)"""
initInfoCursorDict = self.ctaEngine.initInfoCursor
# 如果"initInfobar"字典为空, 初始化字典, 插入第一个数据
# If dictionary "initInfobar" is empty, insert first data record
if self.initInfobar == {}:
for info_symbol in initInfoCursorDict:
try:
self.initInfobar[info_symbol] = next(initInfoCursorDict[info_symbol])
except StopIteration:
print "Data of information symbols is empty! Input is a list, not str."
raise
        # If any symbol's time stamp matches the execution symbol's time stamp, return
        # the data held in "initInfobar" and then update that symbol with new data.
temp = {}
for info_symbol in self.initInfobar:
data = self.initInfobar[info_symbol]
# Update data only when Time Stamp is matched
if (data is not None) and (data['datetime'] <= bar.datetime):
try:
temp[info_symbol] = CtaBarData()
temp[info_symbol].__dict__ = data
self.initInfobar[info_symbol] = next(initInfoCursorDict[info_symbol])
except StopIteration:
self.initInfobar[info_symbol] = None
self.ctaEngine.output("No more data for initializing %s." % (info_symbol,))
else:
temp[info_symbol] = None
return temp
# ----------------------------------------------------------------------
def updateInfoArray(self, infobar):
"""收到Infomation Data, 更新辅助品种缓存字典"""
for name in infobar:
data = infobar[name]
# Construct empty array
if len(self.infoArray) < len(infobar) :
self.infoArray[name] = {
"close": np.zeros(self.bufferSize),
"high": np.zeros(self.bufferSize),
"low": np.zeros(self.bufferSize),
"open": np.zeros(self.bufferSize)
}
if data is None:
pass
else:
self.infoArray[name]["close"][0:self.bufferSize - 1] = \
self.infoArray[name]["close"][1:self.bufferSize]
self.infoArray[name]["high"][0:self.bufferSize - 1] = \
self.infoArray[name]["high"][1:self.bufferSize]
self.infoArray[name]["low"][0:self.bufferSize - 1] = \
self.infoArray[name]["low"][1:self.bufferSize]
self.infoArray[name]["open"][0:self.bufferSize - 1] = \
self.infoArray[name]["open"][1:self.bufferSize]
self.infoArray[name]["close"][-1] = data.close
self.infoArray[name]["high"][-1] = data.high
self.infoArray[name]["low"][-1] = data.low
self.infoArray[name]["open"][-1] = data.open
# ----------------------------------------------------------------------
def onBar(self, bar, **kwargs):
"""收到Bar推送(必须由用户继承实现)"""
# Update infomation data
# "infobar"是由不同时间或不同品种的品种数据组成的字典, 如果和执行品种的 TimeStamp 不匹配,
# 则传入的是"None", 当time stamp和执行品种匹配时, 传入的是"Bar"
if "infobar" in kwargs:
self.infoBar = kwargs["infobar"]
self.updateInfoArray(kwargs["infobar"])
        # If not enough data has been buffered yet, do not trade
self.bufferCount += 1
if self.bufferCount < self.bufferSize:
return
        # Compute the indicator values
a = np.sum(self.infoArray["TestData @GC_1D"]["close"])
if a == 0.0:
return
        # Only update the indicators after the 30-minute or daily bar changes
TradeOn = False
        if any([self.infoBar[i] is not None for i in self.infoBar]):
TradeOn = True
self.vRange = self.infoArray["TestData @GC_1D"]["high"][-1] -\
self.infoArray["TestData @GC_1D"]["low"][-1]
self.vOBO_stretch = self.vRange * self.pOBO_Mult
self.vOBO_initialpoint = self.infoArray["TestData @GC_1D"]["close"][-1]
self.vOBO_level_L = self.vOBO_initialpoint + self.vOBO_stretch
self.vOBO_level_S = self.vOBO_initialpoint - self.vOBO_stretch
self.atrValue30M = talib.abstract.ATR(self.infoArray["TestData @GC_30M"])[-1]
        # Decide whether to trade
        # Currently no position
if (self.pos == 0 and TradeOn == True):
            # Cancel previously issued orders that have not yet been filled (both limit and stop orders)
for orderID in self.orderList:
self.cancelOrder(orderID)
self.orderList = []
            # If the high of the previous 30-minute bar is above OBO_level_L
            # and the current price is above OBO_level_L, go long
if self.infoArray["TestData @GC_30M"]["high"][-1] > self.vOBO_level_L:
if bar.close > self.vOBO_level_L:
self.buy(bar.close + 0.5, 1)
                    # After placing the order, do not trade again until the next 30-minute bar
TradeOn = False
            # If the low of the previous 30-minute bar is below OBO_level_S
            # and the current price is below OBO_level_S, go short
elif self.infoArray["TestData @GC_30M"]["low"][-1] < self.vOBO_level_S:
if bar.close < self.vOBO_level_S:
self.short(bar.close - 0.5, 1)
                    # After placing the order, do not trade again until the next 30-minute bar
TradeOn = False
        # Holding a long position
elif self.pos > 0:
            # Exit when the price falls below the initialpoint level
if bar.close < self.vOBO_initialpoint:
self.sell(bar.close - 0.5 , 1)
        # Holding a short position
elif self.pos < 0:
            # Exit when the price rises above the initialpoint level
if bar.close > self.vOBO_initialpoint:
self.cover(bar.close + 0.5, 1)
        # Emit a status update event
self.putEvent()
# ----------------------------------------------------------------------
def onOrder(self, order):
"""收到委托变化推送(必须由用户继承实现)"""
pass
# ----------------------------------------------------------------------
def onTrade(self, trade):
pass
if __name__ == '__main__':
    # Allows running the backtest directly by double-clicking this file
    # PyQt4 is imported to make sure matplotlib uses PyQt4 rather than PySide, avoiding initialization errors
from ctaBacktestMultiTF import *
from PyQt4 import QtCore, QtGui
import time
    '''
    Create backtesting engine
    Set backtest mode as "Bar"
    Set "Start Date" of data range
    Load historical data to engine
    Create strategy instance in engine
    '''
engine = BacktestEngineMultiTF()
engine.setBacktestingMode(engine.BAR_MODE)
engine.setStartDate('20120101')
engine.setEndDate('20150101')
engine.setDatabase("TestData", "@GC_1M", info_symbol=[("TestData","@GC_30M"),
("TestData","@GC_1D")])
# Set parameters for strategy
engine.initStrategy(BreakOut, {})
    # Set product-related parameters
    engine.setSlippage(0.2)     # One tick for the index future
    engine.setCommission(0.3 / 10000)   # 0.3 per 10,000 (0.003%)
    engine.setSize(1)         # Contract multiplier of the index future
    # Start the backtest
start = time.time()
engine.runBacktesting()
    # Show the backtesting results
engine.showBacktestingResult()
print 'Time consumed:%s' % (time.time() - start) | mit |
breeezzz/local-bitcoins-api | LocalBitcoins/market_depth.py | 1 | 6253 | '''
Created on 7 Jun 2013
@author: Jamie
'''
import urllib2
import math
import re
import itertools
import argparse
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
markets = {'UK': {'url': 'gb/united%20kingdom/', 'curr': 'GBP'},
'USA': {'url': 'us/united%20states/', 'curr': 'USD'},
'GERMANY': {'url': 'de/germany/', 'curr': 'EUR'},
'ITALY': {'url': 'it/italy/', 'curr': 'EUR'},
'SPAIN': {'url': 'es/spain/', 'curr': 'EUR'},
'AUSTRALIA': {'url': 'au/australia/', 'curr': 'AUD'},
'ARGENTINA': {'url': 'ar/argentina/', 'curr': 'ARS'},
'NETHERLANDS': {'url': 'nl/netherlands/', 'curr': 'EUR'},
'BRAZIL': {'url': 'br/brazil/', 'curr': 'BRL'},
'FRANCE': {'url': 'fr/france/', 'curr': 'EUR'},
'GBP': {'url': 'gbp/', 'curr': 'GBP'},
'USD': {'url': 'usd/', 'curr': 'USD'},
'EUR': {'url': 'eur/', 'curr': 'EUR'},
}
methods = {'NATIONAL_BANK_TRANSFER': 'national-bank-transfer/'}
method = ''
buy_url = 'https://localbitcoins.com/buy-bitcoins-online/'
sell_url = 'https://localbitcoins.com/sell-bitcoins-online/'
def get_ads_dict(soup, buy_sell):
prices = get_prices(soup)
users = get_users(soup)
amounts = get_amounts(soup)
amounts = [a/p for a,p in zip(amounts, prices)] # To give amount in BTC
currency = get_currency(soup)
methods = get_methods(soup)
lists = set(zip(prices, users, amounts, currency))
if buy_sell == 'buy':
sorted_ads = sorted(lists)
elif buy_sell == 'sell':
sorted_ads = sorted(lists)[::-1]
prices = [item[0] for item in sorted_ads]
users = [item[1] for item in sorted_ads]
amounts = [item[2] for item in sorted_ads]
currency = [item[3] for item in sorted_ads]
depth = get_depth(amounts)
ads_dict = {'users': users, 'prices': prices, 'amounts': amounts,
'depth': depth, 'currency': currency, 'methods': methods}
return ads_dict
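# Sketch of the structure returned by get_ads_dict (the values shown are
# hypothetical, only the field names match the code above):
#
#     {'prices':   [249.1, 250.0, ...],        # sorted ad prices
#      'users':    ['alice', 'bob', ...],      # advertisers
#      'amounts':  [0.8, 1.2, ...],            # ad size in BTC
#      'depth':    [0.8, 2.0, ...],            # cumulative BTC
#      'currency': ['GBP', 'GBP', ...],
#      'methods':  ['National bank transfer', ...]}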
def get_prices(soup):
''' Returns a list of prices '''
prices = soup.find_all('td', attrs={'class':"column-price"})
prices = [float(re.findall("\d+.\d+", price.get_text())[0]) for price in prices]
return prices
def get_currency(soup):
''' Returns a list of currencies '''
prices = soup.find_all('td', attrs={'class':"column-price"})
currencies = [price.get_text().split()[-1] for price in prices]
return currencies
def get_methods(soup):
''' Returns a list of payment methods '''
methods = soup.find_all('tr', attrs={'class':"clickable"})
methods = [method.get_text().split('\n')[-7].strip() for method in methods]
return methods
def get_users(soup):
''' Returns a list of users '''
users = soup.find_all('td', attrs={'class':"column-user"})
users = [user.get_text().split()[0] for user in users]
return users
def get_amounts(soup):
''' Returns a list of amounts '''
raw_amounts = soup.find_all('td', attrs={'class':"column-limit"})
amounts = []
for amount in raw_amounts:
try:
amounts += [float(amount.get_text().split()[2])]
except:
amounts += [0.0]
return amounts
def get_depth(amounts):
''' Generates the cumulative amount for each point on the curve '''
cum_amounts = []
cum_amount = 0
for amount in amounts:
cum_amount += amount
cum_amounts += [cum_amount]
return cum_amounts
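# Illustrative example (hypothetical amounts): get_depth turns per-ad BTC
# amounts into the cumulative depth used as the depth axis of the curves,
# e.g. get_depth([1.0, 0.5, 2.0]) -> [1.0, 1.5, 3.5].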
def get_buy_curve(market):
response = urllib2.urlopen(buy_url + market['url'] + method)
soup = BeautifulSoup(response)
buy_ads = get_ads_dict(soup, 'buy')
buy_prices = [i for i,j in zip(buy_ads['prices'], buy_ads['currency']) if j == market['curr']]
buy_depth = [i for i,j in zip(buy_ads['depth'], buy_ads['currency']) if j == market['curr']]
buy_prices = double_list(buy_prices)[1:]
buy_depth = double_list(buy_depth)[:-1]
return buy_prices[:-2], buy_depth[:-2]
def get_sell_curve(market):
response = urllib2.urlopen(sell_url + market['url'] + method)
soup = BeautifulSoup(response)
sell_ads = get_ads_dict(soup, 'sell')
sell_prices = [i for i,j in zip(sell_ads['prices'], sell_ads['currency']) if j == market['curr']][::-1]
sell_depth = [i for i,j in zip(sell_ads['depth'], sell_ads['currency']) if j == market['curr']][::-1]
sell_prices = double_list(sell_prices)[1:]
sell_depth = double_list(sell_depth)[:-1]
return sell_prices, sell_depth
def plot_chart(ax, buy, sell):
ax.plot(buy[0], buy[1], color='r')
ax.plot(sell[0], sell[1], color='g')
def double_list(list_in):
iters = [iter(list_in), iter(list_in)]
return list(it.next() for it in itertools.cycle(iters))
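# double_list repeats every element in place so the depth curve can be drawn
# as a step function, e.g. (hypothetical input):
#     double_list([1, 2, 3]) -> [1, 1, 2, 2, 3, 3]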
def get_bid(country):
market = markets[country]
response = urllib2.urlopen(buy_url + market['url'] + method)
soup = BeautifulSoup(response)
buy_ads = get_ads_dict(soup, 'buy')
bid = buy_ads['prices'][0]
return bid
def get_ask(country):
market = markets[country]
response = urllib2.urlopen(sell_url + market['url'] + method)
soup = BeautifulSoup(response)
sell_ads = get_ads_dict(soup, 'sell')
ask = sell_ads['prices'][0]
return ask
def make_charts(*args):
if len(args[0].countries) == 0:
selection = ['UK','USA','SPAIN','FRANCE','GERMANY','BRAZIL']
else:
selection = args[0].countries
fig = plt.figure()
dim = math.ceil(len(selection)**0.5)
for x, s in enumerate(selection):
market = markets[s]
# method = methods['NATIONAL_BANK_TRANSFER']
ax = fig.add_subplot(dim, dim, x+1)
ax.set_xlabel(market['curr'])
ax.set_ylabel('BTC')
ax.set_title('Local Bitcoins online: %s' % s)
buy_curve = get_buy_curve(market)
sell_curve = get_sell_curve(market)
plot_chart(ax, buy_curve, sell_curve)
plt.tight_layout()
plt.show()
def main():
parser = argparse.ArgumentParser(description='Display charts of the Local Bitcoin market depth.')
parser.add_argument('countries', type=str, nargs='*',
help='optionally specify any number of country names')
args = parser.parse_args()
make_charts(args)
if __name__ == '__main__':
main()
| mit |
planetarymike/IDL-Colorbars | IDL_py_test/027_Eos_B.py | 1 | 5942 | from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
cm_data = [[1., 1., 1.],
[1., 1., 1.],
[0.498039, 0.498039, 0.498039],
[0., 0., 0.513725],
[0., 0., 0.533333],
[0., 0., 0.54902],
[0., 0., 0.564706],
[0., 0., 0.580392],
[0., 0., 0.6],
[0., 0., 0.615686],
[0., 0., 0.568627],
[0., 0., 0.584314],
[0., 0., 0.666667],
[0., 0., 0.682353],
[0., 0., 0.698039],
[0., 0., 0.713725],
[0., 0., 0.733333],
[0., 0., 0.74902],
[0., 0., 0.764706],
[0., 0., 0.780392],
[0., 0., 0.717647],
[0., 0., 0.733333],
[0., 0., 0.831373],
[0., 0., 0.847059],
[0., 0., 0.866667],
[0., 0., 0.882353],
[0., 0., 0.898039],
[0., 0., 0.913725],
[0., 0., 0.933333],
[0., 0., 0.94902],
[0., 0., 0.866667],
[0., 0., 0.882353],
[0., 0., 1.],
[0., 0.027451, 0.968627],
[0., 0.0588235, 0.937255],
[0., 0.0901961, 0.905882],
[0., 0.121569, 0.87451],
[0., 0.152941, 0.843137],
[0., 0.184314, 0.811765],
[0., 0.215686, 0.780392],
[0., 0.223529, 0.67451],
[0., 0.25098, 0.643137],
[0., 0.309804, 0.686275],
[0., 0.341176, 0.654902],
[0., 0.372549, 0.623529],
[0., 0.403922, 0.592157],
[0., 0.435294, 0.560784],
[0., 0.466667, 0.529412],
[0., 0.498039, 0.498039],
[0., 0.529412, 0.466667],
[0., 0.505882, 0.392157],
[0., 0.533333, 0.364706],
[0., 0.623529, 0.372549],
[0., 0.654902, 0.341176],
[0., 0.686275, 0.309804],
[0., 0.717647, 0.278431],
[0., 0.74902, 0.247059],
[0., 0.780392, 0.215686],
[0., 0.811765, 0.184314],
[0., 0.843137, 0.152941],
[0., 0.784314, 0.109804],
[0., 0.811765, 0.0823529],
[0., 0.937255, 0.0588235],
[0., 0.968627, 0.027451],
[0., 1., 0.],
[0.0352941, 1., 0.],
[0.0705882, 1., 0.],
[0.105882, 1., 0.],
[0.141176, 1., 0.],
[0.176471, 1., 0.],
[0.192157, 0.898039, 0.],
[0.223529, 0.898039, 0.],
[0.282353, 1., 0.],
[0.317647, 1., 0.],
[0.356863, 1., 0.],
[0.392157, 1., 0.],
[0.427451, 1., 0.],
[0.462745, 1., 0.],
[0.498039, 1., 0.],
[0.533333, 1., 0.],
[0.513725, 0.898039, 0.],
[0.545098, 0.898039, 0.],
[0.639216, 1., 0.],
[0.678431, 1., 0.],
[0.713725, 1., 0.],
[0.74902, 1., 0.],
[0.784314, 1., 0.],
[0.819608, 1., 0.],
[0.854902, 1., 0.],
[0.890196, 1., 0.],
[0.835294, 0.898039, 0.],
[0.866667, 0.898039, 0.],
[1., 1., 0.],
[1., 0.980392, 0.],
[1., 0.964706, 0.],
[1., 0.94902, 0.],
[1., 0.933333, 0.],
[1., 0.913725, 0.],
[1., 0.898039, 0.],
[1., 0.882353, 0.],
[0.898039, 0.776471, 0.],
[0.898039, 0.764706, 0.],
[1., 0.831373, 0.],
[1., 0.815686, 0.],
[1., 0.8, 0.],
[1., 0.780392, 0.],
[1., 0.764706, 0.],
[1., 0.74902, 0.],
[1., 0.733333, 0.],
[1., 0.713725, 0.],
[0.898039, 0.627451, 0.],
[0.898039, 0.611765, 0.],
[1., 0.662745, 0.],
[1., 0.647059, 0.],
[1., 0.631373, 0.],
[1., 0.615686, 0.],
[1., 0.6, 0.],
[1., 0.580392, 0.],
[1., 0.564706, 0.],
[1., 0.54902, 0.],
[0.898039, 0.478431, 0.],
[0.898039, 0.462745, 0.],
[1., 0.498039, 0.],
[1., 0.490196, 0.],
[1., 0.482353, 0.],
[1., 0.47451, 0.],
[1., 0.466667, 0.],
[1., 0.454902, 0.],
[1., 0.447059, 0.],
[1., 0.439216, 0.],
[0.898039, 0.388235, 0.],
[0.898039, 0.380392, 0.],
[1., 0.415686, 0.],
[1., 0.407843, 0.],
[1., 0.4, 0.],
[1., 0.388235, 0.],
[1., 0.380392, 0.],
[1., 0.372549, 0.],
[1., 0.364706, 0.],
[1., 0.356863, 0.],
[0.898039, 0.313725, 0.],
[0.898039, 0.305882, 0.],
[1., 0.329412, 0.],
[1., 0.321569, 0.],
[1., 0.313725, 0.],
[1., 0.305882, 0.],
[1., 0.298039, 0.],
[1., 0.290196, 0.],
[1., 0.282353, 0.],
[1., 0.27451, 0.],
[0.898039, 0.239216, 0.],
[0.898039, 0.231373, 0.],
[1., 0.247059, 0.],
[1., 0.239216, 0.],
[1., 0.231373, 0.],
[1., 0.223529, 0.],
[1., 0.215686, 0.],
[1., 0.207843, 0.],
[1., 0.196078, 0.],
[1., 0.188235, 0.],
[0.898039, 0.164706, 0.],
[0.898039, 0.156863, 0.],
[1., 0.164706, 0.],
[1., 0.156863, 0.],
[1., 0.14902, 0.],
[1., 0.141176, 0.],
[1., 0.129412, 0.],
[1., 0.121569, 0.],
[1., 0.113725, 0.],
[1., 0.105882, 0.],
[0.898039, 0.0862745, 0.],
[0.898039, 0.0823529, 0.],
[1., 0.0823529, 0.],
[1., 0.0745098, 0.],
[1., 0.0627451, 0.],
[1., 0.054902, 0.],
[1., 0.0470588, 0.],
[1., 0.0509804, 0.],
[1., 0.0313725, 0.],
[1., 0.0235294, 0.],
[0.898039, 0.0117647, 0.],
[0.898039, 0.00392157, 0.],
[1., 0., 0.],
[0.992157, 0., 0.],
[0.984314, 0., 0.],
[0.976471, 0., 0.],
[0.968627, 0., 0.],
[0.960784, 0., 0.],
[0.952941, 0., 0.],
[0.945098, 0., 0.],
[0.843137, 0., 0.],
[0.839216, 0., 0.],
[0.921569, 0., 0.],
[0.917647, 0., 0.],
[0.909804, 0., 0.],
[0.901961, 0., 0.],
[0.894118, 0., 0.],
[0.886275, 0., 0.],
[0.878431, 0., 0.],
[0.870588, 0., 0.],
[0.776471, 0., 0.],
[0.768627, 0., 0.],
[0.847059, 0., 0.],
[0.843137, 0., 0.],
[0.835294, 0., 0.],
[0.827451, 0., 0.],
[0.819608, 0., 0.],
[0.811765, 0., 0.],
[0.803922, 0., 0.],
[0.796078, 0., 0.],
[0.709804, 0., 0.],
[0.701961, 0., 0.],
[0.772549, 0., 0.],
[0.768627, 0., 0.],
[0.760784, 0., 0.],
[0.752941, 0., 0.],
[0.745098, 0., 0.],
[0.737255, 0., 0.],
[0.729412, 0., 0.],
[0.721569, 0., 0.],
[0.643137, 0., 0.],
[0.635294, 0., 0.],
[0.698039, 0., 0.],
[0.690196, 0., 0.],
[0.686275, 0., 0.],
[0.678431, 0., 0.],
[0.670588, 0., 0.],
[0.662745, 0., 0.],
[0.654902, 0., 0.],
[0.647059, 0., 0.],
[0.576471, 0., 0.],
[0.568627, 0., 0.],
[0.623529, 0., 0.],
[0.615686, 0., 0.],
[0.611765, 0., 0.],
[0.603922, 0., 0.],
[0.596078, 0., 0.],
[0.588235, 0., 0.],
[0.580392, 0., 0.],
[0.572549, 0., 0.],
[0.509804, 0., 0.],
[0.501961, 0., 0.],
[0.54902, 0., 0.],
[0.541176, 0., 0.],
[0.537255, 0., 0.],
[0.529412, 0., 0.],
[0.521569, 0., 0.],
[0.513725, 0., 0.],
[0.505882, 0., 0.],
[0.498039, 0., 0.],
[0.443137, 0., 0.],
[0.435294, 0., 0.],
[0.47451, 0., 0.],
[0.466667, 0., 0.],
[0.458824, 0., 0.],
[0.458824, 0., 0.]]
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from pycam02ucs.cm.viscm import viscm
viscm(test_cm)
except ImportError:
print("pycam02ucs not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
| gpl-2.0 |
williamleif/histwords | statutils/plothelper.py | 2 | 5401 | import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
def trendline(xd, yd, order=1, c='r', alpha=1, plot_r=False, text_pos=None):
"""Make a line of best fit"""
#Calculate trendline
coeffs = np.polyfit(xd, yd, order)
intercept = coeffs[-1]
slope = coeffs[-2]
if order == 2: power = coeffs[0]
else: power = 0
minxd = np.min(xd)
maxxd = np.max(xd)
xl = np.array([minxd, maxxd])
yl = power * xl ** 2 + slope * xl + intercept
#Plot trendline
plt.plot(xl, yl, color=c, alpha=alpha)
    #Calculate the Pearson correlation coefficient
    r = sp.stats.pearsonr(xd, yd)[0]
    if plot_r == False:
        #Plot the R value
        if text_pos == None:
text_pos = (0.9 * maxxd + 0.1 * minxd, 0.9 * np.max(yd) + 0.1 * np.min(yd),)
plt.text(text_pos[0], text_pos[1], '$R = %0.2f$' % r)
else:
        #Return the R value:
return r
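# Usage sketch for trendline() with synthetic data (the numbers below are
# made up for illustration only):
#
#     x = np.arange(20)
#     y = 3.0 * x + np.random.randn(20)
#     plt.scatter(x, y)
#     trendline(x, y)          # draws the fit and annotates the R value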
def plot_nice_err(x, y, y_err, color='blue', ls='-', lw=1):
plt.plot(x, y, color=color, ls=ls, lw=lw)
plt.fill_between(x, y-y_err, y+y_err, alpha=0.1, color=color)
def plot_word_dist(info, words, start_year, end_year, one_minus=False, legend_loc='upper left'):
colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k']
plot_info = {}
for word in words:
plot_info[word] = info[word]
for title, data_dict in plot_info.iteritems():
x = []; y = []
for year, val in data_dict.iteritems():
if year >= start_year and year <= end_year:
x.append(year)
if one_minus:
val = 1 - val
y.append(val)
color = colors.pop()
plt.plot(x, smooth(np.array(y)), color=color)
plt.scatter(x, y, marker='.', color=color)
plt.legend(plot_info.keys(), loc=legend_loc)
return plt
def get_ccdf(deg_hist, x_min=1):
cum_counts = [0]
degs = range(x_min, np.max(deg_hist.keys()))
total_sum = 0
for deg in degs:
if deg in deg_hist:
deg_count = deg_hist[deg]
else:
deg_count = 0
total_sum += deg_count
cum_counts.append((cum_counts[-1] + deg_count))
return np.array(degs), 1 - np.array(cum_counts[1:]) / float(total_sum)
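# Sketch of intended use (the histogram dict below is a made-up example):
# the returned arrays give, approximately, the complementary CDF P(X > deg),
# typically plotted on log-log axes.
#
#     degs, ccdf = get_ccdf({1: 50, 2: 30, 3: 15, 10: 5})
#     plt.loglog(degs, ccdf)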
def plot_word_basic(info, words, start_year, end_year, datatype):
colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k']
plot_info = {}
for word in words:
plot_info[word] = info[word]
for title, data_dict in plot_info.iteritems():
x = []; y = []
for year, val in data_dict[datatype].iteritems():
if year >= start_year and year <= end_year:
x.append(year)
y.append(val)
color = colors.pop()
plt.plot(x, smooth(np.array(y)), color=color)
plt.scatter(x, y, marker='.', color=color)
plt.legend(plot_info.keys())
plt.show()
def plot_basic(plot_info, start_year, end_year):
for title, data_dict in plot_info.iteritems():
x = []; y = []
for year, val in data_dict.iteritems():
if year >= start_year and year <= end_year:
x.append(year)
y.append(val)
plt.plot(x, y)
plt.legend(plot_info.keys())
plt.show()
def plot_smooth(x, y, color='blue', window_len=7, window='hanning', ax=None, lw=1.0, ls="-", **kwargs):
if ax == None:
_, ax = plt.subplots(1,1)
ax.plot(x, smooth(np.array(y), window_len=window_len), color=color, lw=lw, ls=ls)
ax.scatter(x, y, color=color, **kwargs)
return ax
def smooth(x, window_len=7, window='hanning'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
in the begining and end part of the output signal.
input:
x: the input signal
window_len: the dimension of the smoothing window; should be an odd integer
window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
output:
the smoothed signal
example:
t=linspace(-2,2,0.1)
x=sin(t)+randn(len(t))*0.1
y=smooth(x)
see also:
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.
"""
if x.ndim != 1:
raise ValueError, "smooth only accepts 1 dimension arrays."
if x.size < window_len:
raise ValueError, "Input vector needs to be bigger than window size."
if window_len<3:
return x
if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError, "Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
s=np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]
#print(len(s))
if window == 'flat': #moving average
w=np.ones(window_len,'d')
else:
w=eval('np.'+window+'(window_len)')
y=np.convolve(w/w.sum(),s,mode='valid')
y = y[(window_len/2 - 1):-(window_len/2 + 1)]
return y
| apache-2.0 |
c-PRIMED/puq | test/UniformPDF_test.py | 1 | 4485 | #! /usr/bin/env python
'''
Testsuite for the UniformPDF class
'''
from __future__ import absolute_import, division, print_function
import numpy as np
from puq import *
import scipy.stats as stats
def _hisplot(y, nbins):
n, bins = np.histogram(y, nbins, normed=True)
mids = bins[:-1] + np.diff(bins) / 2.0
return mids, n
def compare_curves(x1, y1, x2, y2, **args):
ay = np.interp(x2, x1, y1)
rmse = np.sqrt(np.sum((ay - y2)**2))
print("maximum difference is", np.max(np.abs(ay - y2)))
print("RMSE=%s" % rmse)
# assert rmse < .002
assert np.allclose(ay, y2, **args)
def _test_updf(min, max):
options['pdf']['samples'] = 1000
c = UniformPDF(min=min, max=max)
assert isinstance(c, PDF)
x = c.x
y = stats.uniform(min, max-min).pdf(x)
rmse = np.sqrt(np.sum((c.y - y)**2))
print("RMSE=%s" % rmse)
print("MaxError=", np.max(abs(c.y - y)))
assert rmse < 1e-11
def _test_ucdf(min, max):
options['pdf']['samples'] = 1000
c = UniformPDF(min=min, max=max)
cdfy = stats.uniform(min, max-min).cdf(c.x)
rmse = np.sqrt(np.sum((c.cdfy - cdfy)**2))
print("RMSE=%s" % rmse)
print("MaxError=", np.max(abs(c.cdfy - cdfy)))
assert rmse < 1e-11
"""
import matplotlib.pyplot as plt
plt.plot(c.x, c.cdfy, color='green')
plt.plot(c.x, cdfy, color='red')
plt.show()
"""
# test mean, min, max and deviation
def _test_uniform_minmeanmax(min, mean, max):
c = UniformPDF(min=min, mean=mean, max=max)
cmin, cmax = c.range
print("min=%s mean=%s max=%s" % (cmin, c.mean, cmax))
if min is not None:
assert min == cmin
else:
assert cmin == mean - (max - mean)
if max is not None:
assert max == cmax
else:
assert cmax == mean + (mean - min)
if mean is not None:
assert np.allclose(mean, c.mean)
else:
assert np.allclose(c.mean, (min + max) / 2.0)
# test lhs()
def _test_uniform_lhs(min, max):
c = UniformPDF(min=min, max=max)
# test the lhs() function to see if the curve it generates is
# close enough
data = c.ds(10000)
assert len(data) == 10000
assert np.min(data) >= min
assert np.max(data) <= max
dx, dy = _hisplot(data, 20)
x = dx
y = stats.uniform(min, max-min).pdf(x)
compare_curves(x, y, dx, dy, atol=.0001)
"""
import matplotlib.pyplot as plt
plt.plot(x, y, color='red')
plt.plot(dx, dy, color='blue')
plt.show()
"""
assert np.allclose(c.mean, np.mean(data), rtol=.001), 'mean=%s' % np.mean(data)
# test lhs1()
def _test_uniform_lhs1(min, max):
c = UniformPDF(min=min, max=max)
data = c.ds1(1000)
xs = data
assert len(xs) == 1000
    assert (min, max) == tuple(c.range)
# scale [-1,1] back to original size
mean = (min + max)/2.0
xs *= max - mean
xs += mean
dx, dy = _hisplot(xs, 20)
x = dx
y = stats.uniform(min, max-min).pdf(x)
compare_curves(x, y, dx, dy, atol=.001)
"""
import matplotlib.pyplot as plt
plt.plot(x, y, color='green')
plt.plot(dx, dy, color='blue')
plt.show()
"""
assert np.allclose(c.mean, np.mean(data), rtol=.001), 'mean=%s' % np.mean(data)
def _test_uniform_random(min, max):
c = UniformPDF(min=min, max=max)
data = c.random(1000000)
assert len(data) == 1000000
dx, dy = _hisplot(data, 20)
x = dx
y = stats.uniform(min, max-min).pdf(x)
compare_curves(x, y, dx, dy, atol=.02)
assert np.min(data) >= min
assert np.max(data) <= max
"""
import matplotlib.pyplot as plt
plt.plot(dx, dy, color='red')
plt.plot(x, y, color='blue')
plt.show()
"""
assert np.allclose(c.mean, np.mean(data), rtol=.001), 'mean=%s' % np.mean(data)
def test_updf():
_test_updf(10,20)
_test_updf(-20,-10)
def test_ucdf():
_test_ucdf(100,105)
_test_ucdf(-1,2)
def test_uniform_minmeanmax():
_test_uniform_minmeanmax(0,None,20)
_test_uniform_minmeanmax(None,0.5,2)
_test_uniform_minmeanmax(5,10,15)
_test_uniform_minmeanmax(5,10,None)
def test_uniform_lhs():
_test_uniform_lhs(10,20)
_test_uniform_lhs(-100, -50)
def test_uniform_lhs1():
_test_uniform_lhs1(10,20)
_test_uniform_lhs1(-100, -50)
def test_uniform_random():
_test_uniform_random(10,20)
if __name__ == "__main__":
test_updf()
test_ucdf()
test_uniform_minmeanmax()
test_uniform_lhs()
test_uniform_lhs1()
test_uniform_random()
| mit |
aolindahl/streaking | process_hdf5.py | 1 | 46151 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 8 15:37:51 2015
@author: Anton O Lindahl
"""
import h5py
import argparse
import matplotlib.pyplot as plt
import numpy as np
import time
import os
import sys
import lmfit
import warnings
from aolPyModules import wiener, wavelet_filter
import time_to_energy_conversion as tof_to_energy
from aolPyModules import plotting as aol_plotting
import area_fill
prompt_roi = [1.508, 1.535]
streak_time_roi = [1.57, 1.66]
wt_th = 0.03
energy_scale_eV = np.linspace(40, 160, 2**9)
time_stamp = 'time_stamp'
data_dir = 'h5_files'
h5_file_name_template = data_dir + '/run{}_all.h5'
response_file_name = data_dir + '/response.h5'
nois_file_name = data_dir + '/noise.h5'
tof_to_energy_conversion_file_name = data_dir + '/time_to_energy.h5'
def h5_file_name_funk(run):
return h5_file_name_template.format(run)
def update_progress(i_evt, n_events, verbose=True):
if (verbose and
((i_evt % (n_events / 100) == 0) or (i_evt == n_events-1))):
progress = (100 * i_evt) / (n_events - 1)
num_squares = 40
base_string = '\r[{:' + str(num_squares) + '}] {}%'
print base_string.format('#'*(progress * num_squares / 100), progress),
sys.stdout.flush()
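# Typical use inside an event loop (the loop bounds here are hypothetical):
#     for i_evt in range(n_events):
#         update_progress(i_evt, n_events, verbose=(verbose > 0))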
def list_hdf5_content(group, indent=' '):
for k, v in group.iteritems():
print '{}"{}"'.format(indent, k),
if isinstance(v, h5py.Group):
print 'group with members:'
list_hdf5_content(v, indent=indent + ' ')
elif isinstance(v, h5py.Dataset):
print '\t{} {}'.format(v.shape, v.dtype)
def make_dataset(h5, name, shape, dtype=np.float):
try:
dset = h5.require_dataset(name, shape=shape,
dtype=dtype, exact=True)
except TypeError:
del h5[name]
        dset = h5.create_dataset(name, shape=shape, dtype=dtype)
if time_stamp not in dset.attrs.keys():
dset.attrs.create(time_stamp, 0)
return dset
def make_group(h5, name):
try:
group = h5.require_group(name)
except TypeError:
del h5[name]
group = h5.create_group(name)
if time_stamp not in group.attrs.keys():
group.attrs.create(time_stamp, 0)
return group
def older(dset, dset_list):
if (isinstance(dset_list, h5py.Dataset) or
isinstance(dset_list, h5py.Group)):
return dset.attrs[time_stamp] < dset_list.attrs[time_stamp]
return np.any([dset.attrs[time_stamp] < d.attrs[time_stamp] for
d in dset_list])
class Timer_object:
def __init__(self, t):
self.attrs = {'time_stamp': t}
class Tims_stamp_warning(Warning):
pass
def time_stamp_object(h5_object):
try:
h5_object.attrs['time_stamp'] = time.time()
except:
warnings.warn('Could not time stamp the object {}.'.format(
repr(h5_object)))
def get_response(plot=False, verbose=0):
try:
with h5py.File(response_file_name, 'r') as f:
response = f['signal'].value
t = f['signal'].attrs[time_stamp]
except IOError:
if verbose > 0:
print 'Could not open response file. Trying to make it.'
response, t = construct_response(verbose=verbose)
if plot:
with h5py.File(response_file_name, 'r') as f:
time_scale = f['time_scale'].value
plt.figure('response')
plt.clf()
plt.plot(time_scale, response)
return response, t
def construct_response(plot=False, verbose=0):
# The Kr runs
runs = [132, 133, 134, 135, 136]
if verbose > 0:
print 'Loading Kr files for prompt determination.'
h5_file_names = [h5_file_name_template.format(run) for run in runs]
h5_list = []
for file_name in h5_file_names:
update_run_contained_derived_data(file_name, verbose=verbose)
h5_list.append(h5py.File(file_name, 'r+'))
time_scale = h5_list[0]['raw/time_scale'].value
response = np.zeros_like(time_scale)
n_shots = 0
sl = slice(time_scale.searchsorted(prompt_roi[0]),
time_scale.searchsorted(prompt_roi[1], side='right'))
for h5 in h5_list:
response[sl] += h5['raw/time_signal'][:, sl].sum(0)
n_shots += h5['raw/event_time_s'].shape[0]
response /= n_shots
response[sl] = wiener.edgeSmoothing(response[sl], smoothPoints=15)
response /= response.sum()
with h5py.File(response_file_name, 'w') as res_file:
dset = res_file.create_dataset('signal', data=response)
dset.attrs.create(time_stamp, time.time())
res_file.create_dataset('time_scale', data=time_scale)
return get_response(plot=plot, verbose=verbose)
def get_file_names_for_noise_spectrum():
return ['/'.join([data_dir, f]) for f in os.listdir(data_dir) if
f.startswith('run') and f.endswith('_all.h5')]
def get_nois_spectrum(plot=False, verbose=0):
try:
with h5py.File(nois_file_name, 'r') as f:
pass
new_noise = False
except IOError:
if verbose > 0:
print 'Could not open response file. Trying to make it.',
print 'In "get_nois_spectrum()".'
construct_nois_spectrum(plot=plot, verbose=verbose)
new_noise = True
if not new_noise:
make_new_noise = False
with h5py.File(nois_file_name, 'r') as f:
noise = f['noise']
h5_file_names = get_file_names_for_noise_spectrum()
for h5_name in h5_file_names:
with h5py.File(h5_name, 'r') as h5:
if older(noise, h5['raw']):
make_new_noise = True
if verbose > 0:
print 'Noise was made earlier than the raw data',
print 'in the file', h5_name, 'Make new noise.'
break
elif False:
print 'Noise was made later than the raw data in',
print 'the file', h5_name
if make_new_noise:
construct_nois_spectrum(plot=plot, verbose=verbose)
with h5py.File(nois_file_name, 'r') as f:
noise = f['noise']
return noise.value, noise.attrs['time_stamp']
def construct_nois_spectrum(plot=False, verbose=0):
h5_file_names = get_file_names_for_noise_spectrum()
for file_name in h5_file_names:
update_run_contained_derived_data(file_name)
empty_shots = []
for i, h5_name in enumerate(h5_file_names):
with h5py.File(h5_name, 'r') as h5:
time_signal_dset = h5['raw/time_signal']
try:
max_signal = h5['max_signal'].value
except KeyError:
max_signal = np.max(time_signal_dset.value, axis=1)
no_x_rays = max_signal < 0.04
if no_x_rays.sum() > 0:
empty_shots.extend(time_signal_dset[no_x_rays, :])
if i == 0:
time_scale = h5['raw/time_scale'].value
if verbose > 0:
print h5_name, 'has', no_x_rays.sum(), 'empty shots'
empty_shots = np.array(empty_shots)
# print len(empty_shots)
# plt.figure('snr')
# plt.clf()
# for shot in empty_shots[:]:
# plt.plot(time_scale, shot)
freq = (np.linspace(0., 1., len(time_scale)) *
1e-3/(time_scale[1] - time_scale[0]))
fft_empty_shots = np.fft.fft(empty_shots, axis=1)
amp = np.mean(np.abs(fft_empty_shots)**2, axis=0)
wt_amp = amp[:]
wt_amp = wavelet_filter.wavelet_filt(amp[1:], thresh=wt_th)
wt_amp[1:] = (wt_amp[1:] + wt_amp[-1:0:-1]) / 2
# plt.figure('fft')
# plt.clf()
# plt.plot(freq, amp)
# plt.plot(freq, wt_amp, 'r')
with h5py.File(nois_file_name, 'w') as f:
dset = f.create_dataset('noise', data=wt_amp)
dset.attrs.create('time_stamp', time.time())
f.create_dataset('freq', data=freq)
return get_nois_spectrum()
def construct_snr_spectrum(h5, plot=False):
noise, t = get_nois_spectrum()
sig_spec = h5['fft_spectrum_mean'].value
freq = h5['fft_freq_axis'].value
wt_spec = wavelet_filter.wavelet_filt(sig_spec, thresh=wt_th)
wt_spec[1:] = (wt_spec[1:] + wt_spec[-1:0:-1]) / 2
snr = (wt_spec - noise) / noise
if plot:
plt.figure('signal and noise')
plt.clf()
plt.semilogy(freq, sig_spec, label='signal')
plt.semilogy(freq, noise, label='noise')
plt.semilogy(freq, wt_spec, label='wt signal')
plt.semilogy(freq, snr, label='snr')
plt.legend(loc='best')
return snr
def check_tof_to_energy_conversion_matrix(plot=False, verbose=0):
try:
with h5py.File(tof_to_energy_conversion_file_name, 'r'):
pass
except IOError:
if verbose > 0:
print 'Could not open the file. Making the conversion matrix.'
construc_tof_to_energy_conversion_matrix(plot=plot, verbose=verbose)
_, h5_dict, _ = tof_to_energy.load_tof_to_energy_data(verbose=verbose)
with h5py.File(tof_to_energy_conversion_file_name, 'r') as trans_h5:
if not older(
trans_h5['matrix'],
[h5['streak_peak_integral'] for h5 in h5_dict.itervalues()] +
[Timer_object(1437117486)]):
return
if verbose > 0:
print 'Conversion to old, remaking it.'
construc_tof_to_energy_conversion_matrix(plot=plot, verbose=verbose)
def construc_tof_to_energy_conversion_matrix(plot=False, verbose=0):
M, t, E, time_to_energy_params, tof_prediction_params = \
tof_to_energy.make_tof_to_energy_matrix(
energy_scale_eV=energy_scale_eV, plot=plot, verbose=verbose)
with h5py.File(tof_to_energy_conversion_file_name, 'w') as h5:
dset = h5.create_dataset('matrix', data=M)
dset.attrs.create('time_stamp', time.time())
dset = h5.create_dataset('time_scale', data=t)
dset.attrs.create('time_stamp', time.time())
dset = h5.create_dataset('energy_scale_eV', data=E)
dset.attrs.create('time_stamp', time.time())
for k in time_to_energy_params:
dset = h5.create_dataset(k, data=time_to_energy_params[k].value)
dset.attrs.create('time_stamp', time.time())
for k in tof_prediction_params:
dset = h5.require_dataset(k, (), np.float)
dset[()] = tof_prediction_params[k].value
dset.attrs.create('time_stamp', time.time())
def open_hdf5_file(file_name, plot=False, verbose=0):
try:
# Open the file
h5 = h5py.File(file_name, 'r+')
except BaseException as e:
print 'Could not open the specified hdf5 file "{}".'.format(
file_name)
print 'Message was: {}'.format(e.message)
return -1
return h5
def get_com(x, y):
idx_l, idx_h = fwxm(x, y, 0.0, return_data='idx')
sl = slice(idx_l, idx_h)
return ((x[sl] * y[sl]).sum()) / (y[sl].sum())
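# For reference, get_com() evaluates the intensity-weighted centre of mass
#     com = sum(x_i * y_i) / sum(y_i)
# over the region where the trace stays above the fwxm() threshold.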
def fwxm(x, y, fraction=0.5, return_data=''):
y_max = y.max()
idx_max = y.argmax()
y_f = y_max * fraction
for i in range(idx_max, -1, -1):
if y[i] < y_f:
idx_low = i
break
else:
idx_low = idx_max
for i in range(idx_max, len(x)):
if y[i] < y_f:
idx_high = i
break
else:
idx_high = idx_max
if return_data == 'idx':
return idx_low, idx_high
if return_data == 'limits':
return x[idx_low], x[idx_high]
return (x[idx_low] + x[idx_high]) / 2, x[idx_high] - x[idx_low]
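# Small self-contained demonstration of fwxm(); the Gaussian input is an
# assumption made purely for illustration, not experiment data.
def fwxm_demo():
    """Hedged usage sketch for fwxm() on a synthetic peak.

    For a unit-sigma Gaussian the full width at half maximum should come out
    close to 2 * sqrt(2 * ln(2)) ~ 2.355.
    """
    x = np.linspace(-5, 5, 1001)
    y = np.exp(-x**2 / 2.0)
    center, width = fwxm(x, y, fraction=0.5)
    return center, width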
def get_trace_bounds(x, y,
threshold=0.0, min_width=2,
energy_offset=0,
useRel=False, threshold_rel=0.5,
roi=slice(None)):
amp = y[roi]
scale = x[roi]
dx = np.mean(np.diff(x))
if useRel:
threshold_temp = threshold_rel * np.max(amp[np.isfinite(amp)])
if threshold_temp < threshold:
return [np.nan] * 3
else:
threshold_V = threshold_temp
else:
threshold_V = threshold
nPoints = np.round(min_width/dx)
i_min = 0
for i in range(1, amp.size):
if amp[i] < threshold_V:
i_min = i
continue
if i-i_min >= nPoints:
break
else:
return [np.nan] * 3
i_max = amp.size - 1
for i in range(amp.size-1, -1, -1):
if amp[i] < threshold_V:
i_max = i
continue
if i_max-i >= nPoints:
break
else:
return [np.nan] * 3
if i_min == 0 and i_max == amp.size - 1:
return [np.nan] * 3
# print 'min =', min, 'max =', max
val_max = (scale[i_max] + (threshold_V - amp[i_max]) *
(scale[i_max] - scale[i_max - 1]) /
(amp[i_max] - amp[i_max - 1]))
val_min = (scale[i_min] + (threshold_V - amp[i_min]) *
(scale[i_min + 1] - scale[i_min]) /
(amp[i_min + 1] - amp[i_min]))
return val_min, val_max, threshold_V
def update_run_contained_derived_data(file_name, plot=False, verbose=0):
"""Update derived data based on information only in given file.
Add some derived datasetd to the hdf5 file based on the raw data in the
file. The added datasets are:
- Mean of the FEE gas detectors for each shot: fee_mean
- Maximum TOF waveform signal for each shot: max_signal
- Frequency spectrum averaged over all shots: fft_spectrum_mean
- The corresponding frequency axis: fft_freq_axis
- BC2 energy calculated from the beam position: energy_BC2_MeV
- L3 energy corrected based on the BC2 energy: energy_L3_corrected_MeV
"""
if verbose > 0:
print 'Entering "update_run_contained_derived_data()" ',
print 'with file_name={}'.format(file_name)
h5 = open_hdf5_file(file_name, plot, verbose)
raw_group = h5['raw']
n_events = raw_group['event_time_s'].shape[0]
# Make the fee data set
raw_fee_dset = raw_group['FEE_energy_mJ']
fee_mean_dset = make_dataset(h5, 'fee_mean', (n_events,))
if older(fee_mean_dset, raw_group):
if verbose > 0:
print 'Updating fee mean dataset'
fee_mean_dset[:] = raw_fee_dset[:, 0: 4].mean(1)
fee_mean_dset.attrs[time_stamp] = time.time()
# Make max signal dataset
time_signal_dset = raw_group['time_signal']
max_sig_dset = make_dataset(h5, 'max_signal', (n_events,))
if older(max_sig_dset, raw_group):
if verbose > 0:
print 'Get the maximum signal for each shot.'
max_sig_dset[:] = np.max(time_signal_dset, axis=1)
max_sig_dset.attrs['time_stamp'] = time.time()
# Make the frequency spectrum
time_scale = raw_group['time_scale'].value
spectrum_dset = make_dataset(h5, 'fft_spectrum_mean', time_scale.shape)
if older(spectrum_dset, [raw_group, max_sig_dset]):
if verbose > 0:
print 'Compute the frequency spectrum of the data.'
max_signal = max_sig_dset.value
use = max_signal > np.sort(max_signal)[-500:][0]
signal = time_signal_dset[use, :]
spectrum_dset[:] = np.mean(np.abs(np.fft.fft(signal, axis=1))**2,
axis=0)
spectrum_dset.attrs['time_stamp'] = time.time()
freq_axis_dset = make_dataset(h5, 'fft_freq_axis', time_scale.shape)
if older(freq_axis_dset, raw_group):
if verbose > 0:
print 'Updating the frequency axis.'
freq_axis_dset[:] = (np.linspace(0., 1e-3, len(time_scale)) /
(time_scale[1] - time_scale[0]))
freq_axis_dset.attrs['time_stamp'] = time.time()
# Calculate the BC2 energy
bc2_energy_dset = make_dataset(h5, 'energy_BC2_MeV', (n_events, ))
if older(bc2_energy_dset, raw_group):
if verbose > 0:
print 'Calculating BC2 energy for the bpm reading.'
        # Values come from a mail from Timothy Maxwell
# The nominal BC2 energy is 5 GeV (was at least when this data was
# recorded). The measurement is the relative offset of the beam
# position in a BPM. The dispersion value is -364.7 mm.
bc2_energy_dset[:] = 5e3 * (1. - raw_group['position_BC2_mm'][:] /
364.7)
bc2_energy_dset.attrs['time_stamp'] = time.time()
# Calculate the corrected L3 energy
l3_energy_cor_dset = make_dataset(h5, 'energy_L3_corrected_MeV',
(n_events, ))
if older(l3_energy_cor_dset, [raw_group, bc2_energy_dset,
Timer_object(1434096408)]):
if verbose > 0:
print 'Calculating corrected L3 energy.'
l3_energy_cor_dset[:] = (raw_group['energy_L3_MeV'][:] -
(bc2_energy_dset[:] - 5000))
l3_energy_cor_dset.attrs['time_stamp'] = time.time()
# Make the phase cavity time filter
pct_filter_dset = make_dataset(h5, 'pct_filter', (n_events, ),
dtype=bool)
if older(pct_filter_dset, [raw_group, Timer_object(0)]):
print h5.filename
pct0 = raw_group['phase_cavity_times'][:, 0]
pct_filter_dset[:] = (0.4 < pct0) & (pct0 < 1.2)
pct_filter_dset.attrs[time_stamp] = time.time()
h5.close()
def update_with_noise_and_response(file_name, plot=False, verbose=0):
"""Update derived data based on noise and response spectra.
    Noise spectrum and detector response are determined from many runs. With
    these spectra a number of new parameters can be derived. These are:
- snr_spectrum: Signal to Noise ratio spectrum based on the given noise \
spectrum and the average spectrum in the current run.
    - filtered_time_signal: Wiener deconvolution of the time signal based on \
the signal to noise ratio and the detector response function.
- streak_peak_center: Center of the streaking peak in the sense of the \
center of mass of the peak in a given ROI. Based on the deconvoluted \
signal.
- streak_peak_integral: Photoline intensity by integration of the \
deconvoluted spectrum in time domain.
"""
# Make sure that the run contained information is up to date.
update_run_contained_derived_data(file_name, plot, verbose-1)
# Open the file.
h5 = open_hdf5_file(file_name, plot, verbose)
raw_group = h5['raw']
n_events = raw_group['event_time_s'].shape[0]
time_scale = raw_group['time_scale'].value
# Make signal to noise ratio.
snr_dset = make_dataset(h5, 'snr_spectrum', time_scale.shape)
spectrum_dset = h5['fft_spectrum_mean']
if older(snr_dset, [spectrum_dset, raw_group, Timer_object(1434015914)]):
if verbose > 0:
print 'Updating the signal to noise ratio.',
print ' In "update_with_noise_and_response()"',
print ' with file_name={}'.format(file_name)
snr_dset[:] = construct_snr_spectrum(h5, plot=plot)
snr_dset.attrs['time_stamp'] = time.time()
# Deconvolute the response function
time_signal_dset = raw_group['time_signal']
deconv_time_signal_dset = make_dataset(h5, 'filtered_time_signal',
time_signal_dset.shape)
if older(deconv_time_signal_dset, [raw_group, snr_dset]):
response, t_response = get_response(plot=plot, verbose=verbose-1)
if verbose > 0:
print 'Deconvolving traces.'
print ' In "update_with_noise_and_response()"',
print ' with file_name={}'.format(file_name),
print ' {} events to process.'.format(n_events)
deconvolver = wiener.Deconcolver(snr_dset.value, response)
for i_evt in range(n_events):
deconv_time_signal_dset[i_evt, :] = deconvolver.deconvolve(
time_signal_dset[i_evt, :])
update_progress(i_evt, n_events, verbose)
print ''
deconv_time_signal_dset.attrs['time_stamp'] = time.time()
# Calculate the center of mass of the streak peak
time_com_dset = make_dataset(h5, 'streak_peak_center', (n_events, ))
photo_line_intensity_dset = make_dataset(h5, 'streak_peak_integral',
(n_events, ))
if older(time_com_dset, [deconv_time_signal_dset,
Timer_object(1443006988)]):
if verbose > 0:
print 'Calculating streak peak center in time.',
print ' In "update_with_noise_and_response()"',
print ' with file_name={}'.format(file_name)
streak_sl = slice(np.searchsorted(time_scale, streak_time_roi[0]),
np.searchsorted(time_scale, streak_time_roi[1],
side='right'))
time_scale_streak = time_scale[streak_sl]
####
# Center of mass calculation
# for i_evt in range(n_events):
# time_com_dset[i_evt] = get_com(
# time_scale_streak,
# deconv_time_signal_dset[i_evt, streak_sl])
# update_progress(i_evt, n_events, verbose)
####
# Fit of Gaussian
deconv_time_signal = deconv_time_signal_dset.value
time_com = np.zeros(time_com_dset.shape)
photo_line_intensity = np.zeros(photo_line_intensity_dset.shape)
mean_signal = deconv_time_signal[:, streak_sl].mean(axis=0)
mod = lmfit.models.GaussianModel()
params = lmfit.Parameters()
params.add_many(('amplitude', 1, True, 0),
('center', time_scale_streak[np.argmax(mean_signal)],
True, min(time_scale_streak), max(time_scale_streak)),
('sigma', 1e-3, True, 0))
# fit to mean in order to get start parameters for the shot fits
out = mod.fit(mean_signal, x=time_scale_streak, params=params)
for k in params:
params[k].value = out.params[k].value
for i_evt in range(n_events):
out = mod.fit(deconv_time_signal[i_evt, streak_sl],
params, x=time_scale_streak)
time_com[i_evt] = out.params['center'].value
photo_line_intensity[i_evt] = out.params['amplitude'].value
update_progress(i_evt, n_events, verbose)
if plot:
time_scale_streak = time_scale[streak_sl]
plt.figure('peak finding time domain')
plt.clf()
plt.plot(time_scale_streak, mean_signal)
plt.plot(time_scale_streak, out.best_fit)
if verbose > 0:
print ''
time_com_dset[:] = time_com
time_com_dset.attrs['time_stamp'] = time.time()
photo_line_intensity_dset[:] = photo_line_intensity
photo_line_intensity_dset.attrs['time_stamp'] = time.time()
h5.close()
def update_with_time_to_energy_conversion(file_name, plot=False, verbose=0):
""" Make derived data based on time to energy conversion."""
update_with_noise_and_response(file_name, plot, verbose)
h5 = open_hdf5_file(file_name, plot, verbose)
raw_group = h5['raw']
n_events = raw_group['event_time_s'].shape[0]
deconv_time_signal_dset = h5['filtered_time_signal']
energy_scale_dset = make_dataset(h5, 'energy_scale_eV',
energy_scale_eV.shape)
energy_trace_dset = make_dataset(h5, 'energy_signal',
(n_events, len(energy_scale_eV)))
check_tof_to_energy_conversion_matrix(verbose=verbose)
with h5py.File(tof_to_energy_conversion_file_name, 'r') as tof_to_e_h5:
if older(energy_scale_dset, [tof_to_e_h5['matrix'],
deconv_time_signal_dset,
Timer_object(1443190000)]):
if verbose > 0:
print 'Updating time to energy conversion.',
print ' In "update_with_time_to_energy_conversion()"',
print ' with {}'.format(file_name)
# Get the transformation matrix from file
M = tof_to_e_h5['matrix'].value
# Update the energy scale
energy_scale_dset[:] = tof_to_e_h5['energy_scale_eV'].value
energy_scale_dset.attrs['time_stamp'] = time.time()
# Get the photon energy prediction parameters
params = (tof_to_energy.photon_energy_params() +
tof_to_energy.tof_prediction_params())
for k in params:
params[k].value = tof_to_e_h5[k].value
if verbose > 0:
print 'Computing energy spectra.'
for i_evt in range(n_events):
# Energy spectra
energy_trace_dset[i_evt, :] = M.dot(
deconv_time_signal_dset[i_evt, :])
update_progress(i_evt, n_events, verbose)
if verbose > 0:
print ''
energy_trace_dset.attrs['time_stamp'] = time.time()
# Calculate energy trace properties
spectral_properties_group = h5.require_group('spectral_properties')
spectral_center_dset = make_dataset(spectral_properties_group,
'center_eV', (n_events, ))
spectral_width_dset = make_dataset(spectral_properties_group,
'width_eV', (n_events, ))
spectral_threshold_dset = make_dataset(spectral_properties_group,
'threshold', (n_events, ))
spectral_gaussian_center_dset = make_dataset(spectral_properties_group,
'gaussian_center',
(n_events,))
if older(spectral_center_dset, [energy_trace_dset,
Timer_object(1443421560)]):
energy_scale = energy_scale_dset[:]
sl = slice(np.searchsorted(energy_scale, 75),
np.searchsorted(energy_scale, 125))
energy_scale = energy_scale[sl]
model = lmfit.models.GaussianModel()
if verbose > 0:
print 'Calculating spectral center and width:',
print 'In "update_with_time_to_energy_conversion()"',
print 'with {}'.format(file_name)
for i_evt in range(n_events):
energy_trace = energy_trace_dset[i_evt, sl]
t_start, t_end, spectral_threshold_dset[i_evt] = \
get_trace_bounds(energy_scale,
energy_trace,
threshold=8e-5,
min_width=3,
# useRel=True,
# threshold_rel=0.3
)
center = (t_start + t_end) / 2
spectral_center_dset[i_evt] = center
width = t_end - t_start
spectral_width_dset[i_evt] = width
# Calculate center of mass
peak_sl = slice(energy_scale.searchsorted(t_start - width/2),
energy_scale.searchsorted(t_end + width/2,
side='right'))
peak_trace = energy_trace[peak_sl]
peak_scale = energy_scale[peak_sl]
# spectral_com_dset[i_evt] = (np.sum(peak_scale * peak_trace) /
# np.sum(peak_trace))
if len(peak_trace) > 3:
out = model.fit(peak_trace, x=peak_scale,
center=center, sigma=width/4,
amplitude=peak_trace.max() * width / 2)
spectral_gaussian_center_dset[i_evt] = out.values['center']
else:
spectral_gaussian_center_dset[i_evt] = np.nan
update_progress(i_evt, n_events, verbose)
spectral_center_dset.attrs['time_stamp'] = time.time()
spectral_width_dset.attrs['time_stamp'] = time.time()
spectral_threshold_dset.attrs['time_stamp'] = time.time()
spectral_gaussian_center_dset.attrs['time_stamp'] = time.time()
if plot:
selected_shots = list(np.linspace(0, n_events, 16, endpoint=False))
plt.figure('peak properties')
plt.clf()
_, ax_list = plt.subplots(4, 4, sharex=True, sharey=True,
num='peak properties')
energy_scale = energy_scale_dset[:]
sl = slice(np.searchsorted(energy_scale, 75),
np.searchsorted(energy_scale, 130))
energy_scale = energy_scale[sl]
for i, shot in enumerate(selected_shots):
energy_trace = energy_trace_dset[shot, :]
ax = ax_list.flatten()[i]
# plt.plot(energy_scale - pe_energy_prediction_dset[shot],
ax.plot(energy_scale, energy_trace[sl])
c = spectral_center_dset[shot]
w = spectral_width_dset[shot]
th = spectral_threshold_dset[shot]
ax.plot([c-w/2, c+w/2], [th] * 2)
# Calculate main photoline area
main_photoline_area = make_dataset(spectral_properties_group,
'main_photoline_area', (n_events, ))
if older(main_photoline_area, energy_trace_dset):
if verbose:
print 'Computing photoline area'
e_scale = energy_scale_dset.value
dE = np.mean(np.diff(e_scale))
e_slice = slice(np.searchsorted(e_scale, 55), None)
for i_evt in range(n_events):
raw_A, _ = area_fill.zero_crossing_area(
energy_trace_dset[i_evt, e_slice])
main_photoline_area[i_evt] = raw_A * dE
update_progress(i_evt, n_events, verbose)
time_stamp_object(main_photoline_area)
##########
# Calculate electron energy prediction
e_energy_prediction_params_group = make_group(h5,
'e_energy_prediction_params')
if older(e_energy_prediction_params_group, [spectral_gaussian_center_dset,
Timer_object(1444931900)]):
if verbose > 0:
print 'Fit the electron energy prediction parameters.',
print 'In "update_with_time_to_energy_conversion()"',
print 'with {}'.format(file_name)
selection = np.isfinite(spectral_gaussian_center_dset.value)
# &
# (0.4 < raw_group['phase_cavity_times'][:, 0]) &
# (raw_group['phase_cavity_times'][:, 0] < 1.1))
spectral_gaussian_center = spectral_gaussian_center_dset[selection]
if len(spectral_gaussian_center) == 0:
return
var_dict = {
'l3_energy': raw_group['energy_L3_MeV'][selection],
'bc2_energy': h5['energy_BC2_MeV'][selection],
# 'fee': h5['fee_mean'][selection],
'e_energy': spectral_gaussian_center
}
prediction_params = \
tof_to_energy.e_energy_prediction_model_start_params(**var_dict)
try:
res = lmfit.minimize(tof_to_energy.e_energy_prediction_model,
prediction_params,
kws=var_dict)
fit_worked = True
except:
fit_worked = False
if verbose > 0 and fit_worked:
print '\nPrediction params:'
lmfit.report_fit(res)
# Create or update the parameters from the fit in the group
for k, v in prediction_params.iteritems():
d = e_energy_prediction_params_group.require_dataset(
k, (), np.float)
d[()] = v.value if fit_worked else np.nan
# Remove old parameters that should not be there
for k in set(e_energy_prediction_params_group.keys()).difference(
set(prediction_params.keys())):
del e_energy_prediction_params_group[k]
e_energy_prediction_params_group.attrs[time_stamp] = time.time()
if plot:
deviation = tof_to_energy.e_energy_prediction_model(
prediction_params, **var_dict)
plt.figure('e energy prediction {}'.format(
h5.filename.split('/')[-1]))
plt.clf()
plt.subplot(221)
# plt.plot(spectral_gaussian_center, deviation, '.')
plt.scatter(spectral_gaussian_center, deviation,
s=4, c=h5['energy_BC2_MeV'][selection],
linewidths=(0,), alpha=1)
plt.xlabel('electron energy (eV)')
plt.ylabel('prediction residual (eV)')
x_range = plt.xlim()
y_range = plt.ylim()
img, _, _ = np.histogram2d(spectral_gaussian_center, deviation,
bins=2**7, range=[x_range, y_range])
img = img.T
plt.subplot(222)
plt.imshow(img, aspect='auto', interpolation='none',
origin='lower', extent=x_range + y_range)
hist, hist_edges = np.histogram(deviation,
bins=2**5, range=(-3, 3))
hist_centers = (hist_edges[: -1] + hist_edges[1:])/2
plt.subplot(223)
gauss_model = lmfit.models.GaussianModel()
fit_out = gauss_model.fit(hist, x=hist_centers)
lmfit.report_fit(fit_out)
plt.bar(hist_edges[:-1], hist, width=np.diff(hist_edges))
plt.plot(hist_centers, fit_out.best_fit, 'r', linewidth=2)
plt.subplot(224)
plt.plot(spectral_gaussian_center, h5['energy_BC2_MeV'][selection],
'.')
def update_with_energy_prediction(file_name, plot=False, verbose=0):
update_with_time_to_energy_conversion(file_name, plot, verbose)
h5 = open_hdf5_file(file_name, plot, verbose)
raw_group = h5['raw']
n_events = raw_group['event_time_s'].shape[0]
prediction_map = {'117': 'h5_files/run118_all.h5',
'114': 'h5_files/run115_all.h5',
'113': 'h5_files/run112_all.h5',
'108': 'h5_files/run109_all.h5',
'101': 'h5_files/run100_all.h5',
'102': 'h5_files/run100_all.h5'}
pe_energy_prediction_dset = make_dataset(
h5, 'photoelectron_energy_prediction_eV', (n_events,))
spectral_properties_group = h5['spectral_properties']
# spectral_gaussian_center_dset = spectral_properties_group[
# 'gaussian_center']
fee_dset = h5['fee_mean']
energy_BC2_dset = h5['energy_BC2_MeV']
energy_L3_dset = raw_group['energy_L3_MeV']
for k, v in prediction_map.iteritems():
if k in file_name:
update_with_time_to_energy_conversion(v, plot=False,
verbose=verbose-1)
            ref_h5 = open_hdf5_file(v)
e_energy_prediction_params_group = \
ref_h5['e_energy_prediction_params']
break
else:
e_energy_prediction_params_group = h5['e_energy_prediction_params']
if older(pe_energy_prediction_dset, [e_energy_prediction_params_group,
fee_dset,
energy_BC2_dset,
raw_group,
Timer_object(1444981500)]):
if verbose > 0:
print 'Updating energy prediction.',
print ' In "update_with_energy_prediction()" with {}'.format(
file_name)
prediction_params = lmfit.Parameters()
for k in e_energy_prediction_params_group:
prediction_params.add(k, e_energy_prediction_params_group[k][()])
var_dict = {
'l3_energy': energy_L3_dset.value,
'bc2_energy': energy_BC2_dset.value,
'fee': fee_dset.value
}
try:
pe_energy_prediction_dset[:] = \
tof_to_energy.e_energy_prediction_model(prediction_params,
**var_dict)
except:
pe_energy_prediction_dset[:] = np.nan
pe_energy_prediction_dset.attrs[time_stamp] = time.time()
##########
# Make the christmas three histogram
n_spectral_center_bins = 2**7
n_spectral_width_bins = 2**7
spectral_center_axis_dset = make_dataset(spectral_properties_group,
'center_axis_eV',
(n_spectral_center_bins, ))
spectral_width_axis_dset = make_dataset(spectral_properties_group,
'width_axis_eV',
(n_spectral_width_bins, ))
spectral_histogram_dset = make_dataset(spectral_properties_group,
'histogram',
(n_spectral_width_bins,
n_spectral_center_bins))
spectral_center_dset = spectral_properties_group['center_eV']
spectral_width_dset = spectral_properties_group['width_eV']
pct_filter_dset = h5['pct_filter']
if older(spectral_histogram_dset, [spectral_center_dset,
spectral_width_dset,
pe_energy_prediction_dset,
pct_filter_dset,
Timer_object(2444203160)]):
if verbose > 0:
print 'Making the christmas tree plot.',
print ' In "update_with_energy_prediction()"',
print ' with {}'.format(file_name)
spectral_width_axis_dset[:] = np.linspace(0, 35, n_spectral_width_bins)
spectral_width_axis_dset.attrs['time_stamp'] = time.time()
spectral_center_axis_dset[:] = np.linspace(-20, 20,
n_spectral_center_bins)
spectral_center_axis_dset.attrs['time_stamp'] = time.time()
# I = (pct_filter_dset.value &
# (-0.1 < raw_group['phase_cavity_times'][:, 1]) &
## (raw_group['phase_cavity_times'][:, 1] < 0.05) &
## (0.75 < raw_group['phase_cavity_times'][:, 0]) &
## (raw_group['phase_cavity_times'][:, 0] < 0.85) &
# (0.065 < raw_group['power_meter_V'].value) &
# (raw_group['power_meter_V'].value < 0.1))
I = np.ones(pct_filter_dset.shape, dtype=bool)
hist = aol_plotting.center_histogram_2d(
spectral_center_dset[I] - pe_energy_prediction_dset[I],
spectral_width_dset[I],
spectral_center_axis_dset[:],
spectral_width_axis_dset[:])
hist[hist == 0] = np.nan
spectral_histogram_dset[:] = hist
spectral_histogram_dset.attrs['time_stamp'] = time.time()
if plot:
plt.figure('christmas tree {}'.format(h5.filename.split('/')[-1]))
plt.clf()
plt.imshow(spectral_histogram_dset[:], aspect='auto',
interpolation='none', origin='lower',
extent=(np.min(spectral_center_axis_dset),
np.max(spectral_center_axis_dset),
np.min(spectral_width_axis_dset),
np.max(spectral_width_axis_dset)))
plt.xlabel('center (eV)')
plt.ylabel('width (eV)')
plt.colorbar()
plt.savefig('figures/christmas_tree_{}.png'.format(
h5.filename.split('/')[-1].split('.')[0]))
h5.close()
def load_file(file_name, plot=False, verbose=0):
""" Load file and make sure it is up to date."""
# if verbose > 0:
# print 'Entering "load_file()" with file_name={}'.format(file_name)
update_with_energy_prediction(file_name, plot, verbose)
h5 = open_hdf5_file(file_name, plot, verbose)
raw_group = h5['raw']
n_events = raw_group['event_time_s'].shape[0]
if verbose > 0:
print 'File {} processed.'.format(h5.file)
print 'It contains', n_events, 'events.'
if verbose > 1:
list_hdf5_content(h5)
return h5
def touch_all_files(verbose=2):
file_names = ['/'.join([data_dir, f]) for f in os.listdir(data_dir) if
f.startswith('run') and f.endswith('_all.h5')]
for name in file_names:
load_file(name, verbose=verbose)
if __name__ == '__main__':
    # Parse the command line.
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--hdf5_file', type=str,
default='h5_files/run108_all.h5',
help='Path to hdf5 file to process')
parser.add_argument('--plot', action='store_true',
help='Display plots. Default: no plots.')
parser.add_argument('-v', '--verbose', action='count',
help='increase output verbosity')
args = parser.parse_args()
# Unpack the parser arguments.
hdf5_file = args.hdf5_file
plot = args.plot
verbose = args.verbose
    # If plotting is requested, run pyplot in the interactive mode.
if plot:
plt.ion()
if verbose > 0:
print 'Get the noise spectrum just to make sure it is up to date.'
get_nois_spectrum(plot=plot, verbose=verbose)
# Load the given file.
if verbose > 0:
print 'Load the requested file: {}'.format(hdf5_file)
h5 = load_file(hdf5_file, verbose=verbose, plot=plot)
# Get the raw group of the file.
raw_group = h5['raw']
# Number of events in the file.
n_events = len(raw_group['event_time_s'])
    # Time trace related information.
raw_time = raw_group['time_scale'].value
raw_traces_dset = raw_group['time_signal']
filtered_traces = h5['filtered_time_signal']
# Pulse energy
raw_fee_dset = raw_group['FEE_energy_mJ']
n_fee = raw_fee_dset.shape[1]
# frequency domain
freq_axis = h5['fft_freq_axis'].value
fft_mean = h5['fft_spectrum_mean'].value
snr = h5['snr_spectrum'].value
if plot and False:
if verbose > 0:
print 'Plotting fee correlations.'
plt.figure('fee')
plt.clf()
ax = None
for i in range(n_fee):
for k in range(n_fee):
ax = plt.subplot(n_fee, n_fee, i + k*n_fee + 1,
sharex=ax, sharey=ax)
ax.plot(raw_fee_dset[:, i], raw_fee_dset[:, k], '.')
if i > 0:
plt.setp(ax.get_yticklabels(), visible=False)
if k < n_fee-1:
plt.setp(ax.get_xticklabels(), visible=False)
plt.xlim(xmin=0)
plt.ylim(ymin=0)
if verbose > 0:
print 'Plotting fee histogram.'
plt.figure('fee histogram')
plt.clf()
plt.hist(h5['fee_mean'].value, bins=100)
if plot:
if verbose > 0:
            print 'Plot signal maximum histogram.'
plt.figure('signal hist')
plt.clf()
plt.hist(h5['max_signal'], bins=100)
if plot:
if verbose > 0:
            print 'Plot spectra.'
plt.figure('fft')
plt.clf()
plt.semilogy(freq_axis, fft_mean, label='average spectrum')
plt.semilogy(freq_axis, snr, label='snr')
plt.legend(loc='best')
# Plot some traces
if plot:
if verbose > 0:
print 'Plotting traces'
trace_fig = plt.figure('traces {}'.format(hdf5_file))
trace_fig.clf()
raw_mean_tr = raw_traces_dset.value.mean(0)
deconv_mean_tr = filtered_traces.value.mean(0)
rand_event = np.random.randint(n_events)
response, _ = get_response(plot=False, verbose=verbose)
plt.plot(raw_time, raw_traces_dset[rand_event, :],
label='single trace')
plt.plot(raw_time, filtered_traces[rand_event, :],
label='Deconv single trace')
plt.plot(raw_time, raw_mean_tr, label='mean trace')
plt.plot(raw_time, deconv_mean_tr,
label='Deconv mean')
plt.legend(loc='best')
# Plot the phase cavity times
pct = raw_group['phase_cavity_times']
plt.figure('Phase cavity times')
plt.clf()
# pc_selection = (np.isfinite(np.sum(pct, axis=1)) &
# (pct[:, 0] > -2) & (pct[:, 0] < 2) &
# (pct[:, 1] > -2) & (pct[:, 1] < 2))
# (pct[:, 0] > -50) & (pct[:, 0] < 50))
pc_selection = h5['pct_filter'].value
for i in range(2):
plt.subplot(1, 3, i+1)
plt.title('Time {}'.format(i))
hist, hist_edges = np.histogram(pct[pc_selection, i], bins=100)
plt.bar(hist_edges[: -1], hist, width=np.diff(hist_edges))
plt.subplot(133)
plt.plot(pct[pc_selection, 0], pct[pc_selection, 1], '.')
# Plot energy traces and photon energy diagnostics
pe_energy_dset = h5['photoelectron_energy_prediction_eV']
energy_scale = h5['energy_scale_eV'][:]
energy_signal_dset = h5['energy_signal']
selected_shots = np.linspace(0, n_events, 100, endpoint=False, dtype=int)
plt.figure('Energy spectra')
plt.clf()
ax1 = plt.subplot(121)
ax2 = plt.subplot(122)
dy = 1e-5
for i, shot in enumerate(selected_shots):
ax1.plot(energy_scale, energy_signal_dset[shot, :] + dy * i)
ax2.plot(energy_scale - pe_energy_dset[shot],
energy_signal_dset[shot, :] + dy * i)
ax2.set_xlim(-20, 25)
# %%
# Plot the photoline area
plt.figure('photoline area')
plt.clf()
spectral_properties_group = h5['spectral_properties']
main_photoline_area = spectral_properties_group[
'main_photoline_area'].value
fee = h5['fee_mean'].value
I = np.isfinite(main_photoline_area) & np.isfinite(fee)
p = np.polyfit(fee[I], main_photoline_area[I], 2)
fee_ax = np.linspace(min(fee[I]), max(fee[I]), 2**5)
plt.subplot(121)
plt.plot(fee, main_photoline_area, '.')
plt.plot(fee_ax, np.polyval(p, fee_ax), 'r')
plt.subplot(122)
plt.hist2d(fee[I], main_photoline_area[I], bins=2**7)
plt.plot(fee_ax, np.polyval(p, fee_ax), 'r')
| gpl-2.0 |
TiKunze/CanMics | src/python/01_SingleChannel/3pop/EIN/HeHiVariation/RUM_Detektor_HeHi_2ndversion_cluster.py | 1 | 5917 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 22 17:15:03 2015
@author: Tim Kunze
Copyright (C) 2015, Tim Kunze. All rights reserved.
This script is a modified version of the RUM Detector:
instead of sweeping over He and Hi in every diagram, we sweep over length and intensity of the impulse (as in the activation plot)
"""
###############################################################################
#
# Imports
#
###############################################################################
import numpy as np
import sys
import scipy as sc
import os # to enable some C commands (cwd,listdir)
currpath = '/usr/wrk/people9/tiku2449/EI_RUM/001_Unifying_Framework/RUM_Exploration'
os.chdir(currpath)
import sys
sys.path.append("/usr/wrk/people9/tiku2449/EI_RUM/001_Unifying_Framework")
import Models.JuRClass_fin_006 as FCV
import Simulation_And_Analysis.Sim_Simulation_003 as simulate
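# Minimal manual command-line parsing: the script expects the H_e and H_i
# values as "-he <value>" and "-hi <value>"; decimal commas are converted to
# dots before parsing.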
while len(sys.argv) > 1:
option = sys.argv[1]; del sys.argv[1]
if option == '-he': he = float(sys.argv[1].replace(',','.')); del sys.argv[1]
elif option == '-hi': hi = float(sys.argv[1].replace(',','.')); del sys.argv[1]
else:
        print 'Invalid option:', option, '->', sys.argv[0]
#%%
###############################################################################
#
# Main
#
###############################################################################
dt = 1000e-6
JR = FCV.JuR()
JR.integ_stepsize = dt
JR.n=2
JR.coupling = np.array([[0.0,0.0],[0.0,0.0]]) #
JR.distanceMatrix = np.array([[0.0,0.01],[0.0,0.0]]) # important!!
JR.init = np.zeros((8,JR.n))
JR.c_e=0 # only relevant for connected areas
JR.c_i=0 # only relevant for connected areas
JR.c_py=30 # only relevant for connected areas
JR.configure()
#%%
###############################################################################
#
## Activation Diagram RUM with modulation of input to II
#
###############################################################################
t_simulation = 5
N=t_simulation/dt
time = np.arange(0,N*dt,dt)
JR.H_e=he
JR.H_i=hi
p_sim_py = np.zeros((N,JR.n))
p_sim_e = np.zeros((N,JR.n))
p_sim_i = np.zeros((N,JR.n))
length_range = np.arange(500,1501,10)
intensity_range = np.arange(50,251,2)
state_grid = np.zeros((len(intensity_range),len(length_range),3))
i=0
j=0
for ins in intensity_range:
j=0
for le in length_range:
p_sim_e = np.zeros((N,JR.n))
p_sim_e[1000:1000+le,:] = ins
signal,sig_ei,sig_ii,impact,data = simulate.simulate_network_006(JR,p_sim_py,p_sim_e,p_sim_i,t_simulation)
state_grid[i,j,0] = np.mean(signal[999,0])
state_grid[i,j,1] = np.mean(signal[4000:,0])
state_grid[i,j,2] = np.max(signal[900:,0])
print "len: %.0f | int: %.0f | he: %.2fmV | hi: %2.fmV" %(le, ins, he*1000,hi*1000)
j+=1
i+=1
#dataa=length_range,intensity_range,state_grid
np.save('RUM_Dec_meas_full2_le500t1500i10msInt50t250i2_He%.2fmV_Hi%.1fmV.npy' %(he*1000,hi*1000),state_grid)
#np.save('RUM_Dec_sim_le500t1500i10msInt70t250i2_He%.2fmV_Hi%.1fmV.npy' %(he*1000,hi*1000),signal)
#np.save('RUM_Dec_data_le500t1500i10msInt70t250i2_He%.2fmV_Hi%.1fmV.npy' %(he*1000,hi*1000),dataa)
#
#
#def cleargrid(state_grid):
# [x,y,z]=np.shape(state_grid)
# for i in range(x):
# for j in range(y):
# if state_grid[i,j,1] > 0.004:
# state_grid[i,j,1] = 0.006
# elif state_grid[i,j,1] < 0.004:
# state_grid[i,j,1] = -0.002
# else:
# raise ValueError('Error')
# print "ERROR"
#
# return state_grid
###
#%% Analysis
#import matplotlib.pyplot as plt
#hirange = np.arange(19,26,1)*1e-3
#herange = np.arange(2.5,4.1,0.25)*1e-3
#
#
#glob_low_val=1e3
#glob_high_val=-1e3
#
#for he in herange:
# for hi in hirange:
# a=np.load('RUM_Detector2_Imple500t1500i10msInt70t250i2_He%.2fmV_Hi%.1fmV.npy' %(he*1000,hi*1000))
#
#
# length_range=a[0]
# intensity_range=a[1]
# state_grid=a[2]
#
# low_lenge=np.min(length_range)
# high_lenge=np.max(length_range)
# low_inte=np.min(intensity_range)
# high_inte=np.max(intensity_range)
#
# if np.min(state_grid[:,:,1]) < glob_low_val:
# glob_low_val=np.min(state_grid[:,:,1])
# print he,hi,glob_low_val
# if np.max(state_grid[:,:,1]) > glob_high_val:
# glob_high_val=np.max(state_grid[:,:,1])
# print he,hi,glob_high_val,1
#
# plt.figure(2)
# plt.clf()
# state_grid=cleargrid(state_grid)
# plt.imshow(np.flipud(state_grid[:,:,1]), aspect='auto', extent = (low_lenge,high_lenge,low_inte,high_inte),interpolation='none')
# plt.ylabel('intensity')
# plt.xlabel('length')
# plt.title('Detektor Diagram,he:%.0fms, hi:%.0fpps' %(he*1000,hi*1000))
# cb=plt.colorbar()
# plt.savefig('RUM_Detektor2_Imple500t1500i10msInt70t250i2_He%.2fmV_Hi%.1fmV.pdf' %(he*1000,hi*1000), format='pdf', dpi=1000)
# plt.close()
# #
#
# # baseline plot is pointless here, since the baseline is the same for all stimuli
# plt.figure(2)
# plt.clf()
# #state_grid=cleargrid(state_grid)
# plt.clf()
# plt.imshow(np.flipud(state_grid[:,:,0]), aspect='auto', extent = (low_lenge,high_lenge,low_inte,high_inte),interpolation='none')
# plt.ylabel('intensity')
# plt.xlabel('length')
# plt.title('Detektor Diagram,Baselevels,he:%.0fmV, hi:%.0fmV' %(he*1000,hi*1000))
# plt.colorbar()
# #plt.savefig('RUM_Detektor_Baselevel_Imple%.0fmsInt%.0f_He2.5t7.0i0k05_Hi10t25i0k1_1.pdf' %(lenge,inte), format='pdf', dpi=1000)
#
# plt.close('all')
| gpl-3.0 |
GitYiheng/reinforcement_learning_test | test00_previous_files/mountaincar_q_learning.py | 1 | 4304 | import gym
import os
import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from gym import wrappers
from datetime import datetime
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import StandardScaler
from sklearn.kernel_approximation import RBFSampler
from sklearn.linear_model import SGDRegressor
class FeatureTransformer:
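    """Map the raw 2-d MountainCar observation onto a high-dimensional
    feature vector: standard scaling followed by a FeatureUnion of several
    RBFSampler kernel approximations with different gamma values."""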
def __init__(self, env, n_components=500):
observation_examples = np.array([env.observation_space.sample() for x in range(1000)])
scaler = StandardScaler()
scaler.fit(observation_examples)
featurizer = FeatureUnion([
("rbf1", RBFSampler(gamma=5.0, n_components=n_components)),
("rbf2", RBFSampler(gamma=4.0, n_components=n_components)),
("rbf3", RBFSampler(gamma=3.0, n_components=n_components)),
("rbf4", RBFSampler(gamma=2.0, n_components=n_components)),
("rbf5", RBFSampler(gamma=1.0, n_components=n_components)),
("rbf6", RBFSampler(gamma=0.5, n_components=n_components)),
])
example_features = featurizer.fit_transform(scaler.transform(observation_examples))
self.dimensions = example_features.shape[1]
self.scaler = scaler
self.featurizer = featurizer
def transform(self, observation):
scaled = self.scaler.transform(observation)
return self.featurizer.transform(scaled)
class Model:
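    """Q-function approximator: one SGDRegressor per discrete action, each
    predicting Q(s, a) from the transformed feature vector."""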
def __init__(self, env, feature_transformer, learning_rate):
self.env = env
self.models = []
self.feature_transformer = feature_transformer
for i in range(env.action_space.n):
            model = SGDRegressor(learning_rate=learning_rate)
model.partial_fit(feature_transformer.transform([env.reset()]), [0])
self.models.append(model)
def predict(self, s):
X = self.feature_transformer.transform([s])
assert(len(X.shape) == 2)
return np.array([m.predict(X)[0] for m in self.models])
def update(self, s, a, G):
X = self.feature_transformer.transform([s])
assert(len(X.shape) == 2)
self.models[a].partial_fit(X, [G])
def sample_action(self, s, eps):
if np.random.random() < eps:
return self.env.action_space.sample()
else:
return np.argmax(self.predict(s))
def play_one(model, eps, gamma):
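    # Play a single episode with epsilon-greedy exploration, updating the
    # model after every step with the one-step Q-learning target
    # G = r + gamma * max_a' Q(s', a').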
observation = env.reset()
done = False
totalreward = 0
iters = 0
while not done and iters < 1000:
action = model.sample_action(observation, eps)
prev_observation = observation
observation, reward, done, info = env.step(action)
# Update the model
        G = reward + gamma*np.max(model.predict(observation))
model.update(prev_observation, action, G)
totalreward += reward
iters += 1
return totalreward
def plot_cost_to_go(env, estimator, num_tiles=20):
x = np.linspace(env.observation_space.low[0], env.observation_space.high[0], num=num_tiles)
y = np.linspace(env.observation_space.low[1], env.observation_space.high[1], num=num_tiles)
X, Y = np.meshgrid(x, y)
Z = np.apply_along_axis(lambda _: -np.max(estimator.predict(_)), 2, np.dstack([X, Y]))
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=matplotlib.cm.coolwarm, vmin=-1.0, vmax=1.0)
ax.set_xlabel('Position')
ax.set_ylabel('Velocity')
ax.set_zlabel('Cost-To-Go == -V(s)')
ax.set_title("Cost-To-Go Function")
fig.colorbar(surf)
plt.show()
def plot_running_avg(totalrewards):
N = len(totalrewards)
running_avg = np.empty(N)
for t in range(N):
running_avg[t] = totalrewards[max(0, t-100):(t+1)].mean()
plt.plot(running_avg)
plt.title("Running Average")
plt.show()
def main():
env = gym.make('MountainCar-v0')
ft = FeatureTransformer(env)
model = Model(env, ft, "constant")
gamma = 0.99
if 'monitor' in sys.argv:
filename = os.path.basename(__file__).split('.')[0]
monitor_dir = './' + filename + '_' + str(datetime.now())
env = wrappers.Monitor(env, monitor_dir)
N = 300
totalrewards = np.empty(N)
for n in range(N):
eps = 0.1*(0.97**n)
totalreward = play_one(model, eps, gamma)
totalrewards[n] = totalreward
print("episode:", n, "total reward:", totalreward)
print("avg reward for last 100 episodes:", totalrewards[-100:].mean())
print("total steps:", -totalrewards.sum())
plt.plot(totalrewards)
plt.title("Rewards")
plt.show()
plot_running_avg(totalrewards)
plot_cost_to_go(env, model)
if __name__ == '__main__':
main() | mit |
aemerick/galaxy_analysis | particle_analysis/sn_rate.py | 1 | 9054 | #import yt.mods as yt
import yt
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import glob
__all__ = ['future_snr', 'snr']
_core_collapse_labels = ["SNII", "II", "2", "SN_II", "TypeII", "Type 2",
"Type II", "type II", "typeII", 'core collapse']
_snia_labels = ["SN1a", "SNIa", "Type1a", "TypeIa", "Type Ia", "Type 1a",
"type 1a", "type Ia", "type ia", "type1a", "typeIa"]
_agb_labels = ['AGB', 'agb']
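# Hypothetical usage sketch for the two rate functions below (the data file
# path is illustrative; see also the __main__ block at the bottom of this
# file):
#     ds = yt.load('DD0100/DD0100')
#     data = ds.all_data()
#     times, rate = snr(ds, data, times=25.0, sn_type='TypeII')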
def future_snr(ds, data, times = None, sn_type = 'II'):
"""
Looks forward from current time to compute future (projected) SN
rate
"""
current_time = ds.current_time.convert_to_units('Myr').value
    if times is None:
        bin_spacing = 2.0 * yt.units.Myr
        times = np.arange(current_time, current_time + 2000.0,
                          bin_spacing.value) * yt.units.Myr
    elif np.size(times) == 1:
        bin_spacing = times
        if not hasattr(bin_spacing, 'value'):
            bin_spacing = bin_spacing * yt.units.Myr
        times = np.arange(current_time, current_time + 2000.0,
                          bin_spacing.value) * yt.units.Myr
birth_mass = data['birth_mass'].value
mass = data['particle_mass'].convert_to_units('Msun').value
creation_time = data['creation_time'].convert_to_units('Myr').value
lifetimes = data['dynamical_time'].convert_to_units('Myr').value
pt = data['particle_type']
if any( [sn_type in x for x in _core_collapse_labels]):
collapse_threshold = ds.parameters['IndividualStarDirectCollapseThreshold']
agb_threshold = ds.parameters['IndividualStarSNIIMassCutoff']
pcut = (pt == 11) * (birth_mass <= collapse_threshold) *\
(birth_mass > agb_threshold)
elif any( [sn_type in x for x in _snia_labels]):
pcut = (pt == 12) * (mass > 0.0)
    elif any( [sn_type in x for x in _agb_labels]):
        agb_threshold = ds.parameters['IndividualStarSNIIMassCutoff']
        pcut = (pt == 11) * (birth_mass < agb_threshold)
explosion_times = creation_time[pcut] + lifetimes[pcut]
explosion_times = explosion_times * yt.units.Myr
times = times.convert_to_units('yr')
snr = np.zeros(np.size(times.value) - 1)
# compute SNR
for i in np.arange(np.size(times) - 1):
dt = times[i+1] - times[i]
dN = np.size( explosion_times[explosion_times <= times[i+1]]) -\
np.size( explosion_times[explosion_times <= times[i]])
snr[i] = dN / dt
return times, snr
def snr(ds, data, times = None, sn_type = 'II'):
"""
Computes the supernova rate of the desired time for a given dataset
as a function of time. The way the particle types and particle lifetimes
are handled, this can be done for the entire galaxy history using a single
snapshot, rather than having to sort through each dump.
One can provide sample times using "times" argument, or leave it alone for
a 10 Myr sample spacing from t = 0 to t = current_time. If a single value
is provided, this is taken to be the sample spacing (dt), sampled over
t = 0 to t = current_time. Units are assumed to be Myr if not provided.
Accounts for direct collapse model in computing SNII rates using
parameter file.
"""
current_time = ds.current_time.convert_to_units('Myr').value
    if times is None:
        bin_spacing = 10.0 * yt.units.Myr
        times = np.arange(0.0, current_time + bin_spacing.value,
                          bin_spacing.value) * yt.units.Myr
    elif np.size(times) == 1:
        bin_spacing = times
        if not hasattr(bin_spacing, 'value'):
            bin_spacing = bin_spacing * yt.units.Myr
        times = np.arange(0.0, current_time + bin_spacing.value,
                          bin_spacing.value) * yt.units.Myr
# load particle properties
birth_mass = data['birth_mass'].value
mass = data['particle_mass'].convert_to_units("Msun").value
creation_time = data['creation_time'].convert_to_units('Myr').value
metallicity = data['metallicity_fraction'].value
# lifetimes = data['dynamical_time'].convert_to_units('Myr').value
lifetimes = data[('io','particle_model_lifetime')].convert_to_units('Myr').value
pt = data['particle_type'].value
# check to see if there are any SN candidates in the first place
# if not any([ == x for x in np.unique(pt)]):
# print "no supernova of type " + sn_type + " found"
# return times, np.zeros(np.size(times.value) - 1)
# looking for core collapse supernova rate
if any( [sn_type in x for x in _core_collapse_labels]):
pcut = (pt == 13)
# ignore stars that did not actually go supernova
collapse_threshold = ds.parameters['IndividualStarDirectCollapseThreshold']
agb_threshold = ds.parameters['IndividualStarSNIIMassCutoff']
if not any([(x <= collapse_threshold)*(x > agb_threshold) for x in birth_mass[pcut]]):
print("no core collapse supernova present, only direct collapse")
return times, np.zeros(np.size(times.value) - 1)
# slice!
pcut *= (birth_mass <= collapse_threshold)*(birth_mass > agb_threshold)
elif any( [sn_type in x for x in _snia_labels]):
pcut = (pt == 12)
if np.size(mass[pcut]) < 1:
return times, np.zeros(np.size(times))
# SNIa are the ones that are just masless tracers, rest are WD
if not any(mass[pcut] == 0.0):
print("no Type Ia supernova, only white dwarfs")
print("N_WD = %i -- Lowest mass = %.3f Msun"%(np.size(mass[pcut]), np.min(mass[pcut])))
print("Current time = %.2E Myr - Next to explode at t = %.2E Myr"%(current_time, np.min(lifetimes[pcut] + creation_time[pcut])))
return times, np.zeros(np.size(times.value) - 1)
# slice!
pcut *= (mass == 0.0)
elif any( [sn_type in x for x in _agb_labels]):
agb_threshold = ds.parameters['IndividualStarSNIIMassCutoff']
pcut = (pt > 11) # all dead stars
pcut = pcut * (birth_mass <= agb_threshold)
# pcut = (pt == 12)
# pcut *= (mass > 0.0)
# pcut = pcut + ( (pt == 13) * (birth_mass <= agb_threshold))
else:
print("sn_type :" + sn_type + " not a valid option - check spelling")
return -1
#
# now get the explosion times for all supernova
# when stars go SN, lifetime is set to be lifetime*huge_number
# therefore, explosion time can be backed out as:
#
explosion_times = creation_time[pcut] + lifetimes[pcut]/ds.parameters['huge_number']
explosion_times = explosion_times * yt.units.Myr
times = times.convert_to_units('yr')
snr = np.zeros(np.size(times.value) - 1)
# compute SNR
for i in np.arange(np.size(times) - 1):
dt = times[i+1] - times[i]
dN = np.size( explosion_times[explosion_times <= times[i+1]]) -\
np.size( explosion_times[explosion_times <= times[i]])
snr[i] = dN / dt
return times, snr
if __name__ == '__main__':
# example usage - uses most recent data file
log = False
ds_list = np.sort( glob.glob('./DD????/DD????'))
ds = yt.load(ds_list[-1])
data = ds.all_data()
dt = 25.0
times = np.arange(0.0, ds.current_time.convert_to_units('Myr').value + dt, dt)
times = times*yt.units.Myr
times, snrII = snr(ds, data, times = times, sn_type = 'TypeII')
times, snrIa = snr(ds, data, times = times, sn_type = "TypeIa")
center = 0.5 * (times[1:] + times[:-1])
fig, ax = plt.subplots(figsize=(8,8))
snialabel = 'Type Ia x 10'
sniilabel = 'Core Collapse'
ftimes = np.arange(ds.current_time.convert_to_units('Myr').value,
ds.current_time.convert_to_units('Myr').value + 800.0 + 10, 10)
ftimes = ftimes * yt.units.Myr
ftimes, fsnrII = future_snr(ds, data, times = ftimes, sn_type = 'TypeII')
ftimes, fsnrIa = future_snr(ds, data, times = ftimes, sn_type = 'TypeIa')
if log:
ax.plot(center/1.0E6, snrII*1.0E6, color = 'black', lw = 3, ls = '-', label = sniilabel)
ax.plot(center/1.0E6, snrIa*1.0E6*10, color = 'black', lw = 3, ls = '--', label = snialabel)
        ax.semilogy()
else:
ax.step(times[:-1]/1.0E6, snrII*1.0E6, color ='black', lw = 3, ls = '-', label = sniilabel)
ax.step(times[:-1]/1.0E6, snrIa*1.0E6 * 10, color ='orange', lw = 3, ls = '-', label = snialabel)
ax.step(ftimes[:-1]/1.0E6, fsnrII*1.0E6, color = 'black', lw = 3, ls = ':')
ax.step(ftimes[:-1]/1.0E6, fsnrIa*1.0E6 * 10, color = 'orange', lw = 3, ls = ':')
ax.set_xlabel('Time (Myr)')
ax.set_ylabel(r'SNR (Myr$^{-1}$)')
ax.set_ylim( np.min( [np.min(snrIa), np.min(snrII)])*1.0E6,
np.max( [np.max(snrIa), np.max(snrII)])*1.25*1.0E6)
ax.plot( [ds.current_time.convert_to_units('Myr').value]*2, ax.get_ylim(), ls = '--', lw = 3, color = 'black')
ax.legend(loc ='best')
plt.tight_layout()
ax.minorticks_on()
plt.savefig('snr.png')
| mit |
CallaJun/hackprince | indico/matplotlib/tests/test_style.py | 10 | 1977 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import shutil
import tempfile
from contextlib import contextmanager
import matplotlib as mpl
from matplotlib import style
from matplotlib.style.core import USER_LIBRARY_PATHS, STYLE_EXTENSION
import six
PARAM = 'image.cmap'
VALUE = 'pink'
DUMMY_SETTINGS = {PARAM: VALUE}
@contextmanager
def temp_style(style_name, settings=None):
"""Context manager to create a style sheet in a temporary directory."""
    if settings is None:
        settings = DUMMY_SETTINGS
temp_file = '%s.%s' % (style_name, STYLE_EXTENSION)
# Write style settings to file in the temp directory.
tempdir = tempfile.mkdtemp()
with open(os.path.join(tempdir, temp_file), 'w') as f:
for k, v in six.iteritems(settings):
f.write('%s: %s' % (k, v))
# Add temp directory to style path and reload so we can access this style.
USER_LIBRARY_PATHS.append(tempdir)
style.reload_library()
try:
yield
finally:
shutil.rmtree(tempdir)
style.reload_library()
def test_available():
with temp_style('_test_', DUMMY_SETTINGS):
assert '_test_' in style.available
def test_use():
mpl.rcParams[PARAM] = 'gray'
with temp_style('test', DUMMY_SETTINGS):
with style.context('test'):
assert mpl.rcParams[PARAM] == VALUE
def test_use_url():
with temp_style('test', DUMMY_SETTINGS):
with style.context('https://gist.github.com/adrn/6590261/raw'):
assert mpl.rcParams['axes.facecolor'] == "#adeade"
def test_context():
mpl.rcParams[PARAM] = 'gray'
with temp_style('test', DUMMY_SETTINGS):
with style.context('test'):
assert mpl.rcParams[PARAM] == VALUE
# Check that this value is reset after the exiting the context.
assert mpl.rcParams[PARAM] == 'gray'
if __name__ == '__main__':
from numpy import testing
testing.run_module_suite()
| lgpl-3.0 |
rhattersley/cartopy | lib/cartopy/tests/mpl/test_ticker.py | 3 | 8574 | # (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
from matplotlib.axes import Axes
import pytest
import cartopy.crs as ccrs
from cartopy.mpl.geoaxes import GeoAxes
from cartopy.mpl.ticker import LatitudeFormatter, LongitudeFormatter
def test_LatitudeFormatter_bad_axes():
formatter = LatitudeFormatter()
formatter.axis = Mock(axes=Mock(Axes, projection=ccrs.PlateCarree()))
message = 'This formatter can only be used with cartopy axes.'
with pytest.raises(TypeError, message=message):
formatter(0)
def test_LatitudeFormatter_bad_projection():
formatter = LatitudeFormatter()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=ccrs.Orthographic()))
message = 'This formatter cannot be used with non-rectangular projections.'
with pytest.raises(TypeError, message=message):
formatter(0)
def test_LongitudeFormatter_bad_axes():
formatter = LongitudeFormatter()
formatter.axis = Mock(axes=Mock(Axes, projection=ccrs.PlateCarree()))
message = 'This formatter can only be used with cartopy axes.'
with pytest.raises(TypeError, message=message):
formatter(0)
def test_LongitudeFormatter_bad_projection():
formatter = LongitudeFormatter()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=ccrs.Orthographic()))
message = 'This formatter cannot be used with non-rectangular projections.'
with pytest.raises(TypeError, message=message):
formatter(0)
def test_LatitudeFormatter():
formatter = LatitudeFormatter()
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-90, -60, -30, 0, 30, 60, 90]
result = [formatter(tick) for tick in test_ticks]
expected = [u'90\u00B0S', u'60\u00B0S', u'30\u00B0S', u'0\u00B0',
u'30\u00B0N', u'60\u00B0N', u'90\u00B0N']
assert result == expected
def test_LatitudeFormatter_degree_symbol():
formatter = LatitudeFormatter(degree_symbol='')
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-90, -60, -30, 0, 30, 60, 90]
result = [formatter(tick) for tick in test_ticks]
expected = [u'90S', u'60S', u'30S', u'0',
u'30N', u'60N', u'90N']
assert result == expected
def test_LatitudeFormatter_number_format():
formatter = LatitudeFormatter(number_format='.2f')
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-90, -60, -30, 0, 30, 60, 90]
result = [formatter(tick) for tick in test_ticks]
expected = [u'90.00\u00B0S', u'60.00\u00B0S', u'30.00\u00B0S',
u'0.00\u00B0', u'30.00\u00B0N', u'60.00\u00B0N',
u'90.00\u00B0N']
assert result == expected
def test_LatitudeFormatter_mercator():
formatter = LatitudeFormatter()
p = ccrs.Mercator()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-15496570.739707904, -8362698.548496634,
-3482189.085407435, 0.0, 3482189.085407435,
8362698.548496634, 15496570.739707898]
result = [formatter(tick) for tick in test_ticks]
expected = [u'80\u00B0S', u'60\u00B0S', u'30\u00B0S', u'0\u00B0',
u'30\u00B0N', u'60\u00B0N', u'80\u00B0N']
assert result == expected
def test_LatitudeFormatter_small_numbers():
formatter = LatitudeFormatter(number_format='.7f')
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [40.1275150, 40.1275152, 40.1275154]
result = [formatter(tick) for tick in test_ticks]
expected = [u'40.1275150\u00B0N', u'40.1275152\u00B0N',
u'40.1275154\u00B0N']
assert result == expected
def test_LongitudeFormatter_central_longitude_0():
formatter = LongitudeFormatter(dateline_direction_label=True)
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180\u00B0W', u'120\u00B0W', u'60\u00B0W', u'0\u00B0',
u'60\u00B0E', u'120\u00B0E', u'180\u00B0E']
assert result == expected
def test_LongitudeFormatter_central_longitude_180():
formatter = LongitudeFormatter(zero_direction_label=True)
p = ccrs.PlateCarree(central_longitude=180)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'0\u00B0E', u'60\u00B0E', u'120\u00B0E', u'180\u00B0',
u'120\u00B0W', u'60\u00B0W', u'0\u00B0W']
assert result == expected
def test_LongitudeFormatter_central_longitude_120():
formatter = LongitudeFormatter()
p = ccrs.PlateCarree(central_longitude=120)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'60\u00B0W', u'0\u00B0', u'60\u00B0E', u'120\u00B0E',
u'180\u00B0', u'120\u00B0W', u'60\u00B0W']
assert result == expected
def test_LongitudeFormatter_degree_symbol():
formatter = LongitudeFormatter(degree_symbol='',
dateline_direction_label=True)
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180W', u'120W', u'60W', u'0', u'60E', u'120E', u'180E']
assert result == expected
def test_LongitudeFormatter_number_format():
formatter = LongitudeFormatter(number_format='.2f',
dateline_direction_label=True)
p = ccrs.PlateCarree()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-180, -120, -60, 0, 60, 120, 180]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180.00\u00B0W', u'120.00\u00B0W', u'60.00\u00B0W',
u'0.00\u00B0', u'60.00\u00B0E', u'120.00\u00B0E',
u'180.00\u00B0E']
assert result == expected
def test_LongitudeFormatter_mercator():
formatter = LongitudeFormatter(dateline_direction_label=True)
p = ccrs.Mercator()
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-20037508.342783064, -13358338.895188706,
-6679169.447594353, 0.0, 6679169.447594353,
13358338.895188706, 20037508.342783064]
result = [formatter(tick) for tick in test_ticks]
expected = [u'180\u00B0W', u'120\u00B0W', u'60\u00B0W', u'0\u00B0',
u'60\u00B0E', u'120\u00B0E', u'180\u00B0E']
assert result == expected
def test_LongitudeFormatter_small_numbers_0():
formatter = LongitudeFormatter(number_format='.7f')
p = ccrs.PlateCarree(central_longitude=0)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-17.1142343, -17.1142340, -17.1142337]
result = [formatter(tick) for tick in test_ticks]
expected = [u'17.1142343\u00B0W', u'17.1142340\u00B0W',
u'17.1142337\u00B0W']
assert result == expected
def test_LongitudeFormatter_small_numbers_180():
formatter = LongitudeFormatter(zero_direction_label=True,
number_format='.7f')
p = ccrs.PlateCarree(central_longitude=180)
formatter.axis = Mock(axes=Mock(GeoAxes, projection=p))
test_ticks = [-17.1142343, -17.1142340, -17.1142337]
result = [formatter(tick) for tick in test_ticks]
expected = [u'162.8857657\u00B0E', u'162.8857660\u00B0E',
u'162.8857663\u00B0E']
assert result == expected
| lgpl-3.0 |
yhilpisch/dx | dx/plot.py | 1 | 5805 | #
# DX Analytics
# Helper Function for Plotting
# dx_plot.py
#
# DX Analytics is a financial analytics library, mainly for
# derviatives modeling and pricing by Monte Carlo simulation
#
# (c) Dr. Yves J. Hilpisch
# The Python Quants GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
import numpy as np
import matplotlib as mpl; mpl.use('agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pylab import cm
def plot_option_stats(s_list, pv, de, ve):
''' Plot option prices, deltas and vegas for a set of
different initial values of the underlying.
Parameters
==========
s_list : array or list
set of intial values of the underlying
pv : array or list
present values
de : array or list
results for deltas
ve : array or list
results for vega
'''
plt.figure(figsize=(9, 7))
sub1 = plt.subplot(311)
plt.plot(s_list, pv, 'ro', label='Present Value')
plt.plot(s_list, pv, 'b')
plt.grid(True)
plt.legend(loc=0)
plt.setp(sub1.get_xticklabels(), visible=False)
sub2 = plt.subplot(312)
plt.plot(s_list, de, 'go', label='Delta')
plt.plot(s_list, de, 'b')
plt.grid(True)
plt.legend(loc=0)
plt.setp(sub2.get_xticklabels(), visible=False)
sub3 = plt.subplot(313)
plt.plot(s_list, ve, 'yo', label='Vega')
plt.plot(s_list, ve, 'b')
plt.xlabel('Strike')
plt.grid(True)
plt.legend(loc=0)
def plot_option_stats_full(s_list, pv, de, ve, th, rh, ga):
''' Plot option prices, deltas and vegas for a set of
different initial values of the underlying.
Parameters
==========
s_list : array or list
set of intial values of the underlying
pv : array or list
present values
de : array or list
results for deltas
ve : array or list
results for vega
th : array or list
results for theta
rh : array or list
results for rho
ga : array or list
results for gamma
'''
plt.figure(figsize=(10, 14))
sub1 = plt.subplot(611)
plt.plot(s_list, pv, 'ro', label='Present Value')
plt.plot(s_list, pv, 'b')
plt.grid(True)
plt.legend(loc=0)
plt.setp(sub1.get_xticklabels(), visible=False)
sub2 = plt.subplot(612)
plt.plot(s_list, de, 'go', label='Delta')
plt.plot(s_list, de, 'b')
plt.grid(True)
plt.legend(loc=0)
plt.setp(sub2.get_xticklabels(), visible=False)
sub3 = plt.subplot(613)
    plt.plot(s_list, ve, 'yo', label='Vega')
plt.plot(s_list, ve, 'b')
plt.grid(True)
plt.legend(loc=0)
sub4 = plt.subplot(614)
    plt.plot(s_list, th, 'mo', label='Theta')
plt.plot(s_list, th, 'b')
plt.grid(True)
plt.legend(loc=0)
sub5 = plt.subplot(615)
    plt.plot(s_list, rh, 'co', label='Rho')
plt.plot(s_list, rh, 'b')
plt.grid(True)
plt.legend(loc=0)
sub6 = plt.subplot(616)
    plt.plot(s_list, ga, 'ko', label='Gamma')
plt.plot(s_list, ga, 'b')
plt.xlabel('Strike')
plt.grid(True)
plt.legend(loc=0)
def plot_greeks_3d(inputs, labels):
''' Plot Greeks in 3d.
Parameters
==========
inputs : list of arrays
x, y, z arrays
labels : list of strings
labels for x, y, z
'''
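    # Hypothetical usage sketch (the meshgrid arrays and Greek values below
    # are illustrative placeholders, not part of this library):
    #     K, T = np.meshgrid(np.linspace(80., 120., 25), np.linspace(0.1, 1.0, 25))
    #     delta = np.exp(-((K - 100.) / 20.) ** 2) * T
    #     plot_greeks_3d([K, T, delta], ['strike', 'maturity', 'delta'])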
x, y, z = inputs
xl, yl, zl = labels
fig = plt.figure(figsize=(10, 7))
ax = fig.gca(projection='3d')
surf = ax.plot_surface(x, y, z, rstride=1, cstride=1,
cmap=cm.coolwarm, linewidth=0.5, antialiased=True)
ax.set_xlabel(xl)
ax.set_ylabel(yl)
ax.set_zlabel(zl)
fig.colorbar(surf, shrink=0.5, aspect=5)
def plot_calibration_results(cali, relative=False):
''' Plot calibration results.
Parameters
==========
cali : instance of calibration class
instance has to have opt_parameters
relative : boolean
if True, then relative error reporting
if False, absolute error reporting
'''
cali.update_model_values()
mats = set(cali.option_data[:, 0])
mats = np.sort(list(mats))
fig, axarr = plt.subplots(len(mats), 2, sharex=True)
fig.set_size_inches(8, 12)
fig.subplots_adjust(wspace=0.2, hspace=0.2)
z = 0
for T in mats:
        strikes = cali.option_data[cali.option_data[:, 0] == T][:, 1]
market = cali.option_data[cali.option_data[:, 0] == T][:, 2]
model = cali.model_values[cali.model_values[:, 0] == T][:, 2]
axarr[z, 0].set_ylabel('%s' % str(T)[:10])
axarr[z, 0].plot(strikes, market, label='Market Quotes')
axarr[z, 0].plot(strikes, model, 'ro', label='Model Prices')
axarr[z, 0].grid()
if T is mats[0]:
axarr[z, 0].set_title('Option Quotes')
if T is mats[-1]:
axarr[z, 0].set_xlabel('Strike')
wi = 2.
if relative is True:
axarr[z, 1].bar(strikes - wi / 2,
(model - market) / market * 100, width=wi)
else:
axarr[z, 1].bar(strikes - wi / 2, model - market, width=wi)
axarr[z, 1].grid()
if T is mats[0]:
axarr[z, 1].set_title('Differences')
if T is mats[-1]:
axarr[z, 1].set_xlabel('Strike')
z += 1
| agpl-3.0 |
cellular-nanoscience/pyotic | pyoti/modification/modification.py | 1 | 26859 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 12 14:22:31 2016
@author: Tobias Jachowski
"""
import collections
import matplotlib.pyplot as plt
import numpy as np
from abc import ABCMeta, abstractmethod
from .. import gui
from .. import helpers as hp
from .. import traces as tc
from ..evaluate import signal as sn
from ..graph import GraphMember
from ..picklable import InteractiveAttributes
class GraphicalMod(object):
"""
This class's subclasses should implement `_figure()` and `_update_fig()`,
which return and update a matplotlib figure, respectively. The figure can
be accessed by `self.figure`.
Parameters
----------
figure
modification : Modification
"""
def __init__(self, modification=None, **kwargs):
# Register the modification which should be graphically adjusted
self.modification = modification
# Initialize figure to None, which effectively disables
# `self.update_fig()` and Co. and prevent them from throwing an error
self._fig = None
def _set_plot_params(self, plot_params=None):
if plot_params is None:
plot_params = {}
gui.set_plot_params(plot_params=plot_params)
def display(self, plot_params=None):
self.init_fig(plot_params=plot_params)
def init_fig(self, show=True, plot_params=None):
"""
This method calls self._figure() to create an interactive figure and
interact with the user to determine the parameters necessary to
calculate the modification (see self._recalculate()), and
self._close_fig() to release all references to the actors of the
figure.
`self._figure()` and self._close_fig() should be (over)written by
subclasses.
"""
# Only create a figure, if the function `self._figure()` is implemented
if not hasattr(self, '_figure'):
return
# close the figure
# nbagg backend needs to have the figure closed and recreated
# whenever the code of the cell displaying the figure is executed.
# A simple update of the figure would let it disappear. Even a
# self.figure.show() wouldn't work anymore.
# For other backends this just means a bit of extra calculation.
# Therefore, close the figure first before replotting it.
self.close_fig()
# set default plot parameters, can be recalled / overwritten in
# `self._figure()`
self._set_plot_params(plot_params=plot_params)
# create the figure
self.figure = self._figure()
# update the figure
self.update_fig()
# show the figure
if show:
self.figure.show()
def update(self, **kwargs):
self.update_fig(**kwargs)
def update_fig(self, **kwargs):
if self._fig is not None:
self._update_fig(**kwargs)
self._figure_canvas_draw()
def _update_fig(self, **kwargs):
pass
def close_fig(self):
if self._fig is not None:
self._pre_close_fig()
self._close_fig()
self._post_close_fig()
def _pre_close_fig(self):
"""
Method to be overwritten by subclasses.
"""
pass
def _close_fig(self):
# force redraw of the figure
self._figure_canvas_draw()
# close the figure
plt.close(self.figure)
# release memory
self.figure = None
def _post_close_fig(self):
"""
Method to be overwritten by subclasses.
"""
pass
def _figure_canvas_draw(self):
# Some matplotlib backends will throw an error when trying to draw the
# canvas. Simply ignoring the error that could happen here prevents the
# figure from being left open and blocking the next figure from being
# drawn. Even though the "except: pass" clause is
# considered bad, here the worst thing that could happen is that the
# figure produced by the matplotlib backend upon closing is not
# updated. Therefore, "except: pass" should be considered as an
# acceptable workaround for this case.
try:
# redraw the figure, before closing it
self.figure.canvas.draw()
except:
pass
@property
def figure(self):
"""
The matplotlib figure that represents and/or adjusts the parameters of
`self.modification`.
"""
# Automatically initialize a figure
if self._fig is None:
self.init_fig(show=False)
# Return a previously initialized figure
return self._fig
@figure.setter
def figure(self, figure):
self._fig = figure
class Modification(GraphMember, metaclass=ABCMeta):
"""
Modification is an abstract class, that implements methods to modify the
data of a `View` (`view_apply`) and adjust the parameters which control the
behaviour of the modifications applied.
Whenever one of the parameters needed to calculate the modification is
changed, the view, this modification is applied to, is informed.
`self.set_changed()` has to be called upon any change of the modification
that influences the behaviour of `self.modify()`. In essence, these are all
parameters that are used to determine the modification. Therefore, this
should be called by all setters of the parameters/attributes.
Every subclass of Modification has to implement a constructor method
`self.__init__(self, **kwargs)`, which calls the superclasses' constructor
and sets the traces, the modification is applied to with the keyword
parameter `traces_apply`. An example could be:
super().__init__(traces_apply=['psdX', 'psdZ'], **kwargs)
"""
# set a graphical modification, which will, per default, do nothing
GRAPHICALMOD = GraphicalMod
def __init__(self, traces_apply=None, view_apply=None, view_based=None,
automatic_switch=False, datapoints=-1, **kwargs):
# Call the constructor of the superclass `GraphMember` and set the
# maximum allowed number of parents (`view_based`) and childs
# (`view_apply`) to one.
super().__init__(max_children=1, max_parents=1, **kwargs)
# A `Modification` has to be applied to a `View`!
if view_apply is None:
raise TypeError("Modification missing required positional argument"
" `view_apply`.")
# Set the view, from where the parameters for the modification are
# calculated from
if view_based is not None:
self.view_based = view_based
# Set the view, whose data is going to be modified
self.view_apply = view_apply
# Set the traces, which are modified by this `Modification`
self.traces_apply = traces_apply
# Initialize InteractiveAttributes object, which will hold all the
# parameters that the user should interact with.
self.iattributes = InteractiveAttributes()
# A checkbox to switch on/off the automatic determination of the
# parameters that are used to calculate the modification in the method
# `self.recalculate()`. The attribute `self.automatic` is checked in
# the method `self.recalculate()`. If `automatic` is True, the
# parameters are recalculated, otherwise the parameters are left
# unchanged. Whenever `automatic` is changed (by the user or
# automatically), `self.evaluate()` is called.
if automatic_switch:
self.add_iattribute('automatic', description='Automatic mode',
value=True, unset_automatic=False,
set_changed=False,
callback_functions=[self.evaluate])
# A checkbox to de-/activate this `Modification`. This attribute gets
# evaluated by `self.modify()`. If the `Modification` is active, it
# modifies data, otherwise not, i.e. modify() returns modified or
# unmodified original data, respectively.
desc = "".join((self.__class__.__name__, " active"))
self.add_iattribute('active', description=desc, value=True,
unset_automatic=False)
# Datapoints is used to calculate and/or present modification. The
# attribute `datapoints` is used to calculate a decimating factor and
# speed up the calculations and/or plot commands.
if datapoints > 0:
desc = "Datapoints to calculate/visualize modification"
self.add_iattribute('datapoints', description=desc,
value=datapoints, unset_automatic=False)
# Add a Button to manually call the method `self.evaluate()`.
self.add_iattribute('evaluate', description='Evaluate',
unset_automatic=False, set_changed=False,
callback_functions=[self.evaluate])
def add_iattribute(self, key, description=None, value=None,
unset_automatic=True, set_changed=True,
callback_functions=None, **kwargs):
"""
Add logic for automatic checkbox.
Register widget with unset_automatic=True
(-> Upon change of widget, unset automatic mode).
Change default behaviour by setting kwarg: unset_automatic = False
Add logic for triggering changed (calling self.set_changed).
Register widget with set_changed=True.
"""
if callback_functions is None:
callback_functions = []
if unset_automatic:
callback_functions.append(self._unset_automatic)
if set_changed:
callback_functions.append(self.set_changed)
self.iattributes.add(key, description=description, value=value,
callback_functions=callback_functions, **kwargs)
def _unset_automatic(self, leave_automatic=False, **kwargs):
"""
Add the logic for the automatic checkbox. If the value of an attribute
is changed and the attribute was created with `unset_automatic=True`,
deactivate the automatic mode (see `self.add_iattribute()`). To
temporarily leave the automatic mode status untouched when changing the
value of an attribute, i.e. not unset the automatic mode, set the value
of the attribute with the keyword argument `leave_automatic=True`
(see method `self.iattributes.set_value()`)
"""
if not leave_automatic:
self.iattributes.set_value('automatic', False, callback=False)
def evaluate(self):
"""
Implement the (re)calculation for the values necessary to calculate the
modification in the subclass and call recalculate() of the superclass
(this class).
"""
if self.updated:
# This method makes sure the modification is calculated with the
# current values of the View this modification is based on. It is
# called by self.modify().
# When a View requests data, it calls modify(), which in turn calls
# recalculate(). Recalculate(), if necessary, calls
# get_data_modified() from the View it is based on, which again
# triggers a call of modify() and a subsequent recalculate() of all
# modifications associated with this View.
# Modification need update, because view, this mod is based on,
# was changed.
# self._view_based.evaluate()is not needed, it is called via:
# recalculate() -> get_data_based() -> _view_based.get_data() ->
# get_modified_data() -> super().evaluate()
return
# Recalculate and print info of recalculated values if in automatic
# mode
if self.recalculate():
self.print_info()
# Update figure after recalculation has taken place
self.graphicalmod.update()
def recalculate(self):
# Check if recalculation of parameters is necessary
if self.updated:
return False
# Check the attribute self.automatic, whether the parameters needed for
# the calculation of the modification should be determined
# automatically or not. If values are set manually, no recalculation is
# necessary, and `self` is therefore up to date.
if not self.automatic:
self.updated = True
return True
# Recalculate the parameters, inform the view this `Modification`
# is applied to about the change, and set `self` to be updated.
self._recalculate()
self.set_changed(updated=True)
return True
def _recalculate(self):
"""
This method should be overwritten by subclasses and perform the
recalculation necessary to determine the parameters used by this
Modification to modify the data in `self._modify()`.
"""
pass
def print_info(self):
print("Values for Modification of class %s:"
% self.__class__.__name__)
if not self.automatic:
print(" Parameters set manually!")
for key, widget in self.iattributes._widgets.items():
if hasattr(widget, 'value'):
if isinstance(widget.value, float):
print(" %s: %.5f" % (widget.description, widget.value))
if isinstance(widget.value, collections.Iterable):
print(" %s: %s" % (widget.description, widget.value))
self._print_info()
def _print_info(self):
"""
This method should be overwritten by subclasses, which want to print
extra info in addition to the info of the calculated parameters.
"""
pass
def modify(self, data, samples, traces_idx):
"""
Modifies data and returns the modified array.
Parameters
----------
data : 2D numpy.ndarray of type float
`data` holds the data to be modified
samples : index array or slice
`samples` is the index of the samples that was used to get the
`data`
traces_idx : index array or slice
`traces_idx` is the index of the traces that was used to get the `data`
"""
# Modification is active.
if self.active:
# Check if traces contained in data are modified by this
# modification.
data_traces = self.view_apply.idx_to_traces(traces_idx)
mod_traces = self.traces_apply
# Calculate the indices of traces contained in data and
# modification. First, calculate indices of modification traces.
mod_index = hp.overlap_index(mod_traces, data_traces)
if len(mod_index) > 0:
# At least one trace exists in both data and modification.
# Therefore, the data needs to be modified...
mod_index = hp.slicify(mod_index)
# Calculate indices of traces of the data in such a way that
# `data[:, data_index]` indexes the same traces as
# `self.traces_apply[mod_index]`
data_index = np.array([data_traces.index(trace)
for trace
in np.array(mod_traces)[mod_index]])
data_index = hp.slicify(data_index)
# Trigger a recalculation of the parameters for the
# modification (if necessary) before modifying the data.
self.evaluate()
# Modify and return the modified data
return self._modify(data=data,
samples=samples,
data_traces=data_traces,
data_index=data_index,
mod_index=mod_index)
# Return unmodified data
return data
@abstractmethod
def _modify(self, data, samples, data_traces, data_index, mod_index):
"""
Is called by self.modify() whenever data is requested and needs to be
modified.
Parameters
----------
data : 2D numpy.array()
Contains the data, indexed by samples and data_traces
samples : slice or 1D numpy.array()
Is the index of the samples contained in data, which was
given/asked by the user/process who called _get_data().
data_traces : list of str
Contains a list of traces (str) existent in data, which
was given/asked by the user/process who called _get_data().
data_index : slice or 1D numpy.array()
data[:, data_index] gives the data, which is modified by
this modification
mod_index : slice or 1D numpy.array()
np.array(self.traces_apply)[mod_index] gives the traces,
which are existent in data and also modified by this modification.
Returns
-------
2D numpy.array()
The modified data.
"""
# modify data here, like so:
# data[:,data_index] -= modification[:,mod_index]
return data
@property
def updated(self):
return self._updated
@updated.setter
def updated(self, value):
"""
Gets set to True, after all `Views`, this `Modification` is based on,
have been updated and after this `Modification` has been recalculated.
This is automatically taken care of by `self.evaluate()` ->
`self.recalculate()`.
Gets called by a `View`, this `Modification` is based on, whenever the
`View` (a `Modification` of the `View`) has been changed. It
automatically informs its own `View`, that there was a change, by
calling `self.set_changed()`.
"""
self._updated = value
def member_changed(self, ancestor=True, calledfromself=False,
index_shift=None, **kwargs):
# If a change of an ancestor View or a MultiRegion was triggered by an
# index_shift, the modification needs to recalculate itself, i.e.
# the modification will alter its changing behaviour. Because an
# index_shift change is only transmitted to `level=1`, inform the
# descendants of the change itself. A change of descendants is ignored.
if index_shift is not None and not calledfromself and ancestor:
self.set_changed(includeself=False)
# Update update status
super().member_changed(ancestor=ancestor,
calledfromself=calledfromself, **kwargs)
def _get_data(self, based=True, samples=None, traces=None, window=False,
decimate=False, copy=True):
if based:
view = self.view_based
else:
view = self.view_apply
if not isinstance(window, bool) and isinstance(window, int):
window = window
elif window:
window = self.decimate
else:
window = 1
if not isinstance(decimate, bool) and isinstance(decimate, int):
decimate = decimate
elif decimate:
decimate = self.decimate
else:
decimate = 1
if not based:
old_active = self.iattributes.active
self.iattributes.set_value('active', False, callback=False)
data = view.get_data(traces=traces, samples=samples,
moving_filter='mean', window=window,
decimate=decimate, copy=copy)
if not based:
self.iattributes.set_value('active', old_active, callback=False)
return data
def _get_data_based(self, samples=None, traces=None, window=False,
decimate=False, copy=True):
"""
decimate is False per default. If decimate is True, it only gets used,
if samples are set to None (step information in samples takes precedence
over decimate).
"""
return self._get_data(based=True, samples=samples, traces=traces,
window=window, decimate=decimate, copy=copy)
def _get_data_apply(self, samples=None, traces=None, window=False,
decimate=False, copy=True):
"""
Get data of view apply with all modifications applied, except self.
This is achieved by setting the self.__active flag to False.
self.__active is intentionally set directly by accessing the attribute
and not using the property/set_active() method, to prevent firing the
self.set_changed() method within the set_active() method.
decimate is False per default. If decimate is True, it only gets used,
if samples are set to None (step information in samples takes precedence
over decimate).
"""
return self._get_data(based=False, samples=samples, traces=traces,
window=window, decimate=decimate, copy=copy)
def calculate_bin_means(self, data=None, traces=None, bins=None,
datapoints_per_bin=None, sorttrace=0):
"""
Calculates binned means based on the data to be fitted. The binned
means are usually used by data fitting routines.
Parameters
----------
data : 2D numpy.ndarray of type float, optional
Defaults to `self._get_data_based(traces=traces, decimate=True)`.
traces : str or list of str, optional
Defaults to `self.traces_apply`.
bins : int, optional
Number of bins that contain the datapoints to be averaged. If
possible, it defaults to (`self.iattributes.datapoints` /
`datapoints_per_bin`), otherwise bins defaults to
(`self.view_based.datapoints` / `datapoints_per_bin`).
datapoints_per_bin : int, optional
Average number of datapoints to be averaged in one bin. Defaults to
25.
sorttrace : int, optional
Trace (column) of `data` that acts as sorting index upon binning
for the rest of the data. Defaults to the first trace of the data.
Returns
-------
1D numpy.ndarray of type float
The averaged bin values.
float
The size of one bin.
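Example
-------
Illustrative sketch (not from the original code base); assumes `mod` is a
concrete, fitted Modification whose based view provides the data:
>>> bin_means, bin_width = mod.calculate_bin_means(bins=40, sorttrace=0)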
"""
# Bin data and average bins to prevent arbitrary weighting of bins with
# more datapoints
if bins is None:
bins = self._bins(datapoints_per_bin=datapoints_per_bin)
# get the traces to retrieve data from
if traces is None:
traces = self.traces_apply
# get the data to bin
if data is None:
data = self._get_data_based(traces=traces, decimate=True)
# create the bins based on one trace of the data
minimum = np.min(data[:, sorttrace])
maximum = np.max(data[:, sorttrace])
edges = np.linspace(minimum, maximum, bins + 1)
# Get the indices of the bins to which each value in input array
# belongs.
bin_idx = np.digitize(data[:, sorttrace], edges)
# Find which points are on the rightmost edge.
on_edge = data[:, sorttrace] == edges[-1]
# Shift these points one bin to the left.
bin_idx[on_edge] -= 1
# fill the bins with the means of the data contained in each bin
bin_means = np.array([data[bin_idx == i].mean(axis=0)
for i in range(1, bins + 1)
if np.any(bin_idx == i)])
bin_width = edges[1] - edges[0]
return bin_means, bin_width
def _bins(self, datapoints_per_bin=None):
# On average 25 datapoints per bin
datapoints_per_bin = datapoints_per_bin or 25
if 'datapoints' in self.iattributes:
bins = self.iattributes.datapoints / datapoints_per_bin
else:
bins = self.view_based.datapoints / datapoints_per_bin
bins = max(1, int(np.round(bins)))
return bins
_NAME = {
'position': ['positionX', 'positionY'],
'psd': ['psdX', 'psdY'],
'axis': ['X', 'Y']
}
def _excited(self, traces=None):
traces = traces or ['positionX', 'positionY']
data = self._get_data_based(traces=traces, copy=False)
return sn.get_excited_signal(data)
def interact(self):
self.recalculate()
self.iattributes.display()
self.graphicalmod.display()
@property
def graphicalmod(self):
# ZODB volatile
if not hasattr(self, '_v_graphicalmod'):
self._v_graphicalmod \
= self.__class__.GRAPHICALMOD(modification=self)
return self._v_graphicalmod
@property
def active(self):
active = False
if 'active' in self.iattributes:
active = self.iattributes.active
return active
@active.setter
def active(self, active=True):
if 'active' in self.iattributes:
self.iattributes.active = active
@property
def automatic(self):
# Does the modification automatically calculate its parameters
automatic = True
if 'automatic' in self.iattributes:
automatic = self.iattributes.automatic
return automatic
@property
def datapoints(self):
if 'datapoints' in self.iattributes:
return self.iattributes.datapoints
else:
return self.view_based.datapoints
@property
def decimate(self):
if 'datapoints' in self.iattributes:
return max(1, int(np.round(self.view_based.datapoints
/ self.datapoints)))
else:
return 1
@property
def view_based(self):
return self.parent
@property
def view_apply(self):
return self.child
@view_based.setter
def view_based(self, view):
self.set_parent(view)
@view_apply.setter
def view_apply(self, view):
self.set_child(view)
def lia(self, trace):
"""
Return the local index of trace in traces_apply
"""
return self.traces_apply.index(trace)
@property
def traces_apply(self):
# return a copy to protect local copy
return self._traces_apply.copy()
@traces_apply.setter
def traces_apply(self, traces):
if traces is None:
traces_apply = []
else:
traces_apply = tc.normalize(traces)
self._traces_apply = traces_apply
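# ---------------------------------------------------------------------------
# Minimal concrete subclass sketch (illustrative only, not part of the
# original module). It follows the pattern described in the Modification
# docstring: pass `traces_apply` to the superclass, determine a parameter in
# `_recalculate()`, and apply it in `_modify()`. The trace names and the
# simple offset logic are assumptions made for demonstration purposes.
class OffsetModification(Modification):
    def __init__(self, **kwargs):
        super().__init__(traces_apply=['psdX', 'psdZ'], **kwargs)
        self.add_iattribute('offset', description='Offset', value=0.0)
    def _recalculate(self):
        # Estimate a per-trace offset from the view this modification is
        # based on; leave the automatic mode status untouched.
        data = self._get_data_based(traces=self.traces_apply, copy=False)
        self.iattributes.set_value('offset', data.mean(axis=0),
                                   leave_automatic=True)
    def _modify(self, data, samples, data_traces, data_index, mod_index):
        # Subtract the offsets from the traces present in the requested data
        data[:, data_index] -= np.asarray(self.iattributes.offset)[mod_index]
        return data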
| apache-2.0 |
omnirom/android_kernel_htc_flounder | scripts/tracing/dma-api/trace.py | 96 | 12420 | """Main program and stuff"""
#from pprint import pprint
from sys import stdin
import os.path
import re
from argparse import ArgumentParser
import cPickle as pickle
from collections import namedtuple
from plotting import plotseries, disp_pic
import smmu
class TracelineParser(object):
"""Parse the needed information out of an ftrace line"""
# <...>-6 [000] d..2 5.287079: dmadebug_iommu_map_page: device=sdhci-tegra.3, addr=0x01048000, size=4096 page=c13e7214 archdata=ed504640
def __init__(self):
self.pattern = re.compile("device=(?P<dev>.*), addr=(?P<addr>.*), size=(?P<size>.*) page=(?P<page>.*) archdata=(?P<archdata>.*)")
def parse(self, args):
args = self.pattern.match(args)
return (args.group("dev"), int(args.group("addr"), 16),
int(args.group("size")), int(args.group("page"), 16),
int(args.group("archdata"), 16))
def biggest_indices(items, n):
"""Return list of indices of n biggest elements in items"""
with_indices = [(x, i) for i, x in enumerate(items)]
ordered = sorted(with_indices)
return [i for x, i in ordered[-n:]]
def by_indices(xs, ids):
"""Get elements from the list xs by their indices"""
return [xs[i] for i in ids]
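# Illustrative note (not in the original script): the two helpers above work
# together, e.g. biggest_indices([4, 1, 9, 7], 2) returns [3, 2] (indices of
# the two largest values) and by_indices(["a", "b", "c", "d"], [3, 2]) returns
# ["d", "c"]; this is how the biggest memory users are selected further below.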
"""Event represents one input line"""
Event = namedtuple("Event", ["time", "dev", "data", "delta"])
class Trace(object):
def __init__(self, args):
smmu.VERBOSITY = args.verbosity
self._args = args
self.devlist = []
self.events = []
self.metrics = {
"max_peak": self._usage_peak,
"activity_rate": self._usage_activity,
"average_mem": self._usage_avg
}
self.traceliner = TracelineParser()
@staticmethod
def get_metrics():
"""What filter metrics to get max users"""
return ["max_peak", "activity_rate", "average_mem"]
def show(self):
"""Shuffle events around, build plots, and show them"""
if self._args.max_plots:
evs = self.merge_events()
else:
evs = self.events
series, devlist = self.unload(evs)
if not self._args.no_plots:
self.plot(series, devlist)
def _get_usage(self, evs):
"""Return a metric of how active the events in evs are"""
return self.metrics[self._args.max_metric](evs)
def _usage_peak(self, evs):
"""Return the biggest peak"""
return max(e.data for e in evs)
def _usage_activity(self, evs):
"""Return the activity count: simply the length of the event list"""
return len(evs)
def _usage_avg(self, evs):
"""Return the average over all points"""
# FIXME: the data points are not uniform in time, so this might be
# somewhat off.
return float(sum(e.data for e in evs)) / len(evs)
def merge_events(self):
"""Find out biggest users, keep them and flatten others to a single user"""
sizes = []
dev_evs = []
for i, dev in enumerate(self.devlist):
dev_evs.append([e for e in self.events if e.dev == dev])
sizes.append(self._get_usage(dev_evs[i]))
# indices of the devices
biggestix = biggest_indices(sizes, self._args.max_plots)
print biggestix
is_big = {}
for i, dev in enumerate(self.devlist):
is_big[dev] = i in biggestix
evs = []
for e in self.events:
if not is_big[e.dev]:
e = Event(e.time, "others", e.data, e.delta)
evs.append(e)
self.devlist.append("others")
return evs
def unload(self, events):
"""Prepare the event list for plotting
series ends up as [([time0], [data0]), ([time1], [data1]), ...]
"""
# ([x], [y]) for matplotlib
series = [([], []) for x in self.devlist]
devidx = dict([(d, i) for i, d in enumerate(self.devlist)])
for event in events:
devid = devidx[event.dev]
series[devid][0].append(event.time)
series[devid][1].append(event.data) # self.dev_data(event.dev))
series_out = []
devlist_out = []
for ser, dev in zip(series, self.devlist):
if len(ser[0]) > 0:
series_out.append(ser)
devlist_out.append(dev)
return series_out, devlist_out
def plot(self, series, devlist):
"""Display the plots"""
#series, devlist = flatten_axes(self.series, self.devlist,
# self._args.max_plots)
devinfo = (series, map(str, devlist))
allocfreeinfo = (self.allocsfrees, ["allocd", "freed", "current"])
plotseries(devinfo, allocfreeinfo)
#plotseries(devinfo)
def dev_data(self, dev):
"""what data to plot against time"""
return dev._cur_alloc
def _cache_hash(self, filename):
"""The trace files are probably not of the same size"""
return str(os.path.getsize(filename))
def load_cache(self):
"""Get the trace data from a database file, if one exists"""
has = self._cache_hash(self._args.filename)
try:
cache = open("trace." + has)
except IOError:
pass
else:
self._load_cache(pickle.load(cache))
return True
return False
def save_cache(self):
"""Store the raw trace data to a database"""
data = self._save_cache()
fh = open("trace." + self._cache_hash(self._args.filename), "w")
pickle.dump(data, fh)
def _save_cache(self):
"""Return the internal data that is needed to be pickled"""
return self.events, self.devlist, self.allocsfrees
def _load_cache(self, data):
"""Get the data from an unpickled object"""
self.events, self.devlist, self.allocsfrees = data
def load_events(self):
"""Get the internal data from a trace file or cache"""
if self._args.filename:
if self._args.cache and self.load_cache():
return
fh = open(self._args.filename)
else:
fh = stdin
self.parse(fh)
if self._args.cache and self._args.filename:
self.save_cache()
def parse(self, fh):
"""Parse the trace file in fh, store data to self"""
mems = {}
dev_by_name = {}
devlist = []
buf_owners = {}
events = []
allocsfrees = [([], []), ([], []), ([], [])] # allocs, frees, current
allocs = 0
frees = 0
curbufs = 0
mem_bytes = 1024 * 1024 * 1024
npages = mem_bytes / 4096
ncols = 512
le_pic = [0] * npages
lastupd = 0
for lineidx, line in enumerate(fh):
# no comments
if line.startswith("#"):
continue
taskpid, cpu, flags, timestamp, func, args = line.strip().split(None, 5)
func = func[:-len(":")]
# unneeded events may be there too
if not func.startswith("dmadebug"):
continue
if self._args.verbosity >= 3:
print line.rstrip()
timestamp = float(timestamp[:-1])
if timestamp < self._args.start:
continue
if timestamp >= self._args.end:
break
devname, addr, size, page, archdata = self.traceliner.parse(args)
if self._args.processes:
devname = taskpid.split("-")[0]
mapping = archdata
try:
memmap = mems[mapping]
except KeyError:
memmap = mem(mapping)
mems[mapping] = memmap
try:
dev = dev_by_name[devname]
except KeyError:
dev = smmu.Device(devname, memmap)
dev_by_name[devname] = dev
devlist.append(dev)
allocfuncs = ["dmadebug_map_page", "dmadebug_map_sg", "dmadebug_alloc_coherent"]
freefuncs = ["dmadebug_unmap_page", "dmadebug_unmap_sg", "dmadebug_free_coherent"]
ignfuncs = []
if timestamp-lastupd > 0.1:
# just some debug prints for now
lastupd = timestamp
print lineidx,timestamp
le_pic2 = [le_pic[i:i+ncols] for i in range(0, npages, ncols)]
#disp_pic(le_pic2)
# animating the bitmap would be cool
#for row in le_pic:
# for i, a in enumerate(row):
# pass
#row[i] = 0.09 * a
if func in allocfuncs:
pages = dev_by_name[devname].alloc(addr, size)
for p in pages:
le_pic[p] = 1
buf_owners[addr] = dev_by_name[devname]
allocs += 1
curbufs += 1
allocsfrees[0][0].append(timestamp)
allocsfrees[0][1].append(allocs)
elif func in freefuncs:
if addr not in buf_owners:
if self._args.verbosity >= 1:
print "warning: %s unmapping unmapped %s" % (dev, addr)
buf_owners[addr] = dev
# fixme: move this to bitmap handling
# get to know the owners of bits
# allocs/frees calls should be traced separately from maps?
# map_pages is traced per page :(
if buf_owners[addr] != dev and self._args.verbosity >= 2:
print "note: %s unmapping [%d,%d) mapped by %s" % (
dev, addr, addr+size, buf_owners[addr])
pages = buf_owners[addr].free(addr, size)
for p in pages:
le_pic[p] = 0
frees -= 1
curbufs -= 1
allocsfrees[1][0].append(timestamp)
allocsfrees[1][1].append(frees)
elif func not in ignfuncs:
raise ValueError("unhandled %s" % func)
allocsfrees[2][0].append(timestamp)
allocsfrees[2][1].append(curbufs)
events.append(Event(timestamp, dev, self.dev_data(dev), size))
self.events = events
self.devlist = devlist
self.allocsfrees = allocsfrees
le_pic2 = [le_pic[i:i+ncols] for i in range(0, npages, ncols)]
# FIXME: not quite ready yet
disp_pic(le_pic2)
return
def mem(asid):
"""Create a new memory object for the given asid space"""
SZ_2G = 2 * 1024 * 1024 * 1024
SZ_1M = 1 * 1024 * 1024
# arch/arm/mach-tegra/include/mach/iomap.h TEGRA_SMMU_(BASE|SIZE)
base = 0x80000000
size = SZ_2G - SZ_1M
return smmu.Memory(base, size, asid)
def get_args():
"""Eat command line arguments, return argparse namespace for settings"""
parser = ArgumentParser()
parser.add_argument("filename", nargs="?",
help="trace file dump, stdin if not given")
parser.add_argument("-s", "--start", type=float, default=0,
help="start timestamp")
parser.add_argument("-e", "--end", type=float, default=1e9,
help="end timestamp")
parser.add_argument("-v", "--verbosity", action="count", default=0,
help="amount of extra information: once for warns (dup addrs), "
"twice for notices (different client in map/unmap), "
"three for echoing all back")
parser.add_argument("-p", "--processes", action="store_true",
help="use processes as memory clients instead of devices")
parser.add_argument("-n", "--no-plots", action="store_true",
help="Don't draw the plots, only read the trace")
parser.add_argument("-c", "--cache", action="store_true",
help="Pickle the data and make a cache file for fast reloading")
parser.add_argument("-m", "--max-plots", type=int,
help="Maximum number of clients to show; show biggest and sum others")
parser.add_argument("-M", "--max-metric", choices=Trace.get_metrics(),
default=Trace.get_metrics()[0],
help="Metric to use when choosing clients in --max-plots")
return parser.parse_args()
def main():
args = get_args()
trace = Trace(args)
trace.load_events()
trace.show()
if __name__ == "__main__":
main()
| gpl-2.0 |
phgupta/Building-Analytics | building-analytics/TS_Util_Clean_Data.py | 1 | 15125 | # -*- coding: utf-8 -*-
"""
@author : Armando Casillas <armcasillas@ucdavis.edu>
@author : Marco Pritoni <marco.pritoni@gmail.com>
Created on Wed Jul 26 2017
Update Aug 08 2017
"""
from __future__ import division
import pandas as pd
import os
import sys
import requests as req
import json
import numpy as np
import datetime
import pytz
from pandas import rolling_median
from matplotlib import style
import matplotlib
class TS_Util(object):
########################################################################
## simple load file section - eventually replace this with CSV_Importer
def _set_TS_index(self, data):
'''
Parameters
----------
Returns
-------
'''
# set index
data.index = pd.to_datetime(data.index)
# format types to numeric
for col in data.columns:
data[col] = pd.to_numeric(data[col], errors="coerce")
return data
def load_TS(self, fileName, folder):
'''
Parameters
----------
Returns
-------
'''
path = os.path.join(folder, fileName)
data = pd.read_csv(path, index_col=0)
data = self._set_TS_index(data)
return data
########################################################################
## time correction for time zones - eventually replace this with CSV_Importer
def _utc_to_local(self, data, local_zone="America/Los_Angeles"):
'''
Function takes in a pandas dataframe and adjusts the index according to the timezone requested by the user
Parameters
----------
data: Dataframe
pandas dataframe of json timeseries response from server
local_zone: string
pytz.timezone string of specified local timezone to change index to
Returns
-------
data: Dataframe
Pandas dataframe with timestamp index adjusted for local timezone
'''
data.index = data.index.tz_localize(pytz.utc).tz_convert(
local_zone) # accounts for localtime shift
# Gets rid of extra offset information so can compare with csv data
data.index = data.index.tz_localize(None)
return data
def _local_to_utc(self, timestamp, local_zone="America/Los_Angeles"):
'''
Parameters
----------
# Change timestamp request time to reflect request in terms of local time relative to utc - working as of 5/5/17 ( Should test more )
# remove and add to TS_Util and import
Returns
-------
'''
timestamp_new = pd.to_datetime(
timestamp, infer_datetime_format=True, errors='coerce')
timestamp_new = timestamp_new.tz_localize(
local_zone).tz_convert(pytz.utc)
timestamp_new = timestamp_new.strftime('%Y-%m-%d %H:%M:%S')
return timestamp_new
########################################################################
## remove start and end NaN: Note issue with multi-column df
def remove_start_NaN(self, data, var=None):
'''
Parameters
----------
Returns
-------
'''
if var: # limit to one or some variables
start_ok_data = data[var].first_valid_index()
else:
start_ok_data = data.first_valid_index()
data = data.loc[start_ok_data:, :]
return data
def remove_end_NaN(self, data, var=None):
'''
Parameters
----------
Returns
-------
'''
if var: # limit to one or some variables
end_ok_data = data[var].last_valid_index()
else:
end_ok_data = data.last_valid_index()
data = data.loc[:end_ok_data, :]
return data
########################################################################
## Missing data section
def _find_missing_return_frame(self, data):
'''
Function takes in pandas dataframe and find missing values in each column
Parameters
----------
data: Dataframe
Returns
-------
data: Dataframe
'''
return data.isnull()
def _find_missing(self, data, return_bool=False):
if return_bool == False: # this returns the full table with True where the condition is true
data = self._find_missing_return_frame(data)
return data
elif return_bool == "any": # this returns a bool selector if any of the column is True
bool_sel = self._find_missing_return_frame(data).any(axis=1)
return bool_sel
elif return_bool == "all": # this returns a bool selector if all of the column are True
bool_sel = self._find_missing_return_frame(data).all(axis=1)
return bool_sel
else:
print("error in multi_col_how input")
return
def display_missing(self, data, return_bool="any"):
'''
Parameters
----------
Returns
-------
'''
if return_bool == "any":
bool_sel = self._find_missing(data,return_bool="any")
elif return_bool == "all":
bool_sel = self._find_missing(data,return_bool="all")
return data[bool_sel]
def count_missing(self, data, output="number"):
'''
Parameters
----------
how = "number" or "percent"
Returns
-------
'''
count = self._find_missing(data,return_bool=False).sum()
if output == "number":
return count
elif output == "percent":
return ((count / (data.shape[0])) * 100)
def remove_missing(self, data, return_bool="any"):
'''
Parameters
----------
Returns
-------
'''
if return_bool == "any":
bool_sel = self._find_missing(data,return_bool="any")
elif return_bool == "all":
bool_sel = self._find_missing(data,return_bool="all")
return data[~bool_sel]
########################################################################
## Out of Bound section
def _find_outOfBound(self, data, lowBound, highBound):
'''
Parameters
----------
Returns
-------
'''
data = ((data < lowBound) | (data > highBound))
return data
def display_outOfBound(self, data, lowBound, highBound):
'''
Parameters
----------
Returns
-------
'''
data = data[self._find_outOfBound(
data, lowBound, highBound).any(axis=1)]
return data
def count_outOfBound(self, data, lowBound, highBound, output):
'''
Parameters
----------
Returns
-------
'''
count = self._find_outOfBound(data, lowBound, highBound).sum()
if output == "number":
return count
elif output == "percent":
return count / (data.shape[0]) * 1.0 * 100
def remove_outOfBound(self, data, lowBound, highBound):
'''
Parameters
----------
Returns
-------
'''
data = data[~self._find_outOfBound(
data, lowBound, highBound).any(axis=1)]
return data
########################################################################
## Outliers section
def _calc_outliers_bounds(self, data, method, coeff, window):
'''
Parameters
----------
Returns
-------
'''
if method == "std":
lowBound = (data.mean(axis=0) - coeff * data.std(axis=0)).values[0]
highBound = (data.mean(axis=0) + coeff * data.std(axis=0)).values[0]
elif method == "rstd":
rl_mean=data.rolling(window=window).mean(how=any)
rl_std = data.rolling(window=window).std(how=any).fillna(method='bfill').fillna(method='ffill')
lowBound = rl_mean - coeff * rl_std
highBound = rl_mean + coeff * rl_std
elif method == "rmedian":
rl_med = data.rolling(window=window, center=True).median().fillna(
method='bfill').fillna(method='ffill')
lowBound = rl_med - coeff
highBound = rl_med + coeff
elif method == "iqr": # coeff is multip for std and IQR or threshold for rolling median
Q1 = data.quantile(.25) # coeff is multip for std or % of quartile
Q3 = data.quantile(.75)
IQR = Q3 - Q1
lowBound = Q1 - coeff * IQR
highBound = Q3 + coeff * IQR
elif method == "qtl":
lowBound = data.quantile(.005)
highBound = data.quantile(.995)
else:
print ("method chosen does not exist")
lowBound = None
highBound = None
return lowBound, highBound
def display_outliers(self, data, method, coeff, window=10):
'''
Parameters
----------
Returns
-------
'''
lowBound, highBound = self._calc_outliers_bounds(
data, method, coeff, window)
data = self.display_outOfBound(data, lowBound, highBound)
return data
def count_outliers(self, data, method, coeff, output, window=10):
'''
Parameters
----------
Returns
-------
'''
lowBound, highBound = self._calc_outliers_bounds(
data, method, coeff, window)
count = self.count_outOfBound(data, lowBound, highBound, output=output)
return count
def remove_outliers(self, data, method, coeff, window=10):
'''
Parameters
----------
Returns
-------
'''
lowBound, highBound = self._calc_outliers_bounds(
data, method, coeff, window)
data = self.remove_outOfBound(data, lowBound, highBound)
return data
########################################################################
## If condition section
def _find_equal_to_values(self, data, val):
'''
Parameters
----------
Returns
-------
'''
#print(val)
bool_sel = (data == val)
return bool_sel
def _find_greater_than_values(self, data, val):
'''
Parameters
----------
Returns
-------
'''
bool_sel = (data > val)
return bool_sel
def _find_less_than_values(self, data, val):
'''
Parameters
----------
Returns
-------
'''
bool_sel = (data < val)
return bool_sel
def _find_greater_than_or_equal_to_values(self, data, val):
'''
Parameters
----------
Returns
-------
'''
bool_sel = (data >= val)
return bool_sel
def _find_less_than_or_equal_to_values(self, data, val):
'''
Parameters
----------
Returns
-------
'''
bool_sel = (data <= val)
return bool_sel
def _find_different_from_values(self, data, val):
'''
Parameters
----------
Returns
-------
'''
bool_sel = ~(data == val)
return bool_sel
def count_if(self, data, condition, val, output="number"):
"""
condition = "equal", "below", "above"
val = value to compare against
how = "number" or "percent"
"""
if condition == "=":
count = self._find_equal_to_values(data,val).sum()
elif condition == ">":
count = self._find_greater_than_values(data,val).sum()
elif condition == "<":
count = self._find_less_than_values(data,val).sum()
elif condition == ">=":
count = self._find_greater_than_or_equal_to_values(data,val).sum()
elif condition == "<=":
count = self._find_less_than_or_equal_to_values(data,val).sum()
elif condition == "!=":
count = self._find_different_from_values(data,val).sum()
if output == "number":
return count
elif output == "percent":
return count/data.shape[0]*1.0*100
return count
########################################################################
## Missing Data Events section
def get_start_events(self, data, var = "T_ctrl [oF]"): # create list of start events
'''
Parameters
----------
Returns
-------
'''
start_event = (data[var].isnull()) & ~(data[var].shift().isnull()) # find NaN start event
start = data[start_event].index.tolist() # selector for these events
if np.isnan(data.loc[data.index[0],var]): # if the first record is NaN
start = [data.index[0]] + start # add first record as starting time for first NaN event
else:
start = start
return start
def get_end_events(self, data, var = "T_ctrl [oF]"): # create list of end events
'''
Parameters
----------
Returns
-------
'''
end_events = ~(data[var].isnull()) & (data[var].shift().isnull()) # find NaN end events
end = data[end_events].index.tolist() # selector for these events
if ~np.isnan(data.loc[data.index[0],var]): # if first record is not NaN
end.remove(end[0]) # remove the endpoint ()
if np.isnan(data.loc[data.index[-1],var]): # if the last record is NaN
end = end + [data.index[-1]] # add last record as ending time for first NaN event
else:
end = end
return end
def create_event_table(self, data, var): # create dataframe of of start-end-length for current house/tstat
'''
Parameters
----------
Returns
-------
'''
# remove initial and final missing data
data = self.remove_start_NaN(data, var)
data = self.remove_end_NaN(data, var)
# create list of start events
start = self.get_start_events(data, var)
# create list of end events
end = self.get_end_events(data, var)
# merge lists into dataframe and calc length
events = pd.DataFrame.from_items([("start",start), ("end",end )])
events["length_min"] = (events["end"] - events["start"]).dt.total_seconds()/60 # note: this needs datetime index
#print events
events.set_index("start",inplace=True)
return events
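if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): file name, folder
    # and thresholds below are placeholders rather than real project data.
    tsu = TS_Util()
    data = tsu.load_TS('example_meter.csv', './data') # hypothetical CSV file
    print(tsu.count_missing(data, output='percent'))
    print(tsu.count_if(data, '<', 0, output='number')) # negative readings
    cleaned = tsu.remove_missing(data, return_bool='any')
    cleaned = tsu.remove_outliers(cleaned, method='std', coeff=3)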
| mit |
allanino/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/pylab.py | 70 | 10245 | """
This is a procedural interface to the matplotlib object-oriented
plotting library.
The following plotting commands are provided; the majority have
Matlab(TM) analogs and similar argument.
_Plotting commands
acorr - plot the autocorrelation function
annotate - annotate something in the figure
arrow - add an arrow to the axes
axes - Create a new axes
axhline - draw a horizontal line across axes
axvline - draw a vertical line across axes
axhspan - draw a horizontal bar across axes
axvspan - draw a vertical bar across axes
axis - Set or return the current axis limits
bar - make a bar chart
barh - a horizontal bar chart
broken_barh - a set of horizontal bars with gaps
box - set the axes frame on/off state
boxplot - make a box and whisker plot
cla - clear current axes
clabel - label a contour plot
clf - clear a figure window
clim - adjust the color limits of the current image
close - close a figure window
colorbar - add a colorbar to the current figure
cohere - make a plot of coherence
contour - make a contour plot
contourf - make a filled contour plot
csd - make a plot of cross spectral density
delaxes - delete an axes from the current figure
draw - Force a redraw of the current figure
errorbar - make an errorbar graph
figlegend - make legend on the figure rather than the axes
figimage - make a figure image
figtext - add text in figure coords
figure - create or change active figure
fill - make filled polygons
findobj - recursively find all objects matching some criteria
gca - return the current axes
gcf - return the current figure
gci - get the current image, or None
getp - get a handle graphics property
grid - set whether gridding is on
hist - make a histogram
hold - set the axes hold state
ioff - turn interaction mode off
ion - turn interaction mode on
isinteractive - return True if interaction mode is on
imread - load image file into array
imshow - plot image data
ishold - return the hold state of the current axes
legend - make an axes legend
loglog - a log log plot
matshow - display a matrix in a new figure preserving aspect
pcolor - make a pseudocolor plot
pcolormesh - make a pseudocolor plot using a quadrilateral mesh
pie - make a pie chart
plot - make a line plot
plot_date - plot dates
plotfile - plot column data from an ASCII tab/space/comma delimited file
pie - pie charts
polar - make a polar plot on a PolarAxes
psd - make a plot of power spectral density
quiver - make a direction field (arrows) plot
rc - control the default params
rgrids - customize the radial grids and labels for polar
savefig - save the current figure
scatter - make a scatter plot
setp - set a handle graphics property
semilogx - log x axis
semilogy - log y axis
show - show the figures
specgram - a spectrogram plot
spy - plot sparsity pattern using markers or image
stem - make a stem plot
subplot - make a subplot (numrows, numcols, axesnum)
subplots_adjust - change the params controlling the subplot positions of current figure
subplot_tool - launch the subplot configuration tool
suptitle - add a figure title
table - add a table to the plot
text - add some text at location x,y to the current axes
thetagrids - customize the radial theta grids and labels for polar
title - add a title to the current axes
xcorr - plot the autocorrelation function of x and y
xlim - set/get the xlimits
ylim - set/get the ylimits
xticks - set/get the xticks
yticks - set/get the yticks
xlabel - add an xlabel to the current axes
ylabel - add a ylabel to the current axes
autumn - set the default colormap to autumn
bone - set the default colormap to bone
cool - set the default colormap to cool
copper - set the default colormap to copper
flag - set the default colormap to flag
gray - set the default colormap to gray
hot - set the default colormap to hot
hsv - set the default colormap to hsv
jet - set the default colormap to jet
pink - set the default colormap to pink
prism - set the default colormap to prism
spring - set the default colormap to spring
summer - set the default colormap to summer
winter - set the default colormap to winter
spectral - set the default colormap to spectral
_Event handling
connect - register an event handler
disconnect - remove a connected event handler
_Matrix commands
cumprod - the cumulative product along a dimension
cumsum - the cumulative sum along a dimension
detrend - remove the mean or best fit line from an array
diag - the k-th diagonal of matrix
diff - the n-th difference of an array
eig - the eigenvalues and eigen vectors of v
eye - a matrix where the k-th diagonal is ones, else zero
find - return the indices where a condition is nonzero
fliplr - flip the columns of a matrix left/right
flipud - flip the rows of a matrix up/down
linspace - a linear spaced vector of N values from min to max inclusive
logspace - a log spaced vector of N values from min to max inclusive
meshgrid - repeat x and y to make regular matrices
ones - an array of ones
rand - an array from the uniform distribution [0,1]
randn - an array from the normal distribution
rot90 - rotate matrix k*90 degrees counterclockwise
squeeze - squeeze an array removing any dimensions of length 1
tri - a triangular matrix
tril - a lower triangular matrix
triu - an upper triangular matrix
vander - the Vandermonde matrix of vector x
svd - singular value decomposition
zeros - a matrix of zeros
_Probability
levypdf - The levy probability density function from the char. func.
normpdf - The Gaussian probability density function
rand - random numbers from the uniform distribution
randn - random numbers from the normal distribution
_Statistics
corrcoef - correlation coefficient
cov - covariance matrix
amax - the maximum along dimension m
mean - the mean along dimension m
median - the median along dimension m
amin - the minimum along dimension m
norm - the norm of vector x
prod - the product along dimension m
ptp - the max-min along dimension m
std - the standard deviation along dimension m
asum - the sum along dimension m
_Time series analysis
bartlett - M-point Bartlett window
blackman - M-point Blackman window
cohere - the coherence using average periodogram
csd - the cross spectral density using average periodogram
fft - the fast Fourier transform of vector x
hamming - M-point Hamming window
hanning - M-point Hanning window
hist - compute the histogram of x
kaiser - M length Kaiser window
psd - the power spectral density using average periodogram
sinc - the sinc function of array x
_Dates
date2num - convert python datetimes to numeric representation
drange - create an array of numbers for date plots
num2date - convert numeric type (float days since 0001) to datetime
_Other
angle - the angle of a complex array
griddata - interpolate irregularly distributed data to a regular grid
load - load ASCII data into array
polyfit - fit x, y to an n-th order polynomial
polyval - evaluate an n-th order polynomial
roots - the roots of the polynomial coefficients in p
save - save an array to an ASCII file
trapz - trapezoidal integration
__end
"""
import sys, warnings
from cbook import flatten, is_string_like, exception_to_str, popd, \
silent_list, iterable, dedent
import numpy as np
from numpy import ma
from matplotlib import mpl # pulls in most modules
from matplotlib.dates import date2num, num2date,\
datestr2num, strpdate2num, drange,\
epoch2num, num2epoch, mx2num,\
DateFormatter, IndexDateFormatter, DateLocator,\
RRuleLocator, YearLocator, MonthLocator, WeekdayLocator,\
DayLocator, HourLocator, MinuteLocator, SecondLocator,\
rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, MONTHLY,\
WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY, relativedelta
import matplotlib.dates
# bring all the symbols in so folks can import them from
# pylab in one fell swoop
from matplotlib.mlab import window_hanning, window_none,\
conv, detrend, detrend_mean, detrend_none, detrend_linear,\
polyfit, polyval, entropy, normpdf, griddata,\
levypdf, find, trapz, prepca, rem, norm, orth, rank,\
sqrtm, prctile, center_matrix, rk4, exp_safe, amap,\
sum_flat, mean_flat, rms_flat, l1norm, l2norm, norm, frange,\
diagonal_matrix, base_repr, binary_repr, log2, ispower2,\
bivariate_normal, load, save
from matplotlib.mlab import stineman_interp, slopes, \
stineman_interp, inside_poly, poly_below, poly_between, \
is_closed_polygon, path_length, distances_along_curve, vector_lengths
from numpy import *
from numpy.fft import *
from numpy.random import *
from numpy.linalg import *
from matplotlib.mlab import window_hanning, window_none, conv, detrend, demean, \
detrend_mean, detrend_none, detrend_linear, entropy, normpdf, levypdf, \
find, longest_contiguous_ones, longest_ones, prepca, prctile, prctile_rank, \
center_matrix, rk4, bivariate_normal, get_xyz_where, get_sparse_matrix, dist, \
dist_point_to_segment, segments_intersect, fftsurr, liaupunov, movavg, \
save, load, exp_safe, \
amap, rms_flat, l1norm, l2norm, norm_flat, frange, diagonal_matrix, identity, \
base_repr, binary_repr, log2, ispower2, fromfunction_kw, rem, norm, orth, rank, sqrtm,\
mfuncC, approx_real, rec_append_field, rec_drop_fields, rec_join, csv2rec, rec2csv, isvector
from matplotlib.pyplot import *
# provide the recommended module abbrevs in the pylab namespace
import matplotlib.pyplot as plt
import numpy as np
| agpl-3.0 |
fxia22/pointGAN | show_ae.py | 1 | 1680 | from __future__ import print_function
from show3d_balls import *
import argparse
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from datasets import PartDataset
from pointnet import PointGen, PointGenC, PointNetAE
import torch.nn.functional as F
import matplotlib.pyplot as plt
#showpoints(np.random.randn(2500,3), c1 = np.random.uniform(0,1,size = (2500)))
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default = '', help='model path')
opt = parser.parse_args()
print (opt)
ae = PointNetAE(num_points = 2048)
ae.load_state_dict(torch.load(opt.model))
dataset = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0', class_choice = ['Chair'], classification = True, npoints = 2048)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=64,
shuffle=True, num_workers=1)
ae.cuda()
i,data = enumerate(dataloader, 0).next()
points, _ = data
points = Variable(points)
bs = points.size()[0]
points = points.transpose(2,1)
points = points.cuda()
gen = ae(points)
point_np = gen.transpose(2,1).cpu().data.numpy()
#showpoints(points.transpose(2,1).cpu().data.numpy())
showpoints(point_np)
#sim_noise = Variable(torch.randn(1000, 100))
#points = gen(sim_noise)
#point_np = points.transpose(2,1).data.numpy()
#print(point_np.shape)
#np.savez('gan.npz', points = point_np)
| mit |
BU-PyCon/Meeting-2 | Programs/PyPlot.py | 1 | 18763 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.patches import *
import pdb
print("""
MatPlotLib Advanced Tutorial
----------------------------
This is a tutorial covering the features and usage of the matplotlib package
in more detail. In truth, no single tutorial can cover all the features that
exist in the matplotlib package since it is extremely expansive. This tutorial
will cover as much material as possible to let you know of the features that
are available to you when plotting data.
Some Notes:
1) A few parts of this program uses pdb to pause the program and allow the
user to try making things for themselves. Use c to continue with the
program.
2) This program uses plt for the reference name of pyplot. All pyplot methods
should be preceded with the qualifier plt such as plt.show().
3) For the best results, run this program with ipython. Regular python may
dislike plotting during program execution.
""")
pause = input("Press [Enter] to continue...")
print('\n'*100)
print("""
###
## pyplot
###
Within the matplotlib package, the main module you want to be using is the
pyplot module. It is generally imported as
import matplotlib.pyplot as plt
The pyplot module has many useful functions that can be called and used and
we will go over them one by one. For reference, some useful methods are shown
below.
>>> plt.close() # Closes the current figure. Optional arguments
include passing in a figure, figure number,
or the string 'all' which closes all figures.
>>> plt.draw() # Forces the figure to be redrawn. Useful if it
has been updated after it was last shown or
drawn.
>>> plt.gca() # Returns the currently active axes object
>>> plt.gcf() # Returns the currently active figure object
>>> plt.show() # Shows the latest figure. By default, matplotlib
pauses and waits for the window to be closed
before continuing. This feature can be turned
off with the keyword block = False.
>>> plt.savefig('title.png') # Saves the figure to a file. The file type is
automatically determined by the extension.
Supported formats include png, pdf, ps, eps,
and svg. This has the keywords dpi which
specifies the resolution of the output and
bbox_inches which, when set to 'tight' reduces
any extra white space in the saved file.
>>> plt.subplots_adjust() # Allows for adjusting parameters of the layout
such as the horizontal space (hspace) or width
space (wspace) between plots, as well as the
left, right, top, and bottom padding.
""")
pause = input("Press [Enter] to continue...")
print('\n'*100)
print("""
###
## Components of a Plot
###
At it's core, matplotlib is nothing more than a graphics package. Every
component of a plot is just a particular "Artist", all drawn on top of
each other to make a nice looking plot.
The beauty of pyplot is the degree of customization that you can have.
You have control over every individual component of this plot and you can
change each of them individually. To do this properly, we will focus on
using the object oriented feature of matplotlib.
Before we talk about how to work with all these features, we need to know
what they are. A window should have just popped up that you can examine.
This window shows all the various components of a figure and the names that
pyplot uses for them. This figure contains the following components
-> Figure The main part of the plot which everything is shown on. This
encompasses the entire area of the window, excluding the toolbar.
-> Axes A single plot, added to the figure. This can have many sets of
data added to it along with other components such as legends.
Axes can even sit on top of other axes, but importantly, they
are still a component of figure, not the axes they may sit inside
of.
-> Axis Note the difference here! This is an axIs not an axEs. This
component is a single axis on the axes and defines how the data
is plotted. An axes, by default has two axises, the x and y
(unless you're plotting in 3D in which case it has a z). You can
add more axises though. Each axis has various components such as
the spine, tick labels, major ticks, and minor ticks.
-> Spine Each axis has various components. One of them is the spine. This
is the actual black line at the border of the plots that the
tick marks are drawn on. Each default axis has 2 spines. For the
x axis, it has the top and bottom spine and likewise the y axis
has the right and left.
-> Legend Each axes can have a legend added to it. The legend can have lines
on it, one for each curve labeled.
""")
x = np.arange(0,4*np.pi+np.pi/8,np.pi/8)
y1 = np.sin(x)
y2 = np.cos(x)
fig, (ax1, ax2) = plt.subplots(2, figsize = (10,7))
fig.canvas.set_window_title('Pyplot Figure Components')
plt.subplots_adjust(hspace = 0.4)
plt.suptitle('Figure title', fontsize = 20)
#Create subplot 1
ax1.plot(x, y1, '-dr', label = '$sin(x)$')
ax1.plot(x, np.array([0]*len(x)), '--k')
ax1.set_xlim([0,4*np.pi])
ax1.set_title('Axes 1 Title')
ax1.set_xlabel('Axes 1 x-axis label')
ax1.set_ylabel('Axes 1 y-axis label')
ax1.legend(loc = 'best')
#Create subplot 2
ax2.plot(x, y2, ':og', label = '$cos(x)$')
ax2.plot(x, np.array([0]*len(x)), '-k')
ax2.set_xlim([0,4*np.pi])
ax2.set_title('Axes 2 Title')
ax2.set_xlabel('Axes 2 x-axis label')
ax2.set_ylabel('Axes 2 y-axis label')
ax2.legend(loc = 'best')
#Add artists
ax = fig.add_axes([0,0,1,1])
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.set_zorder(0)
ax.set_axis_bgcolor((0, 0, 0, 0))
ax.add_patch(Rectangle((0.01,0.01),0.98,0.98, fill = False, lw = 2, ec = 'b', transform=ax.transAxes))
ax.annotate('Figure', (0.02,0.02), textcoords = 'axes fraction',
fontsize = 20, color = 'b', transform=ax.transAxes)
ax.add_patch(Rectangle((0.04,0.5),0.9,0.44, fill = False, lw = 2, ec = 'g', transform=ax.transAxes))
ax.annotate('Axes', (0.05,0.52), textcoords = 'axes fraction',
fontsize = 20, color = 'g', transform=ax.transAxes)
ax.add_patch(Rectangle((0.11,0.08),0.03,0.38, fill = False, lw = 2, ec = 'r', transform=ax.transAxes))
ax.annotate('Axis', (0.045,0.4), textcoords = 'axes fraction',
fontsize = 20, color = 'r', transform=ax.transAxes)
ax.add_patch(Rectangle((0.11,0.08),0.8,0.04, fill = False, lw = 2, ec = 'r', transform=ax.transAxes))
ax.annotate('Axis', (0.85,0.04), textcoords = 'axes fraction',
fontsize = 20, color = 'r')
ax.annotate('Spine', (0.8,0.43), xytext = (0.8,0.35), xycoords = 'axes fraction',
color = (1,0.5,0), fontsize = 20,
textcoords = 'axes fraction', horizontalalignment='left',
arrowprops=dict(arrowstyle = '-|>', fc=(1,0.5,0)))
ax.annotate('', (0.9,0.32), xytext = (0.84,0.34), xycoords = 'axes fraction',
arrowprops=dict(arrowstyle = '-|>', fc=(1,0.5,0)))
plt.show(block = False)
plt.pause(0.01)
pause = input('Press [Enter] to continue...')
print('\n'*100)
print("""
###
## Objects in matplotlib
###
The above mentioned components of a figure (along with a few others) are
all representable as objects. These objects are stored in a variable which
maintains the state of that object and also has functions the object can
call to change its state. Let's look at how we can use these objects to
create a new figure.
""")
pause = input('Press [Enter] to continue...')
print('\n'*100)
print("""
###
## Creating a New Figure
###
There are multiple ways to create a new figure. Probably the simplest is
>>> fig = figure(1, figsize = (5,5), tight_layout = True)
The 1 in this case is an ID for the figure (much like the logical unit
number in IDL). The keywords figsize and tight_layout are optional. The
former sets the physical size of the figure and the second tells the layout
manager to make the plots as close as possible. The state of the figure is
stored in the fig variable which knows entirely about this new figure.
Calling this figure method tells matplotlib that any subsequent plotting
commands should apply to this figure. We can switch to plotting on a new
figure by calling the figure command for another figure (or even switch
back to an old figure). Another method for creating figures is the following
>>> fig, ax = plt.subplots()
This method is much more powerful, but these features will be discussed in
the next section. For reference here are a set of methods and their
functionality that the figure object can call
>>> fig.add_subplot(111) # Adds a subplot at the specified position
>>> fig.clear() # Clears the figure's axes
>>> fig.suptitle('Title') # Adds a title to the figure
Many of the methods listed above as pyplot methods, such as subplots_adjust or
draw, can be applied to a specific figure as well.
""")
pause = input('Press [Enter] to continue...')
print('\n'*100)
print("""
###
## Creating a New Axes
###
Once you have a figure, it's time to add some axes to it. As mentioned
before, matplotlib supports using objects. If you've properly created your
figure, it will have been stored into an object. You can now call the method
add_subplot.
>>> ax = fig.add_subplot(1,1,1)
The order of these parameters is add_subplot(rows, columns, plotNo), where
plotNo is the number of the plot, starting at 1 in the upper left and counting
left to right then top to bottom. If all values are less than 10, an equivalent
procedure is to do
>>> ax = fig.add_subplot(111)
Note how this function has created and returned an axes object which we have
stored into the variable ax. There is another method which creates the figure
and axes at the same time
>>> fig, (ax1, ax2) = plt.subplots(nrows = 2, ncols = 1, figsize = (8,8))
The figure is stored into the first variable and the axes are stored into
the second variable, which is a tuple of axes.
You can also call plt.subplot(), which acts like add_subplot() but adds
an axes to the currently active figure (determined by the last one referenced).
For more control over your axes positioning, you can specify the exact position
and extent of an axes with the subplot2grid function.
>>> ax = plt.subplot2grid((2,3),(1,0), colspan = 2, rowspan = 1)
This tells figure that there will be a grid of 2 x 3 plots (2 rows, 3 cols) and
this creates a new plot at the position (1,0) (second row, first column) with a
column span of 2 and a row span of 1. If you really want to specify the exact
location, try the add_axes method.
>>> ax = fig.add_axes([0.5, 0.5, 0.3, 0.3])
This tells the figure to put the lower left corner of the axes at the position
(0.5, 0.5) (as fractions of the figure size) and have it span a width and height
of (0.3, 0.3). This is useful for putting plots inside plots. Try this out for
yourself!
""")
pdb.set_trace()
print('\n'*100)
print("""
###
## Plotting to Axes
###
There are many types of plots that can be put on an axes. Below are some simple
examples.
>>> ax.plot() # Simple scatter/line plot
>>> ax.bar() # Vertical bar plot
>>> ax.barh() # Horizontal bar plot
>>> ax.boxplot() # Box and whisker plot
>>> ax.contour() # Contour plot of lines
>>> ax.contourf() # Filled contour plot
>>> ax.errorbar() # Scatter/line plot with errorbars
>>> ax.fill() # Scatter/line plot which is filled below the curve
>>> ax.hist() # A histogram of the input data
>>> ax.loglog() # Scatter/line plot that is logarithmic on both axes
>>> ax.pie() # Pie chart
>>> ax.polar() # Polar plot
>>> ax.quiver() # 2D field of arrows
>>> ax.semilogx() # Scatter/line plot with logarithmic x and linear y.
>>> ax.semilogy() # Equivalent to semilogx, but now y is logarithmic
>>> ax.streamplot() # A streamline plot of a vector flow
>>> ax.step() # A step plot
Feel free to try out some of these. You may have to look up the proper
procedures online.
""")
pdb.set_trace()
print('\n'*100)
print("""
###
## Axes Methods
###
Aside from the many plots, there are many useful methods to adjust the
properties of the axes
>>> ax.add_patch() # Adds a 'patch' which is an artist like arrows or circles
>>> ax.annotate() # Adds a string with an arrow to the axes
>>> ax.axhspan() # Adds a horizontal bar across the plot
>>> ax.axvspan() # Adds a vertical bar across the plot
>>> ax.arrow() # Adds an arrow
>>> ax.cla() # Clears the axes
>>> ax.colorbar() # Colorbar added to the plot
>>> ax.grid() # Turns on grid lines, keywords include which (major or
minor) and axis (both, x, or y).
>>> ax.legend() # Legend added to the plot
>>> ax.minorticks_on() # Turns on minor tick marks
>>> ax.set_cmap() # Sets the color map of the axes
>>> ax.set_title() # Sets the title of the axes
>>> ax.set_xlabel() # Sets the x label of the axes
>>> ax.set_xlim() # Sets the x limits of the axes
>>> ax.set_xscale() # Sets the scale, either linear, log, or symlog
>>> ax.set_xticklabels()# A list of strings to use for the tick labels
>>> ax.set_xticks() # Sets the values of the tick marks with a list
## The above x axis specific functions have analogous y axis functions
>>> ax.text() # Adds a string to the axes
>>> ax.tick_params() # Changes tick and tick label appearances
Try playing with these various features after creating a figure and axes.
""")
pdb.set_trace()
print('\n'*100)
print("""
###
## Axis and Spines
###
Just as you can work with the specific axes on a figure, you can also work with
specific axis objects and spines on your axes. These can be extracted and stored in
their own variables, but it is generally easier to refer to them as the
components of the axes object. They are accessed in the following way.
>>> ax.xaxis
>>> ax.yaxis
>>> ax.spines['top'] # spines is a dict with components, 'top', 'bottom',
'left', and 'right'.
These components of the axes have the following useful methods.
>>> ax.xaxis.set_major_formatter()# Sets how the tick marks are formatted
>>> ax.xaxis.set_major_locator() # Sets location of tick marks (see locators)
## The above major methods have analogous minor methods
>>> ax.xaxis.set_ticklabels() # Set to empty list to turn off labels
>>> ax.xaxis.set_ticks_position() # Change tick position to only 'top', 'left', etc.
## The above xaxis methods have analogous yaxis methods
>>> ax.spines['top'].set_color() # Sets the color of the spine
>>> ax.spines['top'].set_position()# Changes position of the spine
>>> ax.spines['top'].set_visible()# Turns off the spine
## The above spine methods have analogous methods for 'bottom', 'left', and
'right'
Feel free to play with these properties as well.
""")
pdb.set_trace()
print('\n'*100)
print("""
###
## Higher Degrees of Customization
###
We could choose to go even further down the ladder than axis and spines. It is
possible to get the tickmark objects from an axis (via get_major_ticks()) and
change properties on a tickmark-by-tickmark basis. However, it is no longer
instructive to continue showing methods and ways of doing this as it can always
be looked up. For extreme control over every component of plotting, it is sometimes
useful to use the rcParams variable. This should be imported as
from matplotlib import rcParams
You can then refer to any component of the figure by referencing the dict's
keyword, and setting the value. Common examples include
>>> rcParams['lines.linewidth'] = 2 # Sets linewidths to be 2 by default
>>> rcParams['lines.color'] = 'r' # Sets line colors to be red by default
There are hundreds of parameters that can be set, all of which can be seen by
going here http://matplotlib.org/users/customizing.html.
""")
pause = input('Press [Enter] to continue...')
print('\n'*100)
print("""
###
## Animations
###
This will only introduce the idea of animations. To actually produce saved
animations in the form of mp4 or some similar format requires installing third
party programs such as ffmpeg. However, matplotlib comes with an animation
package imported as matplotlib.animation. It has tools to allow you to
continually update a plot such that it is animated, and it also provides the
ability to save the animation. Below is the code for a very simple animation plot.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
fig, ax = plt.subplots()
ax.set_xlim([0,2*np.pi])
x = np.arange(0, 2*np.pi, 0.01) # x-array
line, = ax.plot(x, np.sin(x)) # The comma after line makes it a tuple
#Init only required for blitting to give a clean slate.
def init():
line.set_ydata(np.ma.array(x, mask=True))
return line,
def animate(i):
line.set_ydata(np.sin(x+i/10.0)) # update the data
return line,
#blit=True means only redraw the components which have updated. This is
#faster than redrawing everything.
ani = animation.FuncAnimation(fig, animate, init_func=init,
interval=25, blit=True)
plt.show()
""")
fig, ax = plt.subplots()
ax.set_xlim([0,2*np.pi])
x = np.arange(0, 2*np.pi, 0.01) # x-array
line, = ax.plot(x, np.sin(x)) # The comma after line makes it a tuple
#Init only required for blitting to give a clean slate.
def init():
line.set_ydata(np.ma.array(x, mask=True))
return line,
def animate(i):
line.set_ydata(np.sin(x+i/10.0)) # update the data
return line,
#blit=True means only redraw the components which have updated. This is
#faster than redrawing everything.
ani = animation.FuncAnimation(fig, animate, init_func=init,
interval=25, blit=True)
plt.show(block = False)
print('Done...')
| mit |
cbertinato/pandas | pandas/tests/plotting/test_backend.py | 1 | 1151 | import pytest
import pandas
def test_matplotlib_backend_error():
msg = ('matplotlib is required for plotting when the default backend '
'"matplotlib" is selected.')
try:
import matplotlib # noqa
except ImportError:
with pytest.raises(ImportError, match=msg):
pandas.set_option('plotting.backend', 'matplotlib')
def test_backend_is_not_module():
msg = ('"not_an_existing_module" does not seem to be an installed module. '
'A pandas plotting backend must be a module that can be imported')
with pytest.raises(ValueError, match=msg):
pandas.set_option('plotting.backend', 'not_an_existing_module')
def test_backend_is_correct(monkeypatch):
monkeypatch.setattr('pandas.core.config_init.importlib.import_module',
lambda name: None)
pandas.set_option('plotting.backend', 'correct_backend')
assert pandas.get_option('plotting.backend') == 'correct_backend'
# Restore backend for other tests (matplotlib can be not installed)
try:
pandas.set_option('plotting.backend', 'matplotlib')
except ImportError:
pass
| bsd-3-clause |
leggitta/mne-python | examples/realtime/ftclient_rt_compute_psd.py | 17 | 2460 | """
==============================================================
Compute real-time power spectrum density with FieldTrip client
==============================================================
Please refer to `ftclient_rt_average.py` for instructions on
how to get the FieldTrip connector working in MNE-Python.
This example demonstrates how to use it for continuous
computation of power spectra in real-time using the
get_data_as_epoch function.
"""
# Author: Mainak Jas <mainak@neuro.hut.fi>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.realtime import FieldTripClient
from mne.time_frequency import compute_epochs_psd
print(__doc__)
# user must provide list of bad channels because
# FieldTrip header object does not provide that
bads = ['MEG 2443', 'EEG 053']
fig, ax = plt.subplots(1)
with FieldTripClient(host='localhost', port=1972,
tmax=150, wait_max=10) as rt_client:
# get measurement info guessed by MNE-Python
raw_info = rt_client.get_measurement_info()
# select gradiometers
picks = mne.pick_types(raw_info, meg='grad', eeg=False, eog=True,
stim=False, include=[], exclude=bads)
n_fft = 256 # the FFT size. Ideally a power of 2
n_samples = 2048 # time window on which to compute FFT
for ii in range(20):
epoch = rt_client.get_data_as_epoch(n_samples=n_samples, picks=picks)
psd, freqs = compute_epochs_psd(epoch, fmin=2, fmax=200, n_fft=n_fft)
cmap = 'RdBu_r'
freq_mask = freqs < 150
freqs = freqs[freq_mask]
log_psd = 10 * np.log10(psd[0])
tmin = epoch.events[0][0] / raw_info['sfreq']
tmax = (epoch.events[0][0] + n_samples) / raw_info['sfreq']
if ii == 0:
im = ax.imshow(log_psd[:, freq_mask].T, aspect='auto',
origin='lower', cmap=cmap)
ax.set_yticks(np.arange(0, len(freqs), 10))
ax.set_yticklabels(freqs[::10].round(1))
ax.set_xlabel('Frequency (Hz)')
ax.set_xticks(np.arange(0, len(picks), 30))
ax.set_xticklabels(picks[::30])
ax.set_xlabel('MEG channel index')
im.set_clim()
else:
im.set_data(log_psd[:, freq_mask].T)
plt.title('continuous power spectrum (t = %0.2f sec to %0.2f sec)'
% (tmin, tmax), fontsize=10)
plt.pause(0.5)
plt.close()
| bsd-3-clause |
ovpn-to/oVPN.to-Client-Software | else/python/hooks.py | 1 | 17289 | # -*- coding: utf-8 -*-
#
# Hooks module for py2exe.
# Inspired by cx_freeze's hooks.py, which is:
#
# Copyright © 2007-2013, Anthony Tuininga.
# Copyright © 2001-2006, Computronix (Canada) Ltd., Edmonton, Alberta, Canada.
# All rights reserved.
#
import os, sys
# Exclude modules that the standard library imports (conditionally),
# but which are not present on windows.
#
# _memimporter can be excluded because it is built into the run-stub.
windows_excludes = """
_curses
_dummy_threading
_emx_link
_gestalt
_posixsubprocess
ce
clr
console
fcntl
grp
java
org
os2
posix
pwd
site
termios
vms_lib
_memimporter
""".split()
def init_finder(finder):
# what about renamed functions, like urllib.pathname2url?
#
# We should use ignore() for Python 2 names so that my py2to3
# importhook works. For modules that are not present on Windows,
# we should probably use excludes.append()
finder.excludes.extend(windows_excludes)
# python2 modules are ignored (but not excluded)
finder.ignore("BaseHTTPServer")
finder.ignore("ConfigParser")
finder.ignore("IronPython")
finder.ignore("SimpleHTTPServer")
finder.ignore("StringIO")
finder.ignore("__builtin__")
finder.ignore("_winreg")
finder.ignore("cPickle")
finder.ignore("cStringIO")
finder.ignore("commands")
finder.ignore("compiler")
finder.ignore("copy_reg")
finder.ignore("dummy_thread")
finder.ignore("future_builtins")
finder.ignore("htmlentitydefs")
finder.ignore("httplib")
finder.ignore("md5")
finder.ignore("new")
finder.ignore("thread")
finder.ignore("unittest2")
finder.ignore("urllib2")
finder.ignore("urlparse")
def hook_pycparser(finder, module):
"""pycparser needs lextab.py and yacctab.py which are not picked
up automatically. Make sure the complete package is included;
otherwise the exe-files may create yacctab.py and lextab.py when
they are run.
"""
finder.import_package_later("pycparser")
def hook_pycparser__build_tables(finder, module):
finder.ignore("lextab")
finder.ignore("yacctab")
finder.ignore("_ast_gen")
finder.ignore("c_ast")
def hook_pycparser_ply(finder, module):
finder.ignore("lex")
finder.ignore("ply")
def hook_OpenSSL(finder, module):
"""OpenSSL needs the cryptography package."""
finder.import_package_later("cryptography")
def hook_cffi_cparser(finder, module):
finder.ignore("cffi._pycparser")
def hook_cffi(finder, module):
# We need to patch two methods in the
# cffi.vengine_cpy.VCPythonEngine class so that cffi libraries
# work from within zip-files.
finder.add_bootcode("""
def patch_cffi():
def find_module(self, module_name, path, so_suffixes):
import sys
name = "%s.%s" % (self.verifier.ext_package, module_name)
try:
__import__(name)
except ImportError:
return None
self.__module = mod = sys.modules[name]
return mod.__file__
def load_library(self):
from cffi import ffiplatform
import sys
# XXX review all usages of 'self' here!
# import it as a new extension module
module = self.__module
#
# call loading_cpy_struct() to get the struct layout inferred by
# the C compiler
self._load(module, 'loading')
#
# the C code will need the <ctype> objects. Collect them in
# order in a list.
revmapping = dict([(value, key)
for (key, value) in self._typesdict.items()])
lst = [revmapping[i] for i in range(len(revmapping))]
lst = list(map(self.ffi._get_cached_btype, lst))
#
# build the FFILibrary class and instance and call _cffi_setup().
# this will set up some fields like '_cffi_types', and only then
# it will invoke the chained list of functions that will really
# build (notably) the constant objects, as <cdata> if they are
# pointers, and store them as attributes on the 'library' object.
class FFILibrary(object):
_cffi_python_module = module
_cffi_ffi = self.ffi
_cffi_dir = []
def __dir__(self):
return FFILibrary._cffi_dir + list(self.__dict__)
library = FFILibrary()
if module._cffi_setup(lst, ffiplatform.VerificationError, library):
import warnings
warnings.warn("reimporting %r might overwrite older definitions"
% (self.verifier.get_module_name()))
#
# finally, call the loaded_cpy_xxx() functions. This will perform
# the final adjustments, like copying the Python->C wrapper
# functions from the module to the 'library' object, and setting
# up the FFILibrary class with properties for the global C variables.
self._load(module, 'loaded', library=library)
module._cffi_original_ffi = self.ffi
module._cffi_types_of_builtin_funcs = self._types_of_builtin_functions
return library
from cffi.vengine_cpy import VCPythonEngine
VCPythonEngine.find_module = find_module
VCPythonEngine.load_library = load_library
patch_cffi()
del patch_cffi
""")
def hook_multiprocessing(finder, module):
module.__globalnames__.add("AuthenticationError")
module.__globalnames__.add("BufferTooShort")
module.__globalnames__.add("Manager")
module.__globalnames__.add("TimeoutError")
module.__globalnames__.add("cpu_count")
module.__globalnames__.add("current_process")
module.__globalnames__.add("get_context")
module.__globalnames__.add("get_start_method")
module.__globalnames__.add("set_start_method")
module.__globalnames__.add("JoinableQueue")
module.__globalnames__.add("Lock")
module.__globalnames__.add("Process")
module.__globalnames__.add("Queue")
module.__globalnames__.add("freeze_support")
def import_psutil(finder, module):
"""Exclude stuff for other operating systems."""
finder.excludes.append("_psutil_bsd")
finder.excludes.append("_psutil_linux")
finder.excludes.append("_psutil_osx")
finder.excludes.append("_psutil_posix")
finder.excludes.append("_psutil_sunos")
def hook_PIL(finder, module):
# c:\Python33-64\lib\site-packages\PIL
"""Pillow loads plugins"""
# Exclude python 2 imports
finder.excludes.append("Tkinter")
finder.import_package_later("PIL")
def hook__socket(finder, module):
"""
_socket.pyd uses the 'idna' encoding; and that requires
'unicodedata.pyd'.
"""
finder.import_hook("encodings.idna")
finder.import_hook("unicodedata")
def hook_pyreadline(finder, module):
"""
"""
finder.ignore("IronPythonConsole")
finder.excludes.append("StringIO") # in pyreadline.py3k_compat
finder.ignore("System")
finder.excludes.append("sets")
finder.ignore("startup")
def hook_xml_etree_ElementTree(finder, module):
"""ElementC14N is an optional extension. Ignore if it is not
found.
"""
finder.ignore("ElementC14N")
def hook_urllib_request(finder, module):
"""urllib.request imports _scproxy on darwin
"""
finder.excludes.append("_scproxy")
def hook_pythoncom(finder, module):
"""pythoncom is a Python extension module with .dll extension,
usually in the windows system directory as pythoncom3X.dll.
"""
import pythoncom
finder.add_dll(pythoncom.__file__)
def hook_pywintypes(finder, module):
"""pywintypes is a Python extension module with .dll extension,
usually in the windows system directory as pywintypes3X.dll.
"""
import pywintypes
finder.add_dll(pywintypes.__file__)
def hook_win32com(finder, module):
"""The win32com package extends it's __path__ at runtime.
"""
finder.import_hook("pywintypes")
finder.import_hook("pythoncom")
import win32com
module.__path__ = win32com.__path__
def hook_win32api(finder, module):
"""win32api.FindFiles(...) needs this."""
#finder.import_hook("pywintypes")
finder.import_hook("win32timezone")
def hook_tkinter(finder, module):
"""Recusively copy tcl and tk directories.
"""
# It probably doesn't make sense to exclude tix from the tcl distribution,
# and only copy it when tkinter.tix is imported...
import tkinter._fix as fix
tcl_dir = os.path.normpath(os.path.join(fix.tcldir, ".."))
assert os.path.isdir(tcl_dir)
finder.add_datadirectory("tcl", tcl_dir, recursive=True)
finder.set_min_bundle("tkinter", 2)
def hook_six(finder, module):
"""six.py has an object 'moves'. This allows to import
modules/packages via attribute access under new names.
We install a fake module named 'six.moves' which simulates this
behaviour.
"""
class SixImporter(type(module)):
"""Simulate six.moves.
Import renamed modules when retrieved as attributes.
"""
__code__ = None
def __init__(self, mf, *args, **kw):
import six
self.__moved_modules = {item.name: item.mod
for item in six._moved_attributes
if isinstance(item, six.MovedModule)}
super().__init__(*args, **kw)
self.__finder = mf
def __getattr__(self, name):
if name in self.__moved_modules:
renamed = self.__moved_modules[name]
self.__finder.safe_import_hook(renamed, caller=self)
mod = self.__finder.modules[renamed]
# add the module again with the renamed name:
self.__finder._add_module("six.moves." + name, mod)
return mod
else:
raise AttributeError(name)
m = SixImporter(finder,
None, "six.moves", finder._optimize)
finder._add_module("six.moves", m)
def hook_matplotlib(finder, module):
"""matplotlib requires data files in a 'mpl-data' subdirectory in
the same directory as the executable.
"""
# c:\Python33\lib\site-packages\matplotlib
mpl_data_path = os.path.join(os.path.dirname(module.__loader__.path),
"mpl-data")
finder.add_datadirectory("mpl-data", mpl_data_path, recursive=True)
finder.excludes.append("wx")
# XXX matplotlib requires tkinter which modulefinder does not
# detect because of the six bug.
def hook_numpy(finder, module):
"""numpy for Python 3 still tries to import some Python 2 modules;
exclude them."""
# I'm not sure if we can safely exclude these:
finder.ignore("Numeric")
finder.ignore("numarray")
finder.ignore("numpy_distutils")
finder.ignore("setuptools")
finder.ignore("Pyrex")
finder.ignore("nose")
finder.ignore("scipy")
def hook_nose(finder, module):
finder.ignore("IronPython")
finder.ignore("cStringIO")
finder.ignore("unittest2")
def hook_sysconfig(finder, module):
finder.ignore("_sysconfigdata")
def hook_numpy_random_mtrand(finder, module):
"""the numpy.random.mtrand module is an extension module and the
numpy.random module imports * from this module; define the list of
global names available to this module in order to avoid spurious
errors about missing modules.
"""
module.__globalnames__.add('RandomState')
module.__globalnames__.add('beta')
module.__globalnames__.add('binomial')
module.__globalnames__.add('bytes')
module.__globalnames__.add('chisquare')
module.__globalnames__.add('choice')
module.__globalnames__.add('dirichlet')
module.__globalnames__.add('exponential')
module.__globalnames__.add('f')
module.__globalnames__.add('gamma')
module.__globalnames__.add('geometric')
module.__globalnames__.add('get_state')
module.__globalnames__.add('gumbel')
module.__globalnames__.add('hypergeometric')
module.__globalnames__.add('laplace')
module.__globalnames__.add('logistic')
module.__globalnames__.add('lognormal')
module.__globalnames__.add('logseries')
module.__globalnames__.add('multinomial')
module.__globalnames__.add('multivariate_normal')
module.__globalnames__.add('negative_binomial')
module.__globalnames__.add('noncentral_chisquare')
module.__globalnames__.add('noncentral_f')
module.__globalnames__.add('normal')
module.__globalnames__.add('np')
module.__globalnames__.add('operator')
module.__globalnames__.add('pareto')
module.__globalnames__.add('permutation')
module.__globalnames__.add('poisson')
module.__globalnames__.add('power')
module.__globalnames__.add('rand')
module.__globalnames__.add('randint')
module.__globalnames__.add('randn')
module.__globalnames__.add('random_integers')
module.__globalnames__.add('random_sample')
module.__globalnames__.add('rayleigh')
module.__globalnames__.add('seed')
module.__globalnames__.add('set_state')
module.__globalnames__.add('shuffle')
module.__globalnames__.add('standard_cauchy')
module.__globalnames__.add('standard_exponential')
module.__globalnames__.add('standard_gamma')
module.__globalnames__.add('standard_normal')
module.__globalnames__.add('standard_t')
module.__globalnames__.add('triangular')
module.__globalnames__.add('uniform')
module.__globalnames__.add('vonmises')
module.__globalnames__.add('wald')
module.__globalnames__.add('weibull')
module.__globalnames__.add('zipf')
def hook_numpy_distutils(finder, module):
"""In a 'if sys.version_info[0] < 3:' block numpy.distutils does
an implicit relative import: 'import __config__'. This will not
work in Python3 so ignore it.
"""
finder.excludes.append("__config__")
def hook_numpy_f2py(finder, module):
""" numpy.f2py tries to import __svn_version__. Ignore when his fails.
"""
finder.excludes.append("__svn_version__")
def hook_numpy_core_umath(finder, module):
"""the numpy.core.umath module is an extension module and the numpy module
imports * from this module; define the list of global names available
to this module in order to avoid spurious errors about missing
modules"""
module.__globalnames__.add("add")
module.__globalnames__.add("absolute")
module.__globalnames__.add("arccos")
module.__globalnames__.add("arccosh")
module.__globalnames__.add("arcsin")
module.__globalnames__.add("arcsinh")
module.__globalnames__.add("arctan")
module.__globalnames__.add("arctanh")
module.__globalnames__.add("bitwise_and")
module.__globalnames__.add("bitwise_or")
module.__globalnames__.add("bitwise_xor")
module.__globalnames__.add("ceil")
module.__globalnames__.add("conjugate")
module.__globalnames__.add("cosh")
module.__globalnames__.add("divide")
module.__globalnames__.add("exp")
module.__globalnames__.add("e")
module.__globalnames__.add("fabs")
module.__globalnames__.add("floor")
module.__globalnames__.add("floor_divide")
module.__globalnames__.add("fmod")
module.__globalnames__.add("geterrobj")
module.__globalnames__.add("greater")
module.__globalnames__.add("hypot")
module.__globalnames__.add("invert")
module.__globalnames__.add("isfinite")
module.__globalnames__.add("isinf")
module.__globalnames__.add("isnan")
module.__globalnames__.add("less")
module.__globalnames__.add("left_shift")
module.__globalnames__.add("log")
module.__globalnames__.add("logical_and")
module.__globalnames__.add("logical_not")
module.__globalnames__.add("logical_or")
module.__globalnames__.add("logical_xor")
module.__globalnames__.add("maximum")
module.__globalnames__.add("minimum")
module.__globalnames__.add("multiply")
module.__globalnames__.add("negative")
module.__globalnames__.add("not_equal")
module.__globalnames__.add("power")
module.__globalnames__.add("remainder")
module.__globalnames__.add("right_shift")
module.__globalnames__.add("sign")
module.__globalnames__.add("signbit")
module.__globalnames__.add("sinh")
module.__globalnames__.add("sqrt")
module.__globalnames__.add("tan")
module.__globalnames__.add("tanh")
module.__globalnames__.add("true_divide")
def hook_numpy_core_numerictypes(finder, module):
"""the numpy.core.numerictypes module adds a number of items to itself
dynamically; define these to avoid spurious errors about missing
modules"""
module.__globalnames__.add("bool_")
module.__globalnames__.add("cdouble")
module.__globalnames__.add("complexfloating")
module.__globalnames__.add("csingle")
module.__globalnames__.add("double")
module.__globalnames__.add("longdouble")
module.__globalnames__.add("float32")
module.__globalnames__.add("float64")
module.__globalnames__.add("float_")
module.__globalnames__.add("inexact")
module.__globalnames__.add("integer")
module.__globalnames__.add("intc")
module.__globalnames__.add("int32")
module.__globalnames__.add("number")
module.__globalnames__.add("single")
def hook_numpy_core(finder, module):
finder.ignore("numpy.core._dotblas")
| gpl-2.0 |
idlead/scikit-learn | examples/neural_networks/plot_mlp_training_curves.py | 56 | 3596 | """
========================================================
Compare Stochastic learning strategies for MLPClassifier
========================================================
This example visualizes some training loss curves for different stochastic
learning strategies, including SGD and Adam. Because of time-constraints, we
use several small datasets, for which L-BFGS might be more suitable. The
general trend shown in these examples seems to carry over to larger datasets,
however.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn import datasets
# different learning rate schedules and momentum parameters
params = [{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': 0,
'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0,
'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'algorithm': 'adam'}]
labels = ["constant learning-rate", "constant with momentum",
"constant with Nesterov's momentum",
"inv-scaling learning-rate", "inv-scaling with momentum",
"inv-scaling with Nesterov's momentum", "adam"]
plot_args = [{'c': 'red', 'linestyle': '-'},
{'c': 'green', 'linestyle': '-'},
{'c': 'blue', 'linestyle': '-'},
{'c': 'red', 'linestyle': '--'},
{'c': 'green', 'linestyle': '--'},
{'c': 'blue', 'linestyle': '--'},
{'c': 'black', 'linestyle': '-'}]
def plot_on_dataset(X, y, ax, name):
# for each dataset, plot learning for each learning strategy
print("\nlearning on dataset %s" % name)
ax.set_title(name)
X = MinMaxScaler().fit_transform(X)
mlps = []
if name == "digits":
# digits is larger but converges fairly quickly
max_iter = 15
else:
max_iter = 400
for label, param in zip(labels, params):
print("training: %s" % label)
mlp = MLPClassifier(verbose=0, random_state=0,
max_iter=max_iter, **param)
mlp.fit(X, y)
mlps.append(mlp)
print("Training set score: %f" % mlp.score(X, y))
print("Training set loss: %f" % mlp.loss_)
for mlp, label, args in zip(mlps, labels, plot_args):
ax.plot(mlp.loss_curve_, label=label, **args)
fig, axes = plt.subplots(2, 2, figsize=(15, 10))
# load / generate some toy datasets
iris = datasets.load_iris()
digits = datasets.load_digits()
data_sets = [(iris.data, iris.target),
(digits.data, digits.target),
datasets.make_circles(noise=0.2, factor=0.5, random_state=1),
datasets.make_moons(noise=0.3, random_state=0)]
for ax, data, name in zip(axes.ravel(), data_sets, ['iris', 'digits',
'circles', 'moons']):
plot_on_dataset(*data, ax=ax, name=name)
fig.legend(ax.get_lines(), labels=labels, ncol=3, loc="upper center")
plt.show()
| bsd-3-clause |
musteryu/Data-Mining | assignment-黄煜-3120100937/question_4.py | 1 | 1258 | from mylib import *
import os,sys
import numpy as np
import matplotlib.pyplot as plt
import math
import random
from time import time
if __name__ == '__main__':
DIR_PATH = sys.path[0] + '\\'
# normal distribution vector file
nvctr_file1 = DIR_PATH + 'normal_500_1.txt'
nvctr_file2 = DIR_PATH + 'normal_500_2.txt'
# uniform distribution vector file
uvctr_file1 = DIR_PATH + 'uniform_500_1.txt'
uvctr_file2 = DIR_PATH + 'uniform_500_2.txt'
# normal distribution matrix
nmtrx = fget_mtrx(nvctr_file1) + fget_mtrx(nvctr_file2)
# uniform distribution matrix
umtrx = fget_mtrx(uvctr_file1) + fget_mtrx(uvctr_file2)
# plist lists the numbers of dimensions after DCT compression
# nplist is for normal distribution data set
# uplist is for uniform distribution data set
nplist = []
uplist = []
for vector in nmtrx:
u, p = my_DCT_compression(vector, 0.01)
nplist.append(p)
for vector in umtrx:
u, p = my_DCT_compression(vector, 0.01)
uplist.append(p)
# draw histogram
plt.figure(1)
plt.subplot(2,1,1)
my_hist(nplist, bucket_size = 1, flat_edge = False, title = "For normal distribution data set")
plt.subplot(2,1,2)
my_hist(uplist, bucket_size = 1, flat_edge = False, title = "For uniform distribution data set")
plt.show()
| gpl-2.0 |
riddlezyc/geolab | src/structure/Z.py | 1 | 1474 | # -*- coding: utf-8 -*-
# from framesplit import trajectory
# too slow using this module
import matplotlib.pyplot as plt
dirName = r"F:\simulations\asphaltenes\na-mont\TMBO-oil\water\373-continue/"
xyzName = 'all.xyz'
hetero = 'O' # 'oh' 'N' 'sp' 'O' 'Np' 'sp'
with open(dirName + xyzName, 'r') as foo:
coords = foo.readlines()
nAtoms = int(coords[0])
nFrames = int(len(coords) / (nAtoms + 2))
pos = []
for i in range(nFrames):
istart = i * (nAtoms + 2)
iend = (i + 1) * (nAtoms + 2)
pos.append(coords[istart:iend])
# for i in range(200):
# print coords[i]
heteroatom = 0
# all of my molecules have fewer than 200 atoms
for i in range(200):
x = pos[0][i].split()[0]
if x == hetero:
heteroatom = i
break
heteroZ = []
for p in pos:
# print p[heteroatom].split()[0]
zx = float(p[heteroatom].split()[3])
if zx < 10:
zx = zx + 80
heteroZ.append(zx)
with open(dirName + 'heteroZ.dat', 'w') as foo:
for i, z in enumerate(heteroZ):
print >> foo, "%3d %8.5f" % (i, z)
# energy plot
plt.figure(0, figsize=(8, 4))
figName = dirName + 'heteroZ.png'
plt.title('z of heteroatom', fontsize=20)
plt.plot(range(len(heteroZ)-1), heteroZ[1:], linewidth=2)
plt.grid(True)
plt.xlabel('steps')
plt.ylabel('Z')
plt.axis([0, len(heteroZ)*1.1, 0, max(heteroZ)*1.1])
plt.savefig(figName, format='png', dpi=300)
plt.close()
| gpl-3.0 |
leejw51/BumblebeeNet | Test/AddLayer.py | 1 | 1320 | import numpy as np
import matplotlib.pylab as plt
from MulLayer import MulLayer
class AddLayer:
def __init__ (self):
pass
def forward(self, x, y):
out = x + y
return out
def backward(self, dout):
dx = dout * 1
dy = dout * 1
return dx,dy
def test_add_layer():
apple = 100
apple_num = 2
orange = 150
orange_num = 3
tax = 1.1
mul_apple_layer = MulLayer()
mul_orange_layer = MulLayer()
add_apple_orange_layer = AddLayer()
mul_tax_layer = MulLayer()
apple_price = mul_apple_layer.forward( apple, apple_num)
orange_price = mul_orange_layer.forward( orange, orange_num)
all_price = add_apple_orange_layer.forward( apple_price, orange_price)
price = mul_tax_layer.forward( all_price, tax)
dprice = 1
dall_price, dtax = mul_tax_layer.backward( dprice)
dapple_price, dorange_price = add_apple_orange_layer.backward(dall_price)
dorange, dorange_num = mul_orange_layer.backward(dorange_price)
dapple, dapple_num = mul_apple_layer.backward( dapple_price)
print("price=", price)
print(dapple_num, dapple, dorange, dorange_num, dtax)
| mit |
joergsimon/gesture-analysis | analysis/feature_selection.py | 1 | 6744 | from analysis.preparation import labelMatrixToArray
from analysis.preparation import normalizeZeroClassArray
from visualise.trace_features import trace_feature_origin
from visualise.confusion_matrix import plot_confusion_matrix
import numpy as np
import sklearn
import sklearn.linear_model
import sklearn.preprocessing as pp
import sklearn.svm as svm
import sklearn.feature_selection as fs
from analysis.classification import fit_classifier
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
# Interesting References:
# RFECV:
# Guyon, I., Weston, J., Barnhill, S., & Vapnik, V. (2002). Gene selection for
# cancer classification using support vector machines. Mach. Learn.. 46(1-3). 389-422.
def feature_selection(train_data, train_labels, const):
train_labels_arr, exclude = labelMatrixToArray(train_labels, const.label_threshold)
train_data_clean = train_data.drop(exclude)
train_labels_arr, train_data_clean, _ = normalizeZeroClassArray(train_labels_arr, train_data_clean)
print "num features before selection: {}".format(train_data_clean.columns.size)
feature_index = variance_threshold(train_data_clean)
clf, clf_name, needs_scaling = fit_classifier(train_data_clean.values[:,feature_index], np.array(train_labels_arr))
prediction = clf.predict(get_values(train_data_clean, feature_index, needs_scaling))
print("report for {} after variance threshold".format(clf_name))
print(classification_report(train_labels_arr,prediction))
cnf_matrix = confusion_matrix(train_labels_arr, prediction)
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['0.0','1.0','2.0','3.0','4.0','5.0','6.0','7.0'],
title="Confusion Matrix for {} after variance threshold".format(clf_name))
trace_feature_origin(feature_index,const)
feature_index = rfe(train_data_clean,train_labels_arr)
clf, clf_name, needs_scaling = fit_classifier(train_data_clean.values[:, feature_index], np.array(train_labels_arr))
prediction = clf.predict(get_values(train_data_clean, feature_index, needs_scaling))
print("report for {} after RFE".format(clf_name))
print(classification_report(train_labels_arr, prediction))
cnf_matrix = confusion_matrix(train_labels_arr, prediction)
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['0.0','1.0','2.0','3.0','4.0','5.0','6.0','7.0'],
title="Confusion Matrix for {} after variance threshold".format(clf_name))
trace_feature_origin(feature_index, const)
feature_index = k_best_chi2(train_data_clean, train_labels_arr, 700)
clf, clf_name, needs_scaling = fit_classifier(train_data_clean.values[:, feature_index], np.array(train_labels_arr))
prediction = clf.predict(get_values(train_data_clean, feature_index, needs_scaling))
print("report for {} after Chi2".format(clf_name))
print(classification_report(train_labels_arr, prediction))
cnf_matrix = confusion_matrix(train_labels_arr, prediction)
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['0.0','1.0','2.0','3.0','4.0','5.0','6.0','7.0'],
title="Confusion Matrix for {} after variance threshold".format(clf_name))
trace_feature_origin(feature_index, const)
feature_index = rfe_cv_f1(train_data_clean, train_labels_arr)
clf, clf_name, needs_scaling = fit_classifier(train_data_clean.values[:, feature_index], np.array(train_labels_arr))
prediction = clf.predict(get_values(train_data_clean, feature_index, needs_scaling))
print("report for {} after RFECV".format(clf_name))
print(classification_report(train_labels_arr, prediction))
cnf_matrix = confusion_matrix(train_labels_arr, prediction)
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['0.0','1.0','2.0','3.0','4.0','5.0','6.0','7.0'],
title="Confusion Matrix for {} after variance threshold".format(clf_name))
trace_feature_origin(feature_index, const)
plt.show()
def get_values(data, feature_index, needs_scaling):
if needs_scaling:
values = data.values[:, feature_index]
minmax = pp.MinMaxScaler()
values = minmax.fit_transform(values)
return values
else:
return data.values[:, feature_index]
def variance_threshold(train_data):
# feature selection using VarianceThreshold filter
sel = fs.VarianceThreshold(threshold=(.8 * (1 - .8)))
fit = sel.fit(train_data.values)
col_index = fit.get_support(indices=True)
print "num features selected by VarianceThreshold: {}".format(len(col_index))
return col_index
def rfe(train_data, train_labels):
# important todo!
# todo: I think also for feature selection we should take care the 0 class is balanced!
# todo: if you use it that way, scale the features
print "Recursive eleminate features: "
svc = sklearn.linear_model.Lasso(alpha = 0.1) #svm.SVR(kernel="linear")
print "scale data"
values = train_data.values
minmax = pp.MinMaxScaler()
values = minmax.fit_transform(values) # pp.scale(values)
print "test fit."
svc.fit(values, np.array(train_labels))
print "run rfecv.."
rfecv = fs.RFE(estimator=svc, step=0.1, verbose=2)
rfecv.fit(values, np.array(train_labels))
print "get support..."
col_index = rfecv.get_support(indices=True)
print "num features selected by RFE(CV)/Lasso: {}".format(len(col_index))
return col_index
def rfe_cv_f1(train_data, train_labels):
# important todo!
# todo: I think also for feature selection we should take care the 0 class is balanced!
# todo: if you use it that way, scale the features
print "Recursive eleminate features: "
svc = svm.SVC(kernel="linear") #sklearn.linear_model.Lasso(alpha = 0.1)
print "scale data"
values = train_data.values
minmax = pp.MinMaxScaler()
values = minmax.fit_transform(values)#pp.scale(values)
print "test fit."
svc.fit(values, np.array(train_labels).astype(int))
print "run rfecv.."
rfecv = fs.RFECV(estimator=svc, step=0.05, verbose=2)
rfecv.fit(values, np.array(train_labels).astype(int))
print "get support..."
col_index = rfecv.get_support(indices=True)
print "num features selected by RFECV/SVR: {}".format(len(col_index))
return col_index
def k_best_chi2(train_data, train_labels, k):
values = train_data.values
if values.min() < 0:
values = values + abs(values.min())
kb = fs.SelectKBest(fs.chi2, k=k)
kb.fit(values, np.array(train_labels))
col_index = kb.get_support(indices=True)
print "num features selected by K-Best using chi2: {}".format(len(col_index))
return col_index | apache-2.0 |
brodoll/sms-tools | lectures/09-Sound-description/plots-code/centroid.py | 23 | 1086 | import numpy as np
import matplotlib.pyplot as plt
import essentia.standard as ess
M = 1024
N = 1024
H = 512
fs = 44100
spectrum = ess.Spectrum(size=N)
window = ess.Windowing(size=M, type='hann')
centroid = ess.Centroid(range=fs/2.0)
x = ess.MonoLoader(filename = '../../../sounds/speech-male.wav', sampleRate = fs)()
centroids = []
for frame in ess.FrameGenerator(x, frameSize=M, hopSize=H, startFromZero=True):
mX = spectrum(window(frame))
centroid_val = centroid(mX)
centroids.append(centroid_val)
centroids = np.array(centroids)
plt.figure(1, figsize=(9.5, 5))
plt.subplot(2,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.title('x (speech-male.wav)')
plt.subplot(2,1,2)
frmTime = H*np.arange(centroids.size)/float(fs)
plt.plot(frmTime, centroids, 'g', lw=1.5)
plt.axis([0, x.size/float(fs), min(centroids), max(centroids)])
plt.xlabel('time (sec)')
plt.ylabel('frequency (Hz)')
plt.title('spectral centroid')
plt.tight_layout()
plt.savefig('centroid.png')
plt.show()
| agpl-3.0 |
Rignak/Scripts-Python | DeepLearning/TagPrediction/TagPrediction.py | 1 | 10291 | import numpy as np
import matplotlib.pyplot as plt
import os
from os.path import join
import cv2
from skimage.transform import resize
from tqdm import tqdm
from datetime import datetime
import functools
os.environ['TF_CPP_MIN_VLOG_LEVEL'] = '3'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from keras import optimizers
from keras.models import Model, load_model
from keras.layers import Flatten, Dense, Conv2D, Dropout, MaxPooling2D, BatchNormalization, Input
from keras.callbacks import ModelCheckpoint, Callback
import json
import tensorflow as tf
tf.reset_default_graph()
from keras import backend as K
K.image_dim_ordering()
###################### Hyperparameters ######################
# Model parameters
INPUT_SHAPE = (256, 256, 3)
IMAGE_NUMBER = 30000
WEIGHT_FILENAME = os.path.join('models', 'tag_prediction.hdf5')
ROOT = 'dress'
VALIDATION_SPLIT = 0.9
TAG_END = "_dress"
FILE_END = 'S'
MIN_TAG_USE = 500
# Training parameters
BATCH_SIZE = 8
EPOCHS = 100
LEARNING_RATE = 1 * 10 ** -3
DROPOUT = 0.5
MOMENTUM = 0.5
WEIGHT_DECAY = 4 * 10 ** -5 # weight decay
ACTIVATION = 'selu'
NEURON_BASIS = 32
class PlotLearning(Callback):
def __init__(self, examples=False):
super().__init__()
self.examples = examples
self.x = []
self.losses, self.val_losses = [], []
self.logs = []
def on_epoch_end(self, epoch, logs={}):
self.logs.append(logs)
self.x.append(epoch)
self.losses.append(logs.get('loss'))
self.val_losses.append(logs.get('val_loss'))
plt.yscale('log')
plt.plot(self.x, self.losses)
plt.plot(self.x, self.val_losses)
plt.xlabel('Epochs')
plt.ylabel('Crossentropy')
plt.legend(['Training', 'Validation'])
plt.tight_layout()
plt.savefig('plot.png')
plt.close()
if self.examples:
z = self.model.predict(self.model.example[0][:6])
plot_example(self.model.example[0][:6], self.model.example[1][:6], z, self.model.labels)
plt.savefig(f"epochs/epoch{self.x[-1]}.png")
plt.close()
def plot_example(xs, ys, zs, labels):
n = xs.shape[0]
plt.figure(figsize=(12, 8))
plt.tight_layout()
for i, (x, y, z) in enumerate(zip(xs, ys, zs)):
if i != 0:
tick_label = [' ' for label in labels]
else:
tick_label = labels
plt.subplot(3, n, i + 1)
plt.imshow(x)
plt.subplot(3, n, i + n + 1)
plt.barh(labels, y, tick_label=tick_label)
plt.xlim(0, 1)
plt.subplot(3, n, i + 2 * n + 1)
plt.barh(labels, z, tick_label=tick_label)
plt.xlim(0, 1)
def import_model(tag_number, input_shape=INPUT_SHAPE):
inputs = Input(input_shape)
layer = Conv2D(NEURON_BASIS, (3, 3), activation=ACTIVATION, padding='same')(inputs)
layer = Conv2D(NEURON_BASIS, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = Conv2D(NEURON_BASIS, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = MaxPooling2D(pool_size=(2, 2))(layer)
layer = Conv2D(NEURON_BASIS * 2, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = Conv2D(NEURON_BASIS * 2, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = Conv2D(NEURON_BASIS * 2, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = MaxPooling2D(pool_size=(2, 2))(layer)
layer = Conv2D(NEURON_BASIS * 4, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = Conv2D(NEURON_BASIS * 4, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = Conv2D(NEURON_BASIS * 4, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = MaxPooling2D(pool_size=(2, 2))(layer)
layer = Conv2D(NEURON_BASIS * 8, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = Conv2D(NEURON_BASIS * 8, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = Conv2D(NEURON_BASIS * 8, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = Conv2D(NEURON_BASIS * 4, (1, 1), activation=ACTIVATION, padding='same')(layer)
layer = MaxPooling2D(pool_size=(2, 2))(layer)
layer = Flatten()(layer)
layer = BatchNormalization()(layer)
layer = Dense(512, activation=ACTIVATION)(layer)
layer = Dropout(DROPOUT)(layer)
layer = Dense(2048, activation=ACTIVATION)(layer)
layer = Dropout(DROPOUT)(layer)
layer = Dense(tag_number, activation='sigmoid')(layer)
model = Model(inputs=[inputs], outputs=[layer])
sgd = optimizers.SGD(lr=LEARNING_RATE, momentum=MOMENTUM, nesterov=True)
model.compile(optimizer='adam', loss='binary_crossentropy')
model.summary()
return model
def get_tags(root, files, min_tag_use=MIN_TAG_USE, suffix=TAG_END):
with open(join('..', 'imgs', root + '.json'), 'r') as file:
tags = json.load(file)
tag_count = {}
files = [os.path.split(file)[-1] for file in files]
for key, value in tags.items():
if key + f'{FILE_END}.png' not in files:
continue
for tag in value.split():
if tag not in tag_count:
tag_count[tag] = 1
else:
tag_count[tag] += 1
with open(join('..', 'imgs', root + '_count.json'), 'w') as file:
json.dump(tag_count, file, sort_keys=True, indent=4)
print(f'Have {len(list(tag_count.keys()))} tags')
tags_count = {tag: count for tag, count in tag_count.items() if count > min_tag_use and tag.endswith(suffix)}
print(f'Keep tags with >{min_tag_use} use: {len(tags_count)} tags')
for tag, count in tags_count.items():
print(f'{tag}: {count}')
input('Continue?')
return tags, tags_count
def make_output(files, tags, tags_count):
output = {}
for file in tqdm(files):
i = os.path.splitext(os.path.split(file)[-1])[0]
if FILE_END:
i = i[:-1]
truth = tags[i].split()
output[file] = []
for tag in tags_count.keys():
if tag in truth:
output[file].append(1)
else:
output[file].append(0)
return output
def metrics(model, files, output, tags_count):
true_positives = np.zeros(len(output))
positives = np.zeros(len(output))
truth = np.zeros(len(output))
for file in tqdm(files):
img = image_process(file)
img = np.expand_dims(img, axis=0)
prediction = model.predict(img)[0]
for i, coef in enumerate(prediction):
f = list(tags_count.values())[i] / len(files)
if coef > f:
positives[i] += 1
if output[file][i] > f:
truth[i] += 1
if output[file][i] > f and coef > f:
true_positives[i] += 1
print('Tag\tPrecision\tRecall')
for i, k, l, key in zip(true_positives, positives, truth, tags_count.keys()):
if k != 0:
precision = int(i / k * 1000) / 100
else:
precision = 0
if l != 0:
recall = int(i / l * 1000) / 100
else:
recall = 0
print(f'{key}\t{precision}%\t{recall}%\t')
# @functools.lru_cache(maxsize=IMAGE_NUMBER)
def image_process(file):
img = cv2.imread(file)
img = img[:, :, [2, 1, 0]]
# img = resize(img, INPUT_SHAPE, mode='reflect', preserve_range=True, anti_aliasing=True)
return img
def generator(files, output, batch_size=BATCH_SIZE):
while True:
batch_files = np.random.choice(files, size=batch_size)
# j += 1
# print(index, j, [(k + j) % n for k in index], [(k + j) for k in index], index+j)
batch_output = np.array([output[file] for file in batch_files])
batch_input = np.zeros([batch_size] + [shape for shape in INPUT_SHAPE])
for i, file in enumerate(batch_files):
batch_input[i] = image_process(file)
yield batch_input / 255, batch_output
def train(model, files, output, tags_count, weight_filename=WEIGHT_FILENAME,
validation_split=VALIDATION_SPLIT, epochs=EPOCHS, batch_size=BATCH_SIZE):
class_weights = {i: len(files) / count for i, count in enumerate(tags_count.values())}
index = int(len(files) * validation_split)
training_generator = generator(files[:index], output)
validation_generator = generator(files[index:], output)
calls = [ModelCheckpoint(weight_filename, save_best_only=True),
PlotLearning(examples=True)]
model.example = next(validation_generator)
model.labels = list(tags_count.keys())
model.fit_generator(generator=training_generator,
validation_data=validation_generator,
verbose=1,
steps_per_epoch=int(len(files) * validation_split) // batch_size,
validation_steps=int(len(files) * (1 - validation_split)) // batch_size,
epochs=epochs,
callbacks=calls,
class_weight=class_weights
)
def test(files, output, tags_count, weight_filename=WEIGHT_FILENAME):
model = load_model(weight_filename)
metrics(model, files, output, tags_count)
image_generator = generator(files, output, batch_size=1)
fs = [count / len(files) for count in tags_count.values()]
fs = [0.5 for i in fs]
while True:
print('---')
im, truth = next(image_generator)
truth_string = ' '.join([list(tags_count.keys())[j] for j, v in enumerate(truth[0]) if v > fs[j]])
print('TRUTH:', truth_string)
print(im.shape)
prediction = model.predict(im)[0]
prediction_string = ' '.join([list(tags_count.keys())[j] for j, v in enumerate(prediction) if v > fs[j]])
print('PREDICTION:', prediction_string)
plt.imshow(im[0])
plt.show()
def main():
root = join('..', 'imgs', ROOT)
files = [join(root, folder, file) for folder in os.listdir(root) for file in os.listdir(join(root, folder))][
:IMAGE_NUMBER]
tags, tags_count = get_tags(ROOT, files)
output = make_output(files, tags, tags_count)
# test(files, output, tags_count)
model = import_model(len(tags_count))
train(model, files, output, tags_count)
print('DONE')
if __name__ == '__main__':
main()
| gpl-3.0 |
cogeorg/BlackRhino | networkx/drawing/nx_pylab.py | 1 | 32861 | # Copyright (C) 2004-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Author: Aric Hagberg (hagberg@lanl.gov)
"""
**********
Matplotlib
**********
Draw networks with matplotlib.
See Also
--------
matplotlib: http://matplotlib.org/
pygraphviz: http://pygraphviz.github.io/
"""
import networkx as nx
from networkx.drawing.layout import shell_layout,\
circular_layout,spectral_layout,spring_layout,random_layout
__all__ = ['draw',
'draw_networkx',
'draw_networkx_nodes',
'draw_networkx_edges',
'draw_networkx_labels',
'draw_networkx_edge_labels',
'draw_circular',
'draw_random',
'draw_spectral',
'draw_spring',
'draw_shell']
def draw(G, pos=None, ax=None, **kwds):
"""Draw the graph G with Matplotlib.
Draw the graph as a simple representation with no node
labels or edge labels and using the full Matplotlib figure area
and no axis labels by default. See draw_networkx() for more
full-featured drawing that allows title, axis labels etc.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See :py:mod:`networkx.drawing.layout` for functions that
compute node positions.
ax : Matplotlib Axes object, optional
Draw the graph in specified Matplotlib axes.
kwds : optional keywords
See networkx.draw_networkx() for a description of optional keywords.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
See Also
--------
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
Notes
-----
This function has the same name as pylab.draw and pyplot.draw
so beware when using
>>> from networkx import *
since you might overwrite the pylab.draw function.
With pyplot use
>>> import matplotlib.pyplot as plt
>>> import networkx as nx
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G) # networkx draw()
>>> plt.draw() # pyplot draw()
Also see the NetworkX drawing examples at
http://networkx.readthedocs.io/en/latest/gallery.html
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
cf = plt.gcf()
else:
cf = ax.get_figure()
cf.set_facecolor('w')
if ax is None:
if cf._axstack() is None:
ax = cf.add_axes((0, 0, 1, 1))
else:
ax = cf.gca()
if 'with_labels' not in kwds:
kwds['with_labels'] = 'labels' in kwds
try:
draw_networkx(G, pos=pos, ax=ax, **kwds)
ax.set_axis_off()
plt.draw_if_interactive()
except:
raise
return
def draw_networkx(G, pos=None, arrows=True, with_labels=True, **kwds):
"""Draw the graph G using Matplotlib.
Draw the graph with Matplotlib with options for node positions,
labeling, titles, and many other drawing features.
See draw() for simple drawing without labels or axes.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See :py:mod:`networkx.drawing.layout` for functions that
compute node positions.
arrows : bool, optional (default=True)
For directed graphs, if True draw arrowheads.
with_labels : bool, optional (default=True)
Set to True to draw labels on the nodes.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional (default G.nodes())
Draw only specified nodes
edgelist : list, optional (default=G.edges())
Draw only specified edges
node_size : scalar or array, optional (default=300)
Size of nodes. If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats, (default='r')
Node color. Can be a single color format string,
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string, optional (default='o')
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8'.
alpha : float, optional (default=1.0)
The node and edge transparency
cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of nodes
vmin,vmax : float, optional (default=None)
Minimum and maximum for node colormap scaling
linewidths : [None | scalar | sequence]
Line width of symbol border (default =1.0)
width : float, optional (default=1.0)
Line width of edges
    edge_color : color string, or array of floats (default='k')
Edge color. Can be a single color format string,
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
edge_cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of edges
edge_vmin,edge_vmax : floats, optional (default=None)
Minimum and maximum for edge colormap scaling
style : string, optional (default='solid')
        Edge line style (solid|dashed|dotted|dashdot)
labels : dictionary, optional (default=None)
Node labels in a dictionary keyed by node of text labels
font_size : int, optional (default=12)
Font size for text labels
font_color : string, optional (default='k' black)
Font color string
font_weight : string, optional (default='normal')
Font weight
font_family : string, optional (default='sans-serif')
Font family
label : string, optional
Label for graph legend
Notes
-----
For directed graphs, "arrows" (actually just thicker stubs) are drawn
at the head end. Arrows can be turned off with keyword arrows=False.
Yes, it is ugly but drawing proper arrows with Matplotlib this
way is tricky.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
>>> import matplotlib.pyplot as plt
    >>> limits=plt.axis('off') # turn off axis
Also see the NetworkX drawing examples at
http://networkx.readthedocs.io/en/latest/gallery.html
See Also
--------
draw()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if pos is None:
pos = nx.drawing.spring_layout(G) # default to spring layout
node_collection = draw_networkx_nodes(G, pos, **kwds)
edge_collection = draw_networkx_edges(G, pos, arrows=arrows, **kwds)
if with_labels:
draw_networkx_labels(G, pos, **kwds)
plt.draw_if_interactive()
def draw_networkx_nodes(G, pos,
nodelist=None,
node_size=300,
node_color='r',
node_shape='o',
alpha=1.0,
cmap=None,
vmin=None,
vmax=None,
ax=None,
linewidths=None,
label=None,
**kwds):
"""Draw the nodes of the graph G.
This draws only the nodes of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional
Draw only specified nodes (default G.nodes())
node_size : scalar or array
Size of nodes (default=300). If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats
Node color. Can be a single color format string (default='r'),
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8' (default='o').
alpha : float
The node transparency (default=1.0)
cmap : Matplotlib colormap
Colormap for mapping intensities of nodes (default=None)
vmin,vmax : floats
Minimum and maximum for node colormap scaling (default=None)
linewidths : [None | scalar | sequence]
Line width of symbol border (default =1.0)
label : [None| string]
Label for legend
Returns
-------
matplotlib.collections.PathCollection
`PathCollection` of the nodes.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nodes=nx.draw_networkx_nodes(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.readthedocs.io/en/latest/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
import collections
try:
import matplotlib.pyplot as plt
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if nodelist is None:
nodelist = list(G)
if not nodelist or len(nodelist) == 0: # empty nodelist, no drawing
return None
try:
xy = numpy.asarray([pos[v] for v in nodelist])
except KeyError as e:
raise nx.NetworkXError('Node %s has no position.'%e)
except ValueError:
raise nx.NetworkXError('Bad value in node positions.')
if isinstance(alpha, collections.Iterable):
node_color = apply_alpha(node_color, alpha, nodelist, cmap, vmin, vmax)
alpha = None
node_collection = ax.scatter(xy[:, 0], xy[:, 1],
s=node_size,
c=node_color,
marker=node_shape,
cmap=cmap,
vmin=vmin,
vmax=vmax,
alpha=alpha,
linewidths=linewidths,
label=label)
node_collection.set_zorder(2)
return node_collection
def draw_networkx_edges(G, pos,
edgelist=None,
width=1.0,
edge_color='k',
style='solid',
alpha=1.0,
edge_cmap=None,
edge_vmin=None,
edge_vmax=None,
ax=None,
arrows=True,
label=None,
**kwds):
"""Draw the edges of the graph G.
This draws only the edges of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
edgelist : collection of edge tuples
Draw only specified edges(default=G.edges())
width : float, or array of floats
Line width of edges (default=1.0)
edge_color : color string, or array of floats
        Edge color. Can be a single color format string (default='k'),
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
style : string
        Edge line style (default='solid') (solid|dashed|dotted|dashdot)
alpha : float
The edge transparency (default=1.0)
    edge_cmap : Matplotlib colormap
Colormap for mapping intensities of edges (default=None)
edge_vmin,edge_vmax : floats
Minimum and maximum for edge colormap scaling (default=None)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
arrows : bool, optional (default=True)
For directed graphs, if True draw arrowheads.
label : [None| string]
Label for legend
Returns
-------
matplotlib.collection.LineCollection
`LineCollection` of the edges
Notes
-----
For directed graphs, "arrows" (actually just thicker stubs) are drawn
at the head end. Arrows can be turned off with keyword arrows=False.
Yes, it is ugly but drawing proper arrows with Matplotlib this
way is tricky.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> edges=nx.draw_networkx_edges(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.readthedocs.io/en/latest/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
from matplotlib.colors import colorConverter, Colormap
from matplotlib.collections import LineCollection
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if edgelist is None:
edgelist = list(G.edges())
if not edgelist or len(edgelist) == 0: # no edges!
return None
# set edge positions
edge_pos = numpy.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist])
if not cb.iterable(width):
lw = (width,)
else:
lw = width
if not cb.is_string_like(edge_color) \
and cb.iterable(edge_color) \
and len(edge_color) == len(edge_pos):
if numpy.alltrue([cb.is_string_like(c)
for c in edge_color]):
# (should check ALL elements)
# list of color letters such as ['k','r','k',...]
edge_colors = tuple([colorConverter.to_rgba(c, alpha)
for c in edge_color])
elif numpy.alltrue([not cb.is_string_like(c)
for c in edge_color]):
# If color specs are given as (rgb) or (rgba) tuples, we're OK
if numpy.alltrue([cb.iterable(c) and len(c) in (3, 4)
for c in edge_color]):
edge_colors = tuple(edge_color)
else:
# numbers (which are going to be mapped with a colormap)
edge_colors = None
else:
raise ValueError('edge_color must consist of either color names or numbers')
else:
if cb.is_string_like(edge_color) or len(edge_color) == 1:
edge_colors = (colorConverter.to_rgba(edge_color, alpha), )
else:
            raise ValueError('edge_color must be a single color or list of exactly m colors where m is the number of edges')
edge_collection = LineCollection(edge_pos,
colors=edge_colors,
linewidths=lw,
antialiaseds=(1,),
linestyle=style,
transOffset = ax.transData,
)
edge_collection.set_zorder(1) # edges go behind nodes
edge_collection.set_label(label)
ax.add_collection(edge_collection)
# Note: there was a bug in mpl regarding the handling of alpha values for
# each line in a LineCollection. It was fixed in matplotlib in r7184 and
# r7189 (June 6 2009). We should then not set the alpha value globally,
# since the user can instead provide per-edge alphas now. Only set it
# globally if provided as a scalar.
if cb.is_numlike(alpha):
edge_collection.set_alpha(alpha)
if edge_colors is None:
if edge_cmap is not None:
assert(isinstance(edge_cmap, Colormap))
edge_collection.set_array(numpy.asarray(edge_color))
edge_collection.set_cmap(edge_cmap)
if edge_vmin is not None or edge_vmax is not None:
edge_collection.set_clim(edge_vmin, edge_vmax)
else:
edge_collection.autoscale()
arrow_collection = None
if G.is_directed() and arrows:
# a directed graph hack
# draw thick line segments at head end of edge
# waiting for someone else to implement arrows that will work
arrow_colors = edge_colors
a_pos = []
p = 1.0-0.25 # make head segment 25 percent of edge length
for src, dst in edge_pos:
x1, y1 = src
x2, y2 = dst
dx = x2-x1 # x offset
dy = y2-y1 # y offset
d = numpy.sqrt(float(dx**2 + dy**2)) # length of edge
if d == 0: # source and target at same position
continue
if dx == 0: # vertical edge
xa = x2
ya = dy*p+y1
if dy == 0: # horizontal edge
ya = y2
xa = dx*p+x1
else:
theta = numpy.arctan2(dy, dx)
xa = p*d*numpy.cos(theta)+x1
ya = p*d*numpy.sin(theta)+y1
a_pos.append(((xa, ya), (x2, y2)))
arrow_collection = LineCollection(a_pos,
colors=arrow_colors,
linewidths=[4*ww for ww in lw],
antialiaseds=(1,),
transOffset = ax.transData,
)
arrow_collection.set_zorder(1) # edges go behind nodes
arrow_collection.set_label(label)
ax.add_collection(arrow_collection)
# update view
minx = numpy.amin(numpy.ravel(edge_pos[:, :, 0]))
maxx = numpy.amax(numpy.ravel(edge_pos[:, :, 0]))
miny = numpy.amin(numpy.ravel(edge_pos[:, :, 1]))
maxy = numpy.amax(numpy.ravel(edge_pos[:, :, 1]))
w = maxx-minx
h = maxy-miny
padx, pady = 0.05*w, 0.05*h
corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
ax.update_datalim(corners)
ax.autoscale_view()
# if arrow_collection:
return edge_collection
def draw_networkx_labels(G, pos,
labels=None,
font_size=12,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
bbox=None,
ax=None,
**kwds):
"""Draw node labels on the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
labels : dictionary, optional (default=None)
Node labels in a dictionary keyed by node of text labels
font_size : int
Font size for text labels (default=12)
font_color : string
Font color string (default='k' black)
font_family : string
Font family (default='sans-serif')
font_weight : string
Font weight (default='normal')
alpha : float
The text transparency (default=1.0)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
Returns
-------
dict
`dict` of labels keyed on the nodes
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> labels=nx.draw_networkx_labels(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.readthedocs.io/en/latest/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if labels is None:
labels = dict((n, n) for n in G.nodes())
# set optional alignment
horizontalalignment = kwds.get('horizontalalignment', 'center')
verticalalignment = kwds.get('verticalalignment', 'center')
text_items = {} # there is no text collection so we'll fake one
for n, label in labels.items():
(x, y) = pos[n]
if not cb.is_string_like(label):
label = str(label) # this will cause "1" and 1 to be labeled the same
t = ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
alpha=alpha,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
transform=ax.transData,
bbox=bbox,
clip_on=True,
)
text_items[n] = t
return text_items
def draw_networkx_edge_labels(G, pos,
edge_labels=None,
label_pos=0.5,
font_size=10,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
bbox=None,
ax=None,
rotate=True,
**kwds):
"""Draw edge labels.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
alpha : float
The text transparency (default=1.0)
edge_labels : dictionary
Edge labels in a dictionary keyed by edge two-tuple of text
labels (default=None). Only labels for the keys in the dictionary
are drawn.
label_pos : float
Position of edge label along edge (0=head, 0.5=center, 1=tail)
font_size : int
        Font size for text labels (default=10)
font_color : string
Font color string (default='k' black)
font_weight : string
Font weight (default='normal')
font_family : string
Font family (default='sans-serif')
bbox : Matplotlib bbox
Specify text box shape and colors.
clip_on : bool
Turn on clipping at axis boundaries (default=True)
Returns
-------
dict
`dict` of labels keyed on the edges
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> edge_labels=nx.draw_networkx_edge_labels(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.readthedocs.io/en/latest/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
"""
try:
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if edge_labels is None:
labels = dict(((u, v), d) for u, v, d in G.edges(data=True))
else:
labels = edge_labels
text_items = {}
for (n1, n2), label in labels.items():
(x1, y1) = pos[n1]
(x2, y2) = pos[n2]
(x, y) = (x1 * label_pos + x2 * (1.0 - label_pos),
y1 * label_pos + y2 * (1.0 - label_pos))
if rotate:
angle = numpy.arctan2(y2-y1, x2-x1)/(2.0*numpy.pi)*360 # degrees
# make label orientation "right-side-up"
if angle > 90:
angle -= 180
if angle < - 90:
angle += 180
# transform data coordinate angle to screen coordinate angle
xy = numpy.array((x, y))
trans_angle = ax.transData.transform_angles(numpy.array((angle,)),
xy.reshape((1, 2)))[0]
else:
trans_angle = 0.0
# use default box of white with white border
if bbox is None:
bbox = dict(boxstyle='round',
ec=(1.0, 1.0, 1.0),
fc=(1.0, 1.0, 1.0),
)
if not cb.is_string_like(label):
label = str(label) # this will cause "1" and 1 to be labeled the same
# set optional alignment
horizontalalignment = kwds.get('horizontalalignment', 'center')
verticalalignment = kwds.get('verticalalignment', 'center')
t = ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
alpha=alpha,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
rotation=trans_angle,
transform=ax.transData,
bbox=bbox,
zorder=1,
clip_on=True,
)
text_items[(n1, n2)] = t
return text_items
def draw_circular(G, **kwargs):
"""Draw the graph G with a circular layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, circular_layout(G), **kwargs)
def draw_random(G, **kwargs):
"""Draw the graph G with a random layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, random_layout(G), **kwargs)
def draw_spectral(G, **kwargs):
"""Draw the graph G with a spectral layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, spectral_layout(G), **kwargs)
def draw_spring(G, **kwargs):
"""Draw the graph G with a spring layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, spring_layout(G), **kwargs)
def draw_shell(G, **kwargs):
"""Draw networkx graph with shell layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
nlist = kwargs.get('nlist', None)
if nlist is not None:
del(kwargs['nlist'])
draw(G, shell_layout(G, nlist=nlist), **kwargs)
def draw_nx(G, pos, **kwds):
"""For backward compatibility; use draw or draw_networkx."""
draw(G, pos, **kwds)
def apply_alpha(colors, alpha, elem_list, cmap=None, vmin=None, vmax=None):
"""Apply an alpha (or list of alphas) to the colors provided.
Parameters
----------
    colors : color string, or array of floats
Color of element. Can be a single color format string (default='r'),
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
alpha : float or array of floats
Alpha values for elements. This can be a single alpha value, in
which case it will be applied to all the elements of color. Otherwise,
if it is an array, the elements of alpha will be applied to the colors
in order (cycling through alpha multiple times if necessary).
elem_list : array of networkx objects
The list of elements which are being colored. These could be nodes, edges
or labels.
cmap : matplotlib colormap
Color map for use if colors is a list of floats corresponding to points on
a color mapping.
vmin, vmax : float
Minimum and maximum values for normalizing colors if a color mapping is used.
Returns
-------
rgba_colors : numpy ndarray
Array containing RGBA format values for each of the node colours.
"""
import numbers
import itertools
try:
import numpy
from matplotlib.colors import colorConverter
import matplotlib.cm as cm
except ImportError:
raise ImportError("Matplotlib required for draw()")
# If we have been provided with a list of numbers as long as elem_list, apply the color mapping.
if len(colors) == len(elem_list) and isinstance(colors[0], numbers.Number):
mapper = cm.ScalarMappable(cmap=cmap)
mapper.set_clim(vmin, vmax)
rgba_colors = mapper.to_rgba(colors)
# Otherwise, convert colors to matplotlib's RGB using the colorConverter object.
# These are converted to numpy ndarrays to be consistent with the to_rgba method of ScalarMappable.
else:
try:
rgba_colors = numpy.array([colorConverter.to_rgba(colors)])
except ValueError:
rgba_colors = numpy.array([colorConverter.to_rgba(color) for color in colors])
# Set the final column of the rgba_colors to have the relevant alpha values.
try:
# If alpha is longer than the number of colors, resize to the number of elements.
# Also, if rgba_colors.size (the number of elements of rgba_colors) is the same as the number of
# elements, resize the array, to avoid it being interpreted as a colormap by scatter()
if len(alpha) > len(rgba_colors) or rgba_colors.size == len(elem_list):
rgba_colors.resize((len(elem_list), 4))
rgba_colors[1:, 0] = rgba_colors[0, 0]
rgba_colors[1:, 1] = rgba_colors[0, 1]
rgba_colors[1:, 2] = rgba_colors[0, 2]
rgba_colors[:, 3] = list(itertools.islice(itertools.cycle(alpha), len(rgba_colors)))
except TypeError:
rgba_colors[:, -1] = alpha
return rgba_colors
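# Illustrative sketch (not part of the library API; names and numbers are made
# up for demonstration): mapping three scalar node values through a colormap
# while cycling a two-element alpha list.
#
#     import matplotlib.cm
#     rgba = apply_alpha([0.1, 0.5, 0.9], [0.2, 0.8], ['a', 'b', 'c'],
#                        cmap=matplotlib.cm.jet, vmin=0.0, vmax=1.0)
#     # rgba has shape (3, 4) and its alpha column is [0.2, 0.8, 0.2]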
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import matplotlib as mpl
mpl.use('PS', warn=False)
import matplotlib.pyplot as plt
except:
raise SkipTest("matplotlib not available")
| gpl-3.0 |
yavalvas/yav_com | build/matplotlib/lib/matplotlib/dviread.py | 11 | 33923 | """
An experimental module for reading dvi files output by TeX. Several
limitations make this not (currently) useful as a general-purpose dvi
preprocessor, but it is currently used by the pdf backend for
processing usetex text.
Interface::
dvi = Dvi(filename, 72)
# iterate over pages (but only one page is supported for now):
for page in dvi:
w, h, d = page.width, page.height, page.descent
for x,y,font,glyph,width in page.text:
fontname = font.texname
pointsize = font.size
...
for x,y,height,width in page.boxes:
...
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import errno
import matplotlib
import matplotlib.cbook as mpl_cbook
from matplotlib.compat import subprocess
from matplotlib import rcParams
import numpy as np
import struct
import sys
import os
if six.PY3:
def ord(x):
return x
_dvistate = mpl_cbook.Bunch(pre=0, outer=1, inpage=2, post_post=3, finale=4)
class Dvi(object):
"""
A dvi ("device-independent") file, as produced by TeX.
The current implementation only reads the first page and does not
even attempt to verify the postamble.
"""
def __init__(self, filename, dpi):
"""
Initialize the object. This takes the filename as input and
opens the file; actually reading the file happens when
iterating through the pages of the file.
"""
matplotlib.verbose.report('Dvi: ' + filename, 'debug')
self.file = open(filename, 'rb')
self.dpi = dpi
self.fonts = {}
self.state = _dvistate.pre
self.baseline = self._get_baseline(filename)
def _get_baseline(self, filename):
if rcParams['text.latex.preview']:
base, ext = os.path.splitext(filename)
baseline_filename = base + ".baseline"
if os.path.exists(baseline_filename):
with open(baseline_filename, 'rb') as fd:
l = fd.read().split()
height, depth, width = l
return float(depth)
return None
def __iter__(self):
"""
Iterate through the pages of the file.
Returns (text, boxes) pairs, where:
text is a list of (x, y, fontnum, glyphnum, width) tuples
boxes is a list of (x, y, height, width) tuples
The coordinates are transformed into a standard Cartesian
coordinate system at the dpi value given when initializing.
The coordinates are floating point numbers, but otherwise
precision is not lost and coordinate values are not clipped to
integers.
"""
while True:
have_page = self._read()
if have_page:
yield self._output()
else:
break
def close(self):
"""
Close the underlying file if it is open.
"""
if not self.file.closed:
self.file.close()
def _output(self):
"""
Output the text and boxes belonging to the most recent page.
page = dvi._output()
"""
minx, miny, maxx, maxy = np.inf, np.inf, -np.inf, -np.inf
maxy_pure = -np.inf
for elt in self.text + self.boxes:
if len(elt) == 4: # box
x,y,h,w = elt
e = 0 # zero depth
else: # glyph
x,y,font,g,w = elt
h,e = font._height_depth_of(g)
minx = min(minx, x)
miny = min(miny, y - h)
maxx = max(maxx, x + w)
maxy = max(maxy, y + e)
maxy_pure = max(maxy_pure, y)
        if self.dpi is None:
            # special case for ease of debugging: output raw dvi coordinates
            # (the descent is reported in raw dvi units in this branch)
            return mpl_cbook.Bunch(text=self.text, boxes=self.boxes,
                                   width=maxx-minx, height=maxy_pure-miny,
                                   descent=maxy-maxy_pure)
d = self.dpi / (72.27 * 2**16) # from TeX's "scaled points" to dpi units
if self.baseline is None:
descent = (maxy - maxy_pure) * d
else:
descent = self.baseline
text = [ ((x-minx)*d, (maxy-y)*d - descent, f, g, w*d)
for (x,y,f,g,w) in self.text ]
boxes = [ ((x-minx)*d, (maxy-y)*d - descent, h*d, w*d) for (x,y,h,w) in self.boxes ]
return mpl_cbook.Bunch(text=text, boxes=boxes,
width=(maxx-minx)*d,
height=(maxy_pure-miny)*d,
descent=descent)
def _read(self):
"""
Read one page from the file. Return True if successful,
False if there were no more pages.
"""
while True:
byte = ord(self.file.read(1)[0])
self._dispatch(byte)
# if self.state == _dvistate.inpage:
# matplotlib.verbose.report(
# 'Dvi._read: after %d at %f,%f' %
# (byte, self.h, self.v),
# 'debug-annoying')
if byte == 140: # end of page
return True
if self.state == _dvistate.post_post: # end of file
self.close()
return False
def _arg(self, nbytes, signed=False):
"""
Read and return an integer argument *nbytes* long.
Signedness is determined by the *signed* keyword.
"""
str = self.file.read(nbytes)
value = ord(str[0])
if signed and value >= 0x80:
value = value - 0x100
for i in range(1, nbytes):
value = 0x100*value + ord(str[i])
return value
def _dispatch(self, byte):
"""
Based on the opcode *byte*, read the correct kinds of
arguments from the dvi file and call the method implementing
that opcode with those arguments.
"""
if 0 <= byte <= 127: self._set_char(byte)
elif byte == 128: self._set_char(self._arg(1))
elif byte == 129: self._set_char(self._arg(2))
elif byte == 130: self._set_char(self._arg(3))
elif byte == 131: self._set_char(self._arg(4, True))
elif byte == 132: self._set_rule(self._arg(4, True), self._arg(4, True))
elif byte == 133: self._put_char(self._arg(1))
elif byte == 134: self._put_char(self._arg(2))
elif byte == 135: self._put_char(self._arg(3))
elif byte == 136: self._put_char(self._arg(4, True))
elif byte == 137: self._put_rule(self._arg(4, True), self._arg(4, True))
elif byte == 138: self._nop()
elif byte == 139: self._bop(*[self._arg(4, True) for i in range(11)])
elif byte == 140: self._eop()
elif byte == 141: self._push()
elif byte == 142: self._pop()
elif byte == 143: self._right(self._arg(1, True))
elif byte == 144: self._right(self._arg(2, True))
elif byte == 145: self._right(self._arg(3, True))
elif byte == 146: self._right(self._arg(4, True))
elif byte == 147: self._right_w(None)
elif byte == 148: self._right_w(self._arg(1, True))
elif byte == 149: self._right_w(self._arg(2, True))
elif byte == 150: self._right_w(self._arg(3, True))
elif byte == 151: self._right_w(self._arg(4, True))
elif byte == 152: self._right_x(None)
elif byte == 153: self._right_x(self._arg(1, True))
elif byte == 154: self._right_x(self._arg(2, True))
elif byte == 155: self._right_x(self._arg(3, True))
elif byte == 156: self._right_x(self._arg(4, True))
elif byte == 157: self._down(self._arg(1, True))
elif byte == 158: self._down(self._arg(2, True))
elif byte == 159: self._down(self._arg(3, True))
elif byte == 160: self._down(self._arg(4, True))
elif byte == 161: self._down_y(None)
elif byte == 162: self._down_y(self._arg(1, True))
elif byte == 163: self._down_y(self._arg(2, True))
elif byte == 164: self._down_y(self._arg(3, True))
elif byte == 165: self._down_y(self._arg(4, True))
elif byte == 166: self._down_z(None)
elif byte == 167: self._down_z(self._arg(1, True))
elif byte == 168: self._down_z(self._arg(2, True))
elif byte == 169: self._down_z(self._arg(3, True))
elif byte == 170: self._down_z(self._arg(4, True))
elif 171 <= byte <= 234: self._fnt_num(byte-171)
elif byte == 235: self._fnt_num(self._arg(1))
elif byte == 236: self._fnt_num(self._arg(2))
elif byte == 237: self._fnt_num(self._arg(3))
elif byte == 238: self._fnt_num(self._arg(4, True))
elif 239 <= byte <= 242:
len = self._arg(byte-238)
special = self.file.read(len)
self._xxx(special)
elif 243 <= byte <= 246:
k = self._arg(byte-242, byte==246)
c, s, d, a, l = [ self._arg(x) for x in (4, 4, 4, 1, 1) ]
n = self.file.read(a+l)
self._fnt_def(k, c, s, d, a, l, n)
elif byte == 247:
i, num, den, mag, k = [ self._arg(x) for x in (1, 4, 4, 4, 1) ]
x = self.file.read(k)
self._pre(i, num, den, mag, x)
elif byte == 248: self._post()
elif byte == 249: self._post_post()
else:
raise ValueError("unknown command: byte %d"%byte)
def _pre(self, i, num, den, mag, comment):
if self.state != _dvistate.pre:
raise ValueError("pre command in middle of dvi file")
if i != 2:
raise ValueError("Unknown dvi format %d"%i)
if num != 25400000 or den != 7227 * 2**16:
raise ValueError("nonstandard units in dvi file")
# meaning: TeX always uses those exact values, so it
# should be enough for us to support those
# (There are 72.27 pt to an inch so 7227 pt =
# 7227 * 2**16 sp to 100 in. The numerator is multiplied
# by 10^5 to get units of 10**-7 meters.)
if mag != 1000:
raise ValueError("nonstandard magnification in dvi file")
# meaning: LaTeX seems to frown on setting \mag, so
# I think we can assume this is constant
self.state = _dvistate.outer
def _set_char(self, char):
if self.state != _dvistate.inpage:
raise ValueError("misplaced set_char in dvi file")
self._put_char(char)
self.h += self.fonts[self.f]._width_of(char)
def _set_rule(self, a, b):
if self.state != _dvistate.inpage:
raise ValueError("misplaced set_rule in dvi file")
self._put_rule(a, b)
self.h += b
def _put_char(self, char):
if self.state != _dvistate.inpage:
raise ValueError("misplaced put_char in dvi file")
font = self.fonts[self.f]
if font._vf is None:
self.text.append((self.h, self.v, font, char,
font._width_of(char)))
# matplotlib.verbose.report(
# 'Dvi._put_char: %d,%d %d' %(self.h, self.v, char),
# 'debug-annoying')
else:
scale = font._scale
for x, y, f, g, w in font._vf[char].text:
newf = DviFont(scale=_mul2012(scale, f._scale),
tfm=f._tfm, texname=f.texname, vf=f._vf)
self.text.append((self.h + _mul2012(x, scale),
self.v + _mul2012(y, scale),
newf, g, newf._width_of(g)))
self.boxes.extend([(self.h + _mul2012(x, scale),
self.v + _mul2012(y, scale),
_mul2012(a, scale), _mul2012(b, scale))
for x, y, a, b in font._vf[char].boxes])
def _put_rule(self, a, b):
if self.state != _dvistate.inpage:
raise ValueError("misplaced put_rule in dvi file")
if a > 0 and b > 0:
self.boxes.append((self.h, self.v, a, b))
# matplotlib.verbose.report(
# 'Dvi._put_rule: %d,%d %d,%d' % (self.h, self.v, a, b),
# 'debug-annoying')
def _nop(self):
pass
def _bop(self, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, p):
if self.state != _dvistate.outer:
raise ValueError("misplaced bop in dvi file (state %d)" % self.state)
self.state = _dvistate.inpage
self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
self.stack = []
self.text = [] # list of (x,y,fontnum,glyphnum)
self.boxes = [] # list of (x,y,width,height)
def _eop(self):
if self.state != _dvistate.inpage:
raise ValueError("misplaced eop in dvi file")
self.state = _dvistate.outer
del self.h, self.v, self.w, self.x, self.y, self.z, self.stack
def _push(self):
if self.state != _dvistate.inpage:
raise ValueError("misplaced push in dvi file")
self.stack.append((self.h, self.v, self.w, self.x, self.y, self.z))
def _pop(self):
if self.state != _dvistate.inpage:
raise ValueError("misplaced pop in dvi file")
self.h, self.v, self.w, self.x, self.y, self.z = self.stack.pop()
def _right(self, b):
if self.state != _dvistate.inpage:
raise ValueError("misplaced right in dvi file")
self.h += b
def _right_w(self, new_w):
if self.state != _dvistate.inpage:
raise ValueError("misplaced w in dvi file")
if new_w is not None:
self.w = new_w
self.h += self.w
def _right_x(self, new_x):
if self.state != _dvistate.inpage:
raise ValueError("misplaced x in dvi file")
if new_x is not None:
self.x = new_x
self.h += self.x
def _down(self, a):
if self.state != _dvistate.inpage:
raise ValueError("misplaced down in dvi file")
self.v += a
def _down_y(self, new_y):
if self.state != _dvistate.inpage:
raise ValueError("misplaced y in dvi file")
if new_y is not None:
self.y = new_y
self.v += self.y
def _down_z(self, new_z):
if self.state != _dvistate.inpage:
raise ValueError("misplaced z in dvi file")
if new_z is not None:
self.z = new_z
self.v += self.z
def _fnt_num(self, k):
if self.state != _dvistate.inpage:
raise ValueError("misplaced fnt_num in dvi file")
self.f = k
def _xxx(self, special):
if six.PY3:
matplotlib.verbose.report(
'Dvi._xxx: encountered special: %s'
% ''.join([(32 <= ord(ch) < 127) and chr(ch)
or '<%02x>' % ord(ch)
for ch in special]),
'debug')
else:
matplotlib.verbose.report(
'Dvi._xxx: encountered special: %s'
% ''.join([(32 <= ord(ch) < 127) and ch
or '<%02x>' % ord(ch)
for ch in special]),
'debug')
def _fnt_def(self, k, c, s, d, a, l, n):
tfm = _tfmfile(n[-l:].decode('ascii'))
if c != 0 and tfm.checksum != 0 and c != tfm.checksum:
raise ValueError('tfm checksum mismatch: %s'%n)
# It seems that the assumption behind the following check is incorrect:
#if d != tfm.design_size:
# raise ValueError, 'tfm design size mismatch: %d in dvi, %d in %s'%\
# (d, tfm.design_size, n)
vf = _vffile(n[-l:].decode('ascii'))
self.fonts[k] = DviFont(scale=s, tfm=tfm, texname=n, vf=vf)
def _post(self):
if self.state != _dvistate.outer:
raise ValueError("misplaced post in dvi file")
self.state = _dvistate.post_post
# TODO: actually read the postamble and finale?
# currently post_post just triggers closing the file
def _post_post(self):
raise NotImplementedError
class DviFont(object):
"""
Object that holds a font's texname and size, supports comparison,
and knows the widths of glyphs in the same units as the AFM file.
There are also internal attributes (for use by dviread.py) that
are *not* used for comparison.
The size is in Adobe points (converted from TeX points).
.. attribute:: texname
Name of the font as used internally by TeX and friends. This
is usually very different from any external font names, and
:class:`dviread.PsfontsMap` can be used to find the external
name of the font.
.. attribute:: size
Size of the font in Adobe points, converted from the slightly
smaller TeX points.
.. attribute:: widths
Widths of glyphs in glyph-space units, typically 1/1000ths of
the point size.
"""
__slots__ = ('texname', 'size', 'widths', '_scale', '_vf', '_tfm')
def __init__(self, scale, tfm, texname, vf):
if six.PY3 and isinstance(texname, bytes):
texname = texname.decode('ascii')
self._scale, self._tfm, self.texname, self._vf = \
scale, tfm, texname, vf
self.size = scale * (72.0 / (72.27 * 2**16))
try:
nchars = max(six.iterkeys(tfm.width)) + 1
except ValueError:
nchars = 0
self.widths = [ (1000*tfm.width.get(char, 0)) >> 20
for char in xrange(nchars) ]
def __eq__(self, other):
return self.__class__ == other.__class__ and \
self.texname == other.texname and self.size == other.size
def __ne__(self, other):
return not self.__eq__(other)
def _width_of(self, char):
"""
Width of char in dvi units. For internal use by dviread.py.
"""
width = self._tfm.width.get(char, None)
if width is not None:
return _mul2012(width, self._scale)
matplotlib.verbose.report(
'No width for char %d in font %s' % (char, self.texname),
'debug')
return 0
def _height_depth_of(self, char):
"""
Height and depth of char in dvi units. For internal use by dviread.py.
"""
result = []
for metric,name in ((self._tfm.height, "height"),
(self._tfm.depth, "depth")):
value = metric.get(char, None)
if value is None:
matplotlib.verbose.report(
'No %s for char %d in font %s' % (name, char, self.texname),
'debug')
result.append(0)
else:
result.append(_mul2012(value, self._scale))
return result
class Vf(Dvi):
"""
A virtual font (\*.vf file) containing subroutines for dvi files.
Usage::
vf = Vf(filename)
glyph = vf[code]
glyph.text, glyph.boxes, glyph.width
"""
def __init__(self, filename):
Dvi.__init__(self, filename, 0)
try:
self._first_font = None
self._chars = {}
self._packet_ends = None
self._read()
finally:
self.close()
def __getitem__(self, code):
return self._chars[code]
def _dispatch(self, byte):
# If we are in a packet, execute the dvi instructions
if self.state == _dvistate.inpage:
byte_at = self.file.tell()-1
if byte_at == self._packet_ends:
self._finalize_packet()
# fall through
elif byte_at > self._packet_ends:
raise ValueError("Packet length mismatch in vf file")
else:
if byte in (139, 140) or byte >= 243:
raise ValueError("Inappropriate opcode %d in vf file" % byte)
Dvi._dispatch(self, byte)
return
# We are outside a packet
if byte < 242: # a short packet (length given by byte)
cc, tfm = self._arg(1), self._arg(3)
self._init_packet(byte, cc, tfm)
elif byte == 242: # a long packet
pl, cc, tfm = [ self._arg(x) for x in (4, 4, 4) ]
self._init_packet(pl, cc, tfm)
elif 243 <= byte <= 246:
Dvi._dispatch(self, byte)
elif byte == 247: # preamble
i, k = self._arg(1), self._arg(1)
x = self.file.read(k)
cs, ds = self._arg(4), self._arg(4)
self._pre(i, x, cs, ds)
elif byte == 248: # postamble (just some number of 248s)
self.state = _dvistate.post_post
else:
raise ValueError("unknown vf opcode %d" % byte)
def _init_packet(self, pl, cc, tfm):
if self.state != _dvistate.outer:
raise ValueError("Misplaced packet in vf file")
self.state = _dvistate.inpage
self._packet_ends = self.file.tell() + pl
self._packet_char = cc
self._packet_width = tfm
self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
self.stack, self.text, self.boxes = [], [], []
self.f = self._first_font
def _finalize_packet(self):
self._chars[self._packet_char] = mpl_cbook.Bunch(
text=self.text, boxes=self.boxes, width = self._packet_width)
self.state = _dvistate.outer
def _pre(self, i, x, cs, ds):
if self.state != _dvistate.pre:
raise ValueError("pre command in middle of vf file")
if i != 202:
raise ValueError("Unknown vf format %d" % i)
if len(x):
matplotlib.verbose.report('vf file comment: ' + x, 'debug')
self.state = _dvistate.outer
# cs = checksum, ds = design size
def _fnt_def(self, k, *args):
Dvi._fnt_def(self, k, *args)
if self._first_font is None:
self._first_font = k
def _fix2comp(num):
"""
Convert from two's complement to negative.
"""
assert 0 <= num < 2**32
if num & 2**31:
return num - 2**32
else:
return num
def _mul2012(num1, num2):
"""
Multiply two numbers in 20.12 fixed point format.
"""
# Separated into a function because >> has surprising precedence
return (num1*num2) >> 20
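# Illustrative note (values chosen only for demonstration): a fix_word of
# 1 << 20 represents 1.0, so _mul2012(1 << 20, s) == s for any nonnegative
# integer s, and _fix2comp(2**32 - 1) == -1.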
class Tfm(object):
"""
A TeX Font Metric file. This implementation covers only the bare
minimum needed by the Dvi class.
.. attribute:: checksum
Used for verifying against the dvi file.
.. attribute:: design_size
Design size of the font (in what units?)
.. attribute:: width
Width of each character, needs to be scaled by the factor
specified in the dvi file. This is a dict because indexing may
not start from 0.
.. attribute:: height
Height of each character.
.. attribute:: depth
Depth of each character.
"""
__slots__ = ('checksum', 'design_size', 'width', 'height', 'depth')
def __init__(self, filename):
matplotlib.verbose.report('opening tfm file ' + filename, 'debug')
with open(filename, 'rb') as file:
header1 = file.read(24)
lh, bc, ec, nw, nh, nd = \
struct.unpack(str('!6H'), header1[2:14])
matplotlib.verbose.report(
'lh=%d, bc=%d, ec=%d, nw=%d, nh=%d, nd=%d' % (
lh, bc, ec, nw, nh, nd), 'debug')
header2 = file.read(4*lh)
self.checksum, self.design_size = \
struct.unpack(str('!2I'), header2[:8])
# there is also encoding information etc.
char_info = file.read(4*(ec-bc+1))
widths = file.read(4*nw)
heights = file.read(4*nh)
depths = file.read(4*nd)
self.width, self.height, self.depth = {}, {}, {}
widths, heights, depths = \
[ struct.unpack(str('!%dI') % (len(x)/4), x)
for x in (widths, heights, depths) ]
for idx, char in enumerate(xrange(bc, ec+1)):
self.width[char] = _fix2comp(widths[ord(char_info[4*idx])])
self.height[char] = _fix2comp(heights[ord(char_info[4*idx+1]) >> 4])
self.depth[char] = _fix2comp(depths[ord(char_info[4*idx+1]) & 0xf])
class PsfontsMap(object):
"""
A psfonts.map formatted file, mapping TeX fonts to PS fonts.
Usage::
>>> map = PsfontsMap(find_tex_file('pdftex.map'))
>>> entry = map['ptmbo8r']
>>> entry.texname
'ptmbo8r'
>>> entry.psname
'Times-Bold'
>>> entry.encoding
'/usr/local/texlive/2008/texmf-dist/fonts/enc/dvips/base/8r.enc'
>>> entry.effects
{'slant': 0.16700000000000001}
>>> entry.filename
For historical reasons, TeX knows many Type-1 fonts by different
names than the outside world. (For one thing, the names have to
fit in eight characters.) Also, TeX's native fonts are not Type-1
but Metafont, which is nontrivial to convert to PostScript except
as a bitmap. While high-quality conversions to Type-1 format exist
and are shipped with modern TeX distributions, we need to know
which Type-1 fonts are the counterparts of which native fonts. For
these reasons a mapping is needed from internal font names to font
file names.
A texmf tree typically includes mapping files called e.g.
psfonts.map, pdftex.map, dvipdfm.map. psfonts.map is used by
dvips, pdftex.map by pdfTeX, and dvipdfm.map by dvipdfm.
psfonts.map might avoid embedding the 35 PostScript fonts (i.e.,
have no filename for them, as in the Times-Bold example above),
while the pdf-related files perhaps only avoid the "Base 14" pdf
fonts. But the user may have configured these files differently.
"""
__slots__ = ('_font',)
def __init__(self, filename):
self._font = {}
with open(filename, 'rt') as file:
self._parse(file)
def __getitem__(self, texname):
try:
result = self._font[texname]
except KeyError:
result = self._font[texname.decode('ascii')]
fn, enc = result.filename, result.encoding
if fn is not None and not fn.startswith('/'):
result.filename = find_tex_file(fn)
if enc is not None and not enc.startswith('/'):
result.encoding = find_tex_file(result.encoding)
return result
def _parse(self, file):
"""Parse each line into words."""
for line in file:
line = line.strip()
if line == '' or line.startswith('%'):
continue
words, pos = [], 0
while pos < len(line):
if line[pos] == '"': # double quoted word
pos += 1
end = line.index('"', pos)
words.append(line[pos:end])
pos = end + 1
else: # ordinary word
end = line.find(' ', pos+1)
if end == -1: end = len(line)
words.append(line[pos:end])
pos = end
while pos < len(line) and line[pos] == ' ':
pos += 1
self._register(words)
def _register(self, words):
"""Register a font described by "words".
The format is, AFAIK: texname fontname [effects and filenames]
Effects are PostScript snippets like ".177 SlantFont",
filenames begin with one or two less-than signs. A filename
ending in enc is an encoding file, other filenames are font
files. This can be overridden with a left bracket: <[foobar
indicates an encoding file named foobar.
There is some difference between <foo.pfb and <<bar.pfb in
subsetting, but I have no example of << in my TeX installation.
"""
# If the map file specifies multiple encodings for a font, we
# follow pdfTeX in choosing the last one specified. Such
# entries are probably mistakes but they have occurred.
# http://tex.stackexchange.com/questions/10826/
# http://article.gmane.org/gmane.comp.tex.pdftex/4914
texname, psname = words[:2]
effects, encoding, filename = '', None, None
for word in words[2:]:
if not word.startswith('<'):
effects = word
else:
word = word.lstrip('<')
if word.startswith('[') or word.endswith('.enc'):
if encoding is not None:
matplotlib.verbose.report(
'Multiple encodings for %s = %s'
% (texname, psname), 'debug')
if word.startswith('['):
encoding = word[1:]
else:
encoding = word
else:
assert filename is None
filename = word
eff = effects.split()
effects = {}
try:
effects['slant'] = float(eff[eff.index('SlantFont')-1])
except ValueError:
pass
try:
effects['extend'] = float(eff[eff.index('ExtendFont')-1])
except ValueError:
pass
self._font[texname] = mpl_cbook.Bunch(
texname=texname, psname=psname, effects=effects,
encoding=encoding, filename=filename)
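    # Illustrative example (map line invented for demonstration): the words of
    #     ptmbo8r Times-Bold ".167 SlantFont" <8r.enc <ptmb8a.pfb
    # would be registered with psname 'Times-Bold', effects {'slant': 0.167},
    # encoding '8r.enc' and filename 'ptmb8a.pfb'.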
class Encoding(object):
"""
Parses a \*.enc file referenced from a psfonts.map style file.
The format this class understands is a very limited subset of
PostScript.
Usage (subject to change)::
for name in Encoding(filename):
whatever(name)
"""
__slots__ = ('encoding',)
def __init__(self, filename):
with open(filename, 'rt') as file:
matplotlib.verbose.report('Parsing TeX encoding ' + filename, 'debug-annoying')
self.encoding = self._parse(file)
matplotlib.verbose.report('Result: ' + repr(self.encoding), 'debug-annoying')
def __iter__(self):
for name in self.encoding:
yield name
def _parse(self, file):
result = []
state = 0
for line in file:
comment_start = line.find('%')
if comment_start > -1:
line = line[:comment_start]
line = line.strip()
if state == 0:
# Expecting something like /FooEncoding [
if '[' in line:
state = 1
line = line[line.index('[')+1:].strip()
if state == 1:
if ']' in line: # ] def
line = line[:line.index(']')]
state = 2
words = line.split()
for w in words:
if w.startswith('/'):
# Allow for /abc/def/ghi
subwords = w.split('/')
result.extend(subwords[1:])
else:
raise ValueError("Broken name in encoding file: " + w)
return result
def find_tex_file(filename, format=None):
"""
Call :program:`kpsewhich` to find a file in the texmf tree. If
*format* is not None, it is used as the value for the
:option:`--format` option.
Apparently most existing TeX distributions on Unix-like systems
use kpathsea. I hear MikTeX (a popular distribution on Windows)
doesn't use kpathsea, so what do we do? (TODO)
.. seealso::
`Kpathsea documentation <http://www.tug.org/kpathsea/>`_
The library that :program:`kpsewhich` is part of.
"""
cmd = ['kpsewhich']
if format is not None:
cmd += ['--format=' + format]
cmd += [filename]
matplotlib.verbose.report('find_tex_file(%s): %s' \
% (filename,cmd), 'debug')
# stderr is unused, but reading it avoids a subprocess optimization
# that breaks EINTR handling in some Python versions:
# http://bugs.python.org/issue12493
# https://github.com/matplotlib/matplotlib/issues/633
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
result = pipe.communicate()[0].rstrip()
matplotlib.verbose.report('find_tex_file result: %s' % result,
'debug')
return result.decode('ascii')
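# Illustrative usage (the returned path depends entirely on the local TeX
# installation; the one shown here is made up):
#
#     find_tex_file('cmr10.tfm')
#     # -> e.g. '/usr/share/texmf/fonts/tfm/public/cm/cmr10.tfm'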
# With multiple text objects per figure (e.g., tick labels) we may end
# up reading the same tfm and vf files many times, so we implement a
# simple cache. TODO: is this worth making persistent?
_tfmcache = {}
_vfcache = {}
def _fontfile(texname, class_, suffix, cache):
try:
return cache[texname]
except KeyError:
pass
filename = find_tex_file(texname + suffix)
if filename:
result = class_(filename)
else:
result = None
cache[texname] = result
return result
def _tfmfile(texname):
return _fontfile(texname, Tfm, '.tfm', _tfmcache)
def _vffile(texname):
return _fontfile(texname, Vf, '.vf', _vfcache)
if __name__ == '__main__':
import sys
matplotlib.verbose.set_level('debug-annoying')
fname = sys.argv[1]
try: dpi = float(sys.argv[2])
except IndexError: dpi = None
dvi = Dvi(fname, dpi)
fontmap = PsfontsMap(find_tex_file('pdftex.map'))
for page in dvi:
print('=== new page ===')
fPrev = None
for x,y,f,c,w in page.text:
if f != fPrev:
print('font', f.texname, 'scaled', f._scale/pow(2.0,20))
fPrev = f
print(x,y,c, 32 <= c < 128 and chr(c) or '.', w)
for x,y,w,h in page.boxes:
print(x,y,'BOX',w,h)
| mit |
l11x0m7/Paper | Modulation/code/signal_analysis.py | 1 | 7322 | # -*- encoding:utf-8 -*-
import os
import sys
import logging
from copy import deepcopy
from matplotlib import pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
reload(sys)
def drawModulation(dirpath, rownum=200):
"""信号文件绘图
:param filepath: 需要显示绘图的信号文件路径
:return: None
"""
plt.figure(1)
filepaths = os.listdir(dirpath)
fileorder = 1
useful_filepaths = [f for f in filepaths if f.startswith('parse_mod')]
for filepath in useful_filepaths:
count = np.random.randint(1, rownum + 1)
with open(dirpath + '/' + filepath, 'rb') as fr:
x = list()
vals = list()
name = filepath
for i, line in enumerate(fr):
if i < count:
continue
if i > count:
break
vals = line.strip().split('\t')
vals = map(float, vals)
x = range(len(vals))
plt.subplot(2 * len(useful_filepaths), 1, fileorder * 2 - 1)
plt.plot(x, vals, color = ((fileorder * 20 + 25) % 255 / 255.,
(fileorder * 5 + 35) % 255 / 255.,
(fileorder * 30 + 45) % 255 / 255.))
plt.xlabel('symbol number')
plt.ylabel('signal amplitude')
plt.title(name)
fileorder += 1
plt.show()
def drawMixSignal(filepath, sample=5):
"""信号文件绘图
:param filepath: 需要显示绘图的信号文件路径
:return: None
"""
plt.figure(1)
with open(filepath, 'rb') as fr:
rowNumber = sum(1 for _ in fr)
with open(filepath, 'rb') as fr:
sampleSignals = set(np.random.choice(range(rowNumber), sample, replace=False))
rowOrder = 1
for i, line in enumerate(fr):
if i not in sampleSignals:
continue
vals = line.strip().split('\t')
vals = map(float, vals)
x = range(len(vals))
plt.subplot(sample, 1, rowOrder)
plt.plot(x, vals, color = ((rowOrder * 20 + 25) % 255 / 255.,
(rowOrder * 5 + 35) % 255 / 255.,
(rowOrder * 30 + 45) % 255 / 255.))
rowOrder += 1
plt.show()
def mixSignalAndTagging(dirpath='../data', savepath='../data/mixSignals.txt', modeSize=[]):
"""信号混叠和标注
对已有的信号进行混叠.
1-7分别对应:2ASK、QPSK、2FSK、2ASK+QPSK、2ASK+2FSK、QPSK+2FSK、2ASK+QPSK+2FSK
:param dirpath: signal path
:param modeSize: the sample size in each mode, from `1` to `n`
:return: mixed signal
"""
def tagger(tag):
"""
        Tag a sample; the mapping from tag string to label is written out by hand below.
:param tag: like `1\t2`, `0\t2`, `0\t1\t2`
:return: `int` from 1 to 7 representing label
"""
if tag == '\t'.join(['0', ]):
return 1
elif tag == '\t'.join(['1', ]):
return 2
elif tag == '\t'.join(['2', ]):
return 3
elif tag == '\t'.join(['0', '1']):
return 4
elif tag == '\t'.join(['0', '2']):
return 5
elif tag == '\t'.join(['1', '2']):
return 6
elif tag == '\t'.join(['0', '1', '2']):
return 7
def C(n, m):
def calcNext(count, point, l, r, res, pre):
if(point > r):
return
if count == 1:
for i in xrange(point, r + 1):
pre.append(i)
res.append(deepcopy(pre))
pre.pop()
else:
for i in xrange(point, r + 1):
pre.append(i)
calcNext(count - 1, i + 1, l, r, res, pre)
pre.pop()
res = list()
calcNext(m, 0, 0, n - 1, res, [])
return res
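    # C(n, m) enumerates every m-element index combination from range(n),
    # e.g. C(3, 2) returns [[0, 1], [0, 2], [1, 2]].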
files = os.listdir(dirpath)
signals = {}
for filepath in files:
if not filepath.startswith('parse_'):
continue
with open(dirpath + '/' + filepath, 'rb') as fr:
modName = filepath.split('parse_mod_')[1].split('.txt')[0]
signal = list()
for line in fr:
amps = line.strip().split('\t')
amps = map(float, amps)
signal.append(amps)
# signal = zip(*signal)
# signal = np.tile(signal, (20, 1))
signals[modName] = signal
modTypes = np.asarray(signals.keys())
modeNum = len(modTypes)
totalSignals = np.array([])
totalTags = list()
for mixNum in xrange(1, modeNum + 1):
groupIndeces = C(modeNum, mixNum)
groupNum = len(groupIndeces)
sampleEachMod = modeSize[mixNum - 1] // groupNum
groupSignals = np.array([])
for groupInd in groupIndeces:
mixSignals = np.array([])
tag = '\t'.join(map(str, sorted(groupInd)))
tag = str(tagger(tag))
while len(mixSignals) < sampleEachMod:
mixSignal = np.zeros([len(signals[modTypes[0]]), len(signals[modTypes[0]][0])])
for ind in groupInd:
curSignal = np.asarray(signals[modTypes[ind]])
randomIndeces = np.random.choice(len(curSignal), len(curSignal), replace=False)
randSignal = curSignal[randomIndeces]
mixSignal += randSignal
mixSignals = np.concatenate([mixSignals, mixSignal]) if mixSignals.shape[0] != 0 else mixSignal
mixSignals = mixSignals[:sampleEachMod, :]
totalTags.extend([tag] * sampleEachMod)
groupSignals = np.concatenate([groupSignals, mixSignals]) if groupSignals.shape[0] != 0 else mixSignals
totalSignals = np.concatenate([totalSignals, groupSignals]) if totalSignals.shape[0] != 0 else groupSignals
assert len(totalTags) == sum(modeSize)
assert len(totalSignals) == sum(modeSize)
indeces = np.random.choice(len(totalSignals), len(totalSignals), replace=False)
totalSignals = np.asarray(totalSignals)[indeces]
totalTags = np.asarray(totalTags)[indeces]
with open(savepath, 'wb') as fw:
for i in xrange(len(totalTags)):
signal = totalSignals[i]
signal = map(str, signal)
tag = totalTags[i]
fw.write('\t'.join(['\t'.join(signal), tag]) + '\n')
def split(filepath):
with open(filepath, 'rb') as fr:
X = list()
for line in fr:
X.append(line.strip())
X_train, X_test = train_test_split(X, test_size=0.2, random_state=42)
filename = filepath.split('/')[-1]
dirbase = filepath.split('/')[:-1]
with open('/'.join(dirbase + ['train_' + filename]), 'wb') as fw:
for line in X_train:
fw.write(line + '\n')
with open('/'.join(dirbase + ['test_' + filename]), 'wb') as fw:
for line in X_test:
fw.write(line + '\n')
if __name__ == '__main__':
# drawModulation('../data/5dB')
drawMixSignal('../data/50dB/mixSignals.txt')
# mixSignalAndTagging('../data/5dB', '../data/5dB/mixSignals.txt', [600, 1500, 2000])
# split('../data/5dB/mixSignals.txt')
| apache-2.0 |
decebel/librosa | librosa/filters.py | 2 | 24021 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Filters
=======
Filter bank construction
------------------------
.. autosummary::
:toctree: generated/
dct
mel
chroma
constant_q
Miscellaneous
-------------
.. autosummary::
:toctree: generated/
constant_q_lengths
cq_to_chroma
window_bandwidth
Deprecated
----------
.. autosummary::
:toctree: generated/
logfrequency
"""
import numpy as np
import scipy
import scipy.signal
import warnings
from . import cache
from . import util
from .util.exceptions import ParameterError
from .core.time_frequency import note_to_hz, hz_to_midi, hz_to_octs
from .core.time_frequency import fft_frequencies, mel_frequencies
# Dictionary of window function bandwidths
WINDOW_BANDWIDTHS = dict(hann=0.725)
__all__ = ['dct',
'mel',
'chroma',
'constant_q',
'constant_q_lengths',
'cq_to_chroma',
'window_bandwidth',
# Deprecated
'logfrequency']
@cache
def dct(n_filters, n_input):
"""Discrete cosine transform (DCT type-III) basis.
.. [1] http://en.wikipedia.org/wiki/Discrete_cosine_transform
Parameters
----------
n_filters : int > 0 [scalar]
number of output components (DCT filters)
n_input : int > 0 [scalar]
number of input components (frequency bins)
Returns
-------
dct_basis: np.ndarray [shape=(n_filters, n_input)]
DCT (type-III) basis vectors [1]_
Examples
--------
>>> n_fft = 2048
>>> dct_filters = librosa.filters.dct(13, 1 + n_fft // 2)
>>> dct_filters
array([[ 0.031, 0.031, ..., 0.031, 0.031],
[ 0.044, 0.044, ..., -0.044, -0.044],
...,
[ 0.044, 0.044, ..., -0.044, -0.044],
[ 0.044, 0.044, ..., 0.044, 0.044]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(dct_filters, x_axis='linear')
>>> plt.ylabel('DCT function')
>>> plt.title('DCT filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
"""
basis = np.empty((n_filters, n_input))
basis[0, :] = 1.0 / np.sqrt(n_input)
samples = np.arange(1, 2*n_input, 2) * np.pi / (2.0 * n_input)
for i in range(1, n_filters):
basis[i, :] = np.cos(i*samples) * np.sqrt(2.0/n_input)
return basis
@cache
def mel(sr, n_fft, n_mels=128, fmin=0.0, fmax=None, htk=False):
"""Create a Filterbank matrix to combine FFT bins into Mel-frequency bins
Parameters
----------
sr : int > 0 [scalar]
sampling rate of the incoming signal
n_fft : int > 0 [scalar]
number of FFT components
n_mels : int > 0 [scalar]
number of Mel bands to generate
fmin : float >= 0 [scalar]
lowest frequency (in Hz)
fmax : float >= 0 [scalar]
highest frequency (in Hz).
If `None`, use `fmax = sr / 2.0`
htk : bool [scalar]
use HTK formula instead of Slaney
Returns
-------
M : np.ndarray [shape=(n_mels, 1 + n_fft/2)]
Mel transform matrix
Examples
--------
>>> melfb = librosa.filters.mel(22050, 2048)
>>> melfb
array([[ 0. , 0.016, ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ],
...,
[ 0. , 0. , ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ]])
Clip the maximum frequency to 8KHz
>>> librosa.filters.mel(22050, 2048, fmax=8000)
array([[ 0. , 0.02, ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ],
...,
[ 0. , 0. , ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(melfb, x_axis='linear')
>>> plt.ylabel('Mel filter')
>>> plt.title('Mel filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
"""
if fmax is None:
fmax = float(sr) / 2
# Initialize the weights
n_mels = int(n_mels)
weights = np.zeros((n_mels, int(1 + n_fft // 2)))
# Center freqs of each FFT bin
fftfreqs = fft_frequencies(sr=sr, n_fft=n_fft)
# 'Center freqs' of mel bands - uniformly spaced between limits
freqs = mel_frequencies(n_mels + 2,
fmin=fmin,
fmax=fmax,
htk=htk)
# Slaney-style mel is scaled to be approx constant energy per channel
enorm = 2.0 / (freqs[2:n_mels+2] - freqs[:n_mels])
for i in range(n_mels):
# lower and upper slopes for all bins
lower = (fftfreqs - freqs[i]) / (freqs[i+1] - freqs[i])
upper = (freqs[i+2] - fftfreqs) / (freqs[i+2] - freqs[i+1])
# .. then intersect them with each other and zero
weights[i] = np.maximum(0, np.minimum(lower, upper)) * enorm[i]
return weights
@cache
def chroma(sr, n_fft, n_chroma=12, A440=440.0, ctroct=5.0,
octwidth=2, norm=2, base_c=True):
"""Create a Filterbank matrix to convert STFT to chroma
Parameters
----------
sr : int > 0 [scalar]
audio sampling rate
n_fft : int > 0 [scalar]
number of FFT bins
n_chroma : int > 0 [scalar]
number of chroma bins
A440 : float > 0 [scalar]
Reference frequency for A440
ctroct : float > 0 [scalar]
octwidth : float > 0 or None [scalar]
`ctroct` and `octwidth` specify a dominance window -
a Gaussian weighting centered on `ctroct` (in octs, A0 = 27.5Hz)
and with a gaussian half-width of `octwidth`.
Set `octwidth` to `None` to use a flat weighting.
norm : float > 0 or np.inf
Normalization factor for each filter
base_c : bool
If True, the filter bank will start at 'C'.
If False, the filter bank will start at 'A'.
Returns
-------
wts : ndarray [shape=(n_chroma, 1 + n_fft / 2)]
Chroma filter matrix
See Also
--------
util.normalize
feature.chroma_stft
Examples
--------
Build a simple chroma filter bank
>>> chromafb = librosa.filters.chroma(22050, 4096)
array([[ 1.689e-05, 3.024e-04, ..., 4.639e-17, 5.327e-17],
[ 1.716e-05, 2.652e-04, ..., 2.674e-25, 3.176e-25],
...,
[ 1.578e-05, 3.619e-04, ..., 8.577e-06, 9.205e-06],
[ 1.643e-05, 3.355e-04, ..., 1.474e-10, 1.636e-10]])
Use quarter-tones instead of semitones
>>> librosa.filters.chroma(22050, 4096, n_chroma=24)
array([[ 1.194e-05, 2.138e-04, ..., 6.297e-64, 1.115e-63],
[ 1.206e-05, 2.009e-04, ..., 1.546e-79, 2.929e-79],
...,
[ 1.162e-05, 2.372e-04, ..., 6.417e-38, 9.923e-38],
[ 1.180e-05, 2.260e-04, ..., 4.697e-50, 7.772e-50]])
Equally weight all octaves
>>> librosa.filters.chroma(22050, 4096, octwidth=None)
array([[ 3.036e-01, 2.604e-01, ..., 2.445e-16, 2.809e-16],
[ 3.084e-01, 2.283e-01, ..., 1.409e-24, 1.675e-24],
...,
[ 2.836e-01, 3.116e-01, ..., 4.520e-05, 4.854e-05],
[ 2.953e-01, 2.888e-01, ..., 7.768e-10, 8.629e-10]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(chromafb, x_axis='linear')
>>> plt.ylabel('Chroma filter')
>>> plt.title('Chroma filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
"""
wts = np.zeros((n_chroma, n_fft))
# Get the FFT bins, not counting the DC component
frequencies = np.linspace(0, sr, n_fft, endpoint=False)[1:]
frqbins = n_chroma * hz_to_octs(frequencies, A440)
# make up a value for the 0 Hz bin = 1.5 octaves below bin 1
# (so chroma is 50% rotated from bin 1, and bin width is broad)
frqbins = np.concatenate(([frqbins[0] - 1.5 * n_chroma], frqbins))
binwidthbins = np.concatenate((np.maximum(frqbins[1:] - frqbins[:-1],
1.0), [1]))
D = np.subtract.outer(frqbins, np.arange(0, n_chroma, dtype='d')).T
n_chroma2 = np.round(float(n_chroma) / 2)
# Project into range -n_chroma/2 .. n_chroma/2
# add on fixed offset of 10*n_chroma to ensure all values passed to
# rem are positive
D = np.remainder(D + n_chroma2 + 10*n_chroma, n_chroma) - n_chroma2
# Gaussian bumps - 2*D to make them narrower
wts = np.exp(-0.5 * (2*D / np.tile(binwidthbins, (n_chroma, 1)))**2)
# normalize each column
wts = util.normalize(wts, norm=norm, axis=0)
# Maybe apply scaling for fft bins
if octwidth is not None:
wts *= np.tile(
np.exp(-0.5 * (((frqbins/n_chroma - ctroct)/octwidth)**2)),
(n_chroma, 1))
if base_c:
wts = np.roll(wts, -3, axis=0)
# remove aliasing columns, copy to ensure row-contiguity
return np.ascontiguousarray(wts[:, :int(1 + n_fft/2)])
@util.decorators.deprecated('0.4', '0.5')
def logfrequency(sr, n_fft, n_bins=84, bins_per_octave=12, tuning=0.0,
fmin=None, spread=0.125): # pragma: no cover
'''Approximate a constant-Q filter bank for a fixed-window STFT.
Each filter is a log-normal window centered at the corresponding frequency.
.. warning:: Deprecated in librosa 0.4
Parameters
----------
sr : int > 0 [scalar]
audio sampling rate
n_fft : int > 0 [scalar]
FFT window size
n_bins : int > 0 [scalar]
Number of bins. Defaults to 84 (7 octaves).
bins_per_octave : int > 0 [scalar]
Number of bins per octave. Defaults to 12 (semitones).
tuning : None or float in `[-0.5, +0.5]` [scalar]
Tuning correction parameter, in fractions of a bin.
fmin : float > 0 [scalar]
Minimum frequency bin. Defaults to `C1 ~= 32.70`
spread : float > 0 [scalar]
Spread of each filter, as a fraction of a bin.
Returns
-------
C : np.ndarray [shape=(n_bins, 1 + n_fft/2)]
log-frequency filter bank.
Examples
--------
Simple log frequency filters
>>> logfb = librosa.filters.logfrequency(22050, 4096)
>>> logfb
array([[ 0., 0., ..., 0., 0.],
[ 0., 0., ..., 0., 0.],
...,
[ 0., 0., ..., 0., 0.],
[ 0., 0., ..., 0., 0.]])
Use a narrower frequency range
>>> librosa.filters.logfrequency(22050, 4096, n_bins=48, fmin=110)
array([[ 0., 0., ..., 0., 0.],
[ 0., 0., ..., 0., 0.],
...,
[ 0., 0., ..., 0., 0.],
[ 0., 0., ..., 0., 0.]])
Use narrower filters for sparser response: 5% of a semitone
>>> librosa.filters.logfrequency(22050, 4096, spread=0.05)
Or wider: 50% of a semitone
>>> librosa.filters.logfrequency(22050, 4096, spread=0.5)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(logfb, x_axis='linear')
>>> plt.ylabel('Logarithmic filters')
>>> plt.title('Log-frequency filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
'''
if fmin is None:
fmin = note_to_hz('C1')
# Apply tuning correction
correction = 2.0**(float(tuning) / bins_per_octave)
# What's the shape parameter for our log-normal filters?
sigma = float(spread) / bins_per_octave
# Construct the output matrix
basis = np.zeros((n_bins, int(1 + n_fft/2)))
# Get log frequencies of bins
log_freqs = np.log2(fft_frequencies(sr, n_fft)[1:])
for i in range(n_bins):
# What's the center (median) frequency of this filter?
c_freq = correction * fmin * (2.0**(float(i) / bins_per_octave))
# Place a log-normal window around c_freq
basis[i, 1:] = np.exp(-0.5 * ((log_freqs - np.log2(c_freq)) / sigma)**2
- np.log2(sigma) - log_freqs)
# Normalize the filters
basis = util.normalize(basis, norm=1, axis=1)
return basis
def __float_window(window_function):
'''Decorator function for windows with fractional input.
This function guarantees that for fractional `x`, the following hold:
1. `__float_window(window_function)(x)` has length `np.ceil(x)`
2. all values from `np.floor(x)` are set to 0.
For integer-valued `x`, there should be no change in behavior.
'''
def _wrap(n, *args, **kwargs):
'''The wrapped window'''
n_min, n_max = int(np.floor(n)), int(np.ceil(n))
window = window_function(n, *args, **kwargs)
if len(window) < n_max:
window = np.pad(window, [(0, n_max - len(window))],
mode='constant')
window[n_min:] = 0.0
return window
return _wrap
@cache
def constant_q(sr, fmin=None, n_bins=84, bins_per_octave=12, tuning=0.0,
window=None, resolution=2, pad_fft=True, norm=1, **kwargs):
r'''Construct a constant-Q basis.
This uses the filter bank described by [1]_.
.. [1] McVicar, Matthew.
"A machine learning approach to automatic chord extraction."
Dissertation, University of Bristol. 2013.
Parameters
----------
sr : int > 0 [scalar]
Audio sampling rate
fmin : float > 0 [scalar]
Minimum frequency bin. Defaults to `C1 ~= 32.70`
n_bins : int > 0 [scalar]
Number of frequencies. Defaults to 7 octaves (84 bins).
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : float in `[-0.5, +0.5)` [scalar]
Tuning deviation from A440 in fractions of a bin
window : function or `None`
Windowing function to apply to filters.
Default: `scipy.signal.hann`
resolution : float > 0 [scalar]
Resolution of filter windows. Larger values use longer windows.
pad_fft : boolean
Center-pad all filters up to the nearest integral power of 2.
By default, padding is done with zeros, but this can be overridden
by setting the `mode=` field in *kwargs*.
norm : {inf, -inf, 0, float > 0}
Type of norm to use for basis function normalization.
See librosa.util.normalize
kwargs : additional keyword arguments
Arguments to `np.pad()` when `pad_fft=True`.
Returns
-------
filters : np.ndarray, `len(filters) == n_bins`
`filters[i]` is `i`\ th time-domain CQT basis filter
lengths : np.ndarray, `len(lengths) == n_bins`
The (fractional) length of each filter
See Also
--------
constant_q_lengths
librosa.core.cqt
librosa.util.normalize
Examples
--------
Use a longer window for each filter
>>> basis, lengths = librosa.filters.constant_q(22050, resolution=3)
Plot one octave of filters in time and frequency
>>> basis, lengths = librosa.filters.constant_q(22050)
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(10, 6))
>>> plt.subplot(2, 1, 1)
>>> notes = librosa.midi_to_note(np.arange(24, 24 + len(basis)))
>>> for i, (f, n) in enumerate(list(zip(basis, notes))[:12]):
... f_scale = librosa.util.normalize(f) / 2
... plt.plot(i + f_scale.real)
... plt.plot(i + f_scale.imag, linestyle=':')
>>> plt.axis('tight')
>>> plt.yticks(range(len(notes[:12])), notes[:12])
>>> plt.ylabel('CQ filters')
>>> plt.title('CQ filters (one octave, time domain)')
>>> plt.xlabel('Time (samples at 22050 Hz)')
>>> plt.legend(['Real', 'Imaginary'], frameon=True, framealpha=0.8)
>>> plt.subplot(2, 1, 2)
>>> F = np.abs(np.fft.fftn(basis, axes=[-1]))
>>> # Keep only the positive frequencies
>>> F = F[:, :(1 + F.shape[1] // 2)]
>>> librosa.display.specshow(F, x_axis='linear')
>>> plt.yticks(range(len(notes))[::12], notes[::12])
>>> plt.ylabel('CQ filters')
>>> plt.title('CQ filter magnitudes (frequency domain)')
>>> plt.tight_layout()
'''
if fmin is None:
fmin = note_to_hz('C1')
if window is None:
window = scipy.signal.hann
# Pass-through parameters to get the filter lengths
lengths = constant_q_lengths(sr, fmin,
n_bins=n_bins,
bins_per_octave=bins_per_octave,
tuning=tuning,
window=window,
resolution=resolution)
# Apply tuning correction
correction = 2.0**(float(tuning) / bins_per_octave)
fmin = correction * fmin
# Q should be capitalized here, so we suppress the name warning
# pylint: disable=invalid-name
Q = float(resolution) / (2.0**(1. / bins_per_octave) - 1)
# Convert lengths back to frequencies
freqs = Q * sr / lengths
# Build the filters
filters = []
for ilen, freq in zip(lengths, freqs):
# Build the filter: note, length will be ceil(ilen)
sig = np.exp(np.arange(ilen, dtype=float) * 1j * 2 * np.pi * freq / sr)
# Apply the windowing function
sig = sig * __float_window(window)(ilen)
# Normalize
sig = util.normalize(sig, norm=norm)
filters.append(sig)
# Pad and stack
max_len = max(lengths)
if pad_fft:
max_len = int(2.0**(np.ceil(np.log2(max_len))))
else:
max_len = np.ceil(max_len)
filters = np.asarray([util.pad_center(filt, max_len, **kwargs)
for filt in filters])
return filters, np.asarray(lengths)
@cache
def constant_q_lengths(sr, fmin, n_bins=84, bins_per_octave=12,
tuning=0.0, window='hann', resolution=2):
r'''Return length of each filter in a constant-Q basis.
Parameters
----------
sr : int > 0 [scalar]
Audio sampling rate
fmin : float > 0 [scalar]
Minimum frequency bin.
n_bins : int > 0 [scalar]
Number of frequencies. Defaults to 7 octaves (84 bins).
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : float in `[-0.5, +0.5)` [scalar]
Tuning deviation from A440 in fractions of a bin
window : str or callable
Window function to use on filters
resolution : float > 0 [scalar]
Resolution of filter windows. Larger values use longer windows.
Returns
-------
lengths : np.ndarray
The length of each filter.
See Also
--------
constant_q
librosa.core.cqt
'''
if fmin <= 0:
raise ParameterError('fmin must be positive')
if bins_per_octave <= 0:
raise ParameterError('bins_per_octave must be positive')
if resolution <= 0:
raise ParameterError('resolution must be positive')
if n_bins <= 0 or not isinstance(n_bins, int):
raise ParameterError('n_bins must be a positive integer')
correction = 2.0**(float(tuning) / bins_per_octave)
fmin = correction * fmin
# Q should be capitalized here, so we suppress the name warning
# pylint: disable=invalid-name
Q = float(resolution) / (2.0**(1. / bins_per_octave) - 1)
# Compute the frequencies
freq = fmin * (2.0 ** (np.arange(n_bins, dtype=float) / bins_per_octave))
if np.any(freq * (1 + window_bandwidth(window) / Q) > sr / 2.0):
raise ParameterError('Filter pass-band lies beyond Nyquist')
# Convert frequencies to filter lengths
lengths = Q * sr / freq
return lengths
@cache
def cq_to_chroma(n_input, bins_per_octave=12, n_chroma=12,
fmin=None, window=None, base_c=True):
'''Convert a Constant-Q basis to Chroma.
Parameters
----------
n_input : int > 0 [scalar]
Number of input components (CQT bins)
bins_per_octave : int > 0 [scalar]
How many bins per octave in the CQT
n_chroma : int > 0 [scalar]
Number of output bins (per octave) in the chroma
fmin : None or float > 0
Center frequency of the first constant-Q channel.
Default: 'C1' ~= 32.7 Hz
window : None or np.ndarray
If provided, the cq_to_chroma filter bank will be
convolved with `window`.
base_c : bool
If True, the first chroma bin will start at 'C'
If False, the first chroma bin will start at 'A'
Returns
-------
cq_to_chroma : np.ndarray [shape=(n_chroma, n_input)]
Transformation matrix: `Chroma = np.dot(cq_to_chroma, CQT)`
Raises
------
ParameterError
If `n_input` is not an integer multiple of `n_chroma`
Examples
--------
Get a CQT, and wrap bins to chroma
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> CQT = librosa.cqt(y, sr=sr)
>>> chroma_map = librosa.filters.cq_to_chroma(CQT.shape[0])
>>> chromagram = chroma_map.dot(CQT)
>>> # Max-normalize each time step
>>> chromagram = librosa.util.normalize(chromagram, axis=0)
>>> import matplotlib.pyplot as plt
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.logamplitude(CQT**2,
... ref_power=np.max),
... y_axis='cqt_note', x_axis='time')
>>> plt.title('CQT Power')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 2)
>>> librosa.display.specshow(chromagram, y_axis='chroma', x_axis='time')
>>> plt.title('Chroma (wrapped CQT)')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 3)
>>> chroma = librosa.feature.chromagram(y=y, sr=sr)
>>> librosa.display.specshow(chroma, y_axis='chroma', x_axis='time')
>>> plt.title('librosa.feature.chroma')
>>> plt.colorbar()
>>> plt.tight_layout()
'''
# How many fractional bins are we merging?
n_merge = float(bins_per_octave) / n_chroma
if fmin is None:
fmin = note_to_hz('C1')
if np.mod(n_merge, 1) != 0:
raise ParameterError('Incompatible CQ merge: '
'input bins must be an '
'integer multiple of output bins.')
# Tile the identity to merge fractional bins
cq_to_ch = np.repeat(np.eye(n_chroma), n_merge, axis=1)
# Roll it left to center on the target bin
cq_to_ch = np.roll(cq_to_ch, - int(n_merge // 2), axis=1)
# How many octaves are we repeating?
n_octaves = np.ceil(np.float(n_input) / bins_per_octave)
# Repeat and trim
cq_to_ch = np.tile(cq_to_ch, int(n_octaves))[:, :n_input]
# What's the note number of the first bin in the CQT?
# midi uses 12 bins per octave here
midi_0 = np.mod(hz_to_midi(fmin), 12)
if base_c:
# rotate to C
roll = midi_0
else:
# rotate to A
roll = midi_0 - 9
# Adjust the roll in terms of how many chroma we want out
# We need to be careful with rounding here
roll = int(np.round(roll * (n_chroma / 12.)))
# Apply the roll
cq_to_ch = np.roll(cq_to_ch, roll, axis=0).astype(float)
if window is not None:
cq_to_ch = scipy.signal.convolve(cq_to_ch,
np.atleast_2d(window),
mode='same')
return cq_to_ch
def window_bandwidth(window, default=1.0):
'''Get the bandwidth of a window function.
If the window function is unknown, return a default value.
Parameters
----------
window : callable or string
A window function, or the name of a window function.
Examples:
- scipy.signal.hann
- 'boxcar'
default : float >= 0
The default value, if `window` is unknown.
Returns
-------
bandwidth : float
The bandwidth of the given window function
See Also
--------
scipy.signal.windows
'''
if hasattr(window, '__name__'):
key = window.__name__
else:
key = window
if key not in WINDOW_BANDWIDTHS:
warnings.warn("Unknown window function '{:s}'.".format(key))
return WINDOW_BANDWIDTHS.get(key, default)
| isc |
hyqneuron/pylearn2-maxsom | pylearn2/scripts/datasets/browse_norb.py | 44 | 15741 | #!/usr/bin/env python
"""
A browser for the NORB and small NORB datasets. Navigate the images by
choosing the values for the label vector. Note that for the 'big' NORB
dataset, you can only set the first 5 label dimensions. You can then cycle
through the 3-12 images that fit those labels.
"""
import sys
import os
import argparse
import numpy
import warnings
try:
import matplotlib
from matplotlib import pyplot
except ImportError as import_error:
warnings.warn("Can't use this script without matplotlib.")
matplotlib = None
pyplot = None
from pylearn2.datasets.new_norb import NORB
from pylearn2.utils import safe_zip, serial
def _parse_args():
parser = argparse.ArgumentParser(
description="Browser for NORB dataset.")
parser.add_argument('--which_norb',
type=str,
required=False,
choices=('big', 'small'),
help="'Selects the (big) NORB, or the Small NORB.")
parser.add_argument('--which_set',
type=str,
required=False,
choices=('train', 'test', 'both'),
help="'train', or 'test'")
parser.add_argument('--pkl',
type=str,
required=False,
help=".pkl file of NORB dataset")
parser.add_argument('--stereo_viewer',
action='store_true',
help="Swaps left and right stereo images, so you "
"can see them in 3D by crossing your eyes.")
parser.add_argument('--no_norm',
action='store_true',
help="Don't normalize pixel values")
result = parser.parse_args()
if (result.pkl is not None) == (result.which_norb is not None or
result.which_set is not None):
print("Must supply either --pkl, or both --which_norb and "
"--which_set.")
sys.exit(1)
if (result.which_norb is None) != (result.which_set is None):
print("When not supplying --pkl, you must supply both "
"--which_norb and --which_set.")
sys.exit(1)
if result.pkl is not None:
if not result.pkl.endswith('.pkl'):
print("--pkl must be a filename that ends in .pkl")
sys.exit(1)
if not os.path.isfile(result.pkl):
print("couldn't find --pkl file '%s'" % result.pkl)
sys.exit(1)
return result
def _make_grid_to_short_label(dataset):
"""
Returns an array x such that x[a][b] gives label index a's b'th unique
value. In other words, it maps label grid indices a, b to the
corresponding label value.
"""
unique_values = [sorted(list(frozenset(column)))
for column
in dataset.y[:, :5].transpose()]
# If the dataset contains blank images, remove the '-1' labels
# corresponding to blank images, since they aren't contained in the
# label grid.
category_index = dataset.label_name_to_index['category']
unique_categories = unique_values[category_index]
category_to_name = dataset.label_to_value_funcs[category_index]
if any(category_to_name(category) == 'blank'
for category in unique_categories):
for d in range(1, len(unique_values)):
assert unique_values[d][0] == -1, ("unique_values: %s" %
str(unique_values))
unique_values[d] = unique_values[d][1:]
return unique_values
def _get_blank_label(dataset):
"""
Returns the label vector associated with blank images.
If dataset is a Small NORB (i.e. it has no blank images), this returns
None.
"""
category_index = dataset.label_name_to_index['category']
category_to_name = dataset.label_to_value_funcs[category_index]
blank_label = 5
try:
blank_name = category_to_name(blank_label)
except ValueError:
# Returns None if there is no 'blank' category (e.g. if we're using
# the small NORB dataset).
return None
assert blank_name == 'blank'
blank_rowmask = dataset.y[:, category_index] == blank_label
blank_labels = dataset.y[blank_rowmask, :]
if not blank_rowmask.any():
return None
if not numpy.all(blank_labels[0, :] == blank_labels[1:, :]):
raise ValueError("Expected all labels of category 'blank' to have "
"the same value, but they differed.")
return blank_labels[0, :].copy()
def _make_label_to_row_indices(labels):
"""
Returns a map from short labels (the first 5 elements of the label
vector) to the list of row indices of rows in the dense design matrix
with that label.
For Small NORB, all unique short labels have exactly one row index.
For big NORB, a short label can have 0-N row indices.
"""
result = {}
for row_index, label in enumerate(labels):
short_label = tuple(label[:5])
if result.get(short_label, None) is None:
result[short_label] = []
result[short_label].append(row_index)
return result
def main():
"""Top-level function."""
args = _parse_args()
if args.pkl is not None:
dataset = serial.load(args.pkl)
else:
dataset = NORB(args.which_norb, args.which_set)
# Indexes into the first 5 labels, which live on a 5-D grid.
grid_indices = [0, ] * 5
grid_to_short_label = _make_grid_to_short_label(dataset)
# Maps 5-D label vector to a list of row indices for dataset.X, dataset.y
# that have those labels.
label_to_row_indices = _make_label_to_row_indices(dataset.y)
# Indexes into the row index lists returned by label_to_row_indices.
object_image_index = [0, ]
blank_image_index = [0, ]
blank_label = _get_blank_label(dataset)
# Index into grid_indices currently being edited
grid_dimension = [0, ]
dataset_is_stereo = 's' in dataset.view_converter.axes
figure, all_axes = pyplot.subplots(1,
3 if dataset_is_stereo else 2,
squeeze=True,
figsize=(10, 3.5))
set_name = (os.path.split(args.pkl)[1] if args.which_set is None
else "%sing set" % args.which_set)
figure.canvas.set_window_title("NORB dataset (%s)" % set_name)
label_text = figure.suptitle('Up/down arrows choose label, '
'left/right arrows change it',
x=0.1,
horizontalalignment="left")
# Hides axes' tick marks
for axes in all_axes:
axes.get_xaxis().set_visible(False)
axes.get_yaxis().set_visible(False)
text_axes, image_axes = (all_axes[0], all_axes[1:])
image_captions = (('left', 'right') if dataset_is_stereo
else ('mono image', ))
if args.stereo_viewer:
image_captions = tuple(reversed(image_captions))
for image_ax, caption in safe_zip(image_axes, image_captions):
image_ax.set_title(caption)
text_axes.set_frame_on(False) # Hides background of text_axes
def is_blank(grid_indices):
assert len(grid_indices) == 5
assert all(x >= 0 for x in grid_indices)
ci = dataset.label_name_to_index['category'] # category index
category = grid_to_short_label[ci][grid_indices[ci]]
category_name = dataset.label_to_value_funcs[ci](category)
return category_name == 'blank'
def get_short_label(grid_indices):
"""
Returns the first 5 elements of the label vector pointed to by
grid_indices. We use the first 5, since they're the labels used by
both the 'big' and Small NORB datasets.
"""
# Need to special-case the 'blank' category, since it lies outside of
# the grid.
if is_blank(grid_indices): # won't happen with SmallNORB
return tuple(blank_label[:5])
else:
return tuple(grid_to_short_label[i][g]
for i, g in enumerate(grid_indices))
def get_row_indices(grid_indices):
short_label = get_short_label(grid_indices)
return label_to_row_indices.get(short_label, None)
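# Maps each image Axes to the pixel array it currently shows, for the mouse-over pixel readout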
axes_to_pixels = {}
def redraw(redraw_text, redraw_images):
row_indices = get_row_indices(grid_indices)
if row_indices is None:
row_index = None
image_index = 0
num_images = 0
else:
image_index = (blank_image_index
if is_blank(grid_indices)
else object_image_index)[0]
row_index = row_indices[image_index]
num_images = len(row_indices)
def draw_text():
if row_indices is None:
padding_length = dataset.y.shape[1] - len(grid_indices)
current_label = (tuple(get_short_label(grid_indices)) +
(0, ) * padding_length)
else:
current_label = dataset.y[row_index, :]
label_names = dataset.label_index_to_name
label_values = [label_to_value(label) for label_to_value, label
in safe_zip(dataset.label_to_value_funcs,
current_label)]
lines = ['%s: %s' % (t, v)
for t, v
in safe_zip(label_names, label_values)]
if dataset.y.shape[1] > 5:
# Inserts image number & blank line between editable and
# fixed labels.
lines = (lines[:5] +
['No such image' if num_images == 0
else 'image: %d of %d' % (image_index + 1,
num_images),
'\n'] +
lines[5:])
# prepends the current index's line with an arrow.
lines[grid_dimension[0]] = '==> ' + lines[grid_dimension[0]]
text_axes.clear()
# "transAxes": 0, 0 = bottom-left, 1, 1 at upper-right.
text_axes.text(0, 0.5, # coords
'\n'.join(lines),
verticalalignment='center',
transform=text_axes.transAxes)
def draw_images():
if row_indices is None:
for axis in image_axes:
axis.clear()
else:
data_row = dataset.X[row_index:row_index + 1, :]
axes_names = dataset.view_converter.axes
assert len(axes_names) in (4, 5)
assert axes_names[0] == 'b'
assert axes_names[-3] == 0
assert axes_names[-2] == 1
assert axes_names[-1] == 'c'
def draw_image(image, axes):
assert len(image.shape) == 2
norm = matplotlib.colors.NoNorm() if args.no_norm else None
axes_to_pixels[axes] = image
axes.imshow(image, norm=norm, cmap='gray')
if 's' in axes_names:
image_pair = \
dataset.get_topological_view(mat=data_row,
single_tensor=True)
# Shaves off the singleton dimensions
# (batch # and channel #), leaving just 's', 0, and 1.
image_pair = tuple(image_pair[0, :, :, :, 0])
if args.stereo_viewer:
image_pair = tuple(reversed(image_pair))
for axis, image in safe_zip(image_axes, image_pair):
draw_image(image, axis)
else:
image = dataset.get_topological_view(mat=data_row)
image = image[0, :, :, 0]
draw_image(image, image_axes[0])
if redraw_text:
draw_text()
if redraw_images:
draw_images()
figure.canvas.draw()
default_status_text = ("mouseover image%s for pixel values" %
("" if len(image_axes) == 1 else "s"))
status_text = figure.text(0.5, 0.1, default_status_text)
def on_mouse_motion(event):
original_text = status_text.get_text()
if event.inaxes not in image_axes:
status_text.set_text(default_status_text)
else:
pixels = axes_to_pixels[event.inaxes]
row = int(event.ydata + .5)
col = int(event.xdata + .5)
status_text.set_text("Pixel value: %g" % pixels[row, col])
if status_text.get_text() != original_text:
figure.canvas.draw()
def on_key_press(event):
def add_mod(arg, step, size):
return (arg + size + step) % size
def incr_index_type(step):
num_dimensions = len(grid_indices)
if dataset.y.shape[1] > 5:
# If dataset is big NORB, add one for the image index
num_dimensions += 1
grid_dimension[0] = add_mod(grid_dimension[0],
step,
num_dimensions)
def incr_index(step):
assert step in (0, -1, 1), ("Step was %d" % step)
image_index = (blank_image_index
if is_blank(grid_indices)
else object_image_index)
if grid_dimension[0] == 5: # i.e. the image index
row_indices = get_row_indices(grid_indices)
if row_indices is None:
image_index[0] = 0
else:
# increment the image index
image_index[0] = add_mod(image_index[0],
step,
len(row_indices))
else:
# increment one of the grid indices
gd = grid_dimension[0]
grid_indices[gd] = add_mod(grid_indices[gd],
step,
len(grid_to_short_label[gd]))
row_indices = get_row_indices(grid_indices)
if row_indices is None:
image_index[0] = 0
else:
# some grid indices have 2 images instead of 3.
image_index[0] = min(image_index[0], len(row_indices) - 1)
# Disables left/right key if we're currently showing a blank,
# and the current index type is neither 'category' (0) nor
# 'image number' (5)
disable_left_right = (is_blank(grid_indices) and
not (grid_dimension[0] in (0, 5)))
if event.key == 'up':
incr_index_type(-1)
redraw(True, False)
elif event.key == 'down':
incr_index_type(1)
redraw(True, False)
elif event.key == 'q':
sys.exit(0)
elif not disable_left_right:
if event.key == 'left':
incr_index(-1)
redraw(True, True)
elif event.key == 'right':
incr_index(1)
redraw(True, True)
figure.canvas.mpl_connect('key_press_event', on_key_press)
figure.canvas.mpl_connect('motion_notify_event', on_mouse_motion)
redraw(True, True)
pyplot.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
jaidevd/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 83 | 5888 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that ``n_clusters`` values of 3, 5
and 6 are bad picks for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However when the ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
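# Start the first silhouette plot 10 units up, leaving a blank margin at the bottom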
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
| bsd-3-clause |