repo_name (string, 7–92 chars) | path (string, 5–129 chars) | copies (201 classes) | size (string, 4–6 chars) | content (string, 1.03k–375k chars) | license (15 classes)
---|---|---|---|---|---|
ratnania/pigasus | doc/manual/include/demo/test_neumann_quartcircle.py | 1 | 2730 | #! /usr/bin/python
# ...
try:
from matplotlib import pyplot as plt
PLOT=True
except ImportError:
PLOT=False
# ...
import numpy as np
from pigasus.gallery.poisson import *
import sys
import inspect
filename = inspect.getfile(inspect.currentframe()) # script filename (usually with path)
# ...
sin = np.sin ; cos = np.cos ; pi = np.pi ; exp = np.exp
# ...
#-----------------------------------
try:
nx = int(sys.argv[1])
except:
nx = 31
try:
ny = int(sys.argv[2])
except:
ny = 31
try:
px = int(sys.argv[3])
except:
px = 2
try:
py = int(sys.argv[4])
except:
py = 2
from igakit.cad_geometry import quart_circle as domain
geo = domain(n=[nx,ny],p=[px,py])
#-----------------------------------
# ...
# exact solution
# ...
R = 1.
r = 0.5
c = 1. # for neumann
#c = pi / (R**2-r**2) # for all dirichlet bc
u = lambda x,y : [ x * y * sin ( c * (R**2 - x**2 - y**2 )) ]
# ...
# ...
# rhs
# ...
f = lambda x,y : [4*c**2*x**3*y*sin(c*(R**2 - x**2 - y**2)) \
+ 4*c**2*x*y**3*sin(c*(R**2 - x**2 - y**2)) \
+ 12*c*x*y*cos(c*(R**2 - x**2 - y**2)) ]
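# (Note: f above is the negative Laplacian of the exact solution u defined earlier,
# i.e. f = -Laplacian(u), so u solves the Poisson problem -Laplacian(u) = f.)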
# ...
# ...
# values of gradu.n at the boundary
# ...
gradu = lambda x,y : [-2*c*x**2*y*cos(c*(R**2 - x**2 - y**2)) + y*sin(c*(R**2 - x**2 - y**2)),
                      -2*c*x*y**2*cos(c*(R**2 - x**2 - y**2)) + x*sin(c*(R**2 - x**2 - y**2))]
def func_g (x,y) :
du = gradu (x, y)
return [ du[0] , du[1] ]
# ...
# ...
# values of u at the boundary
# ...
bc_neumann={}
bc_neumann [0,0] = func_g
Dirichlet = [[1,2,3]]
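# bc_neumann maps boundary face (0,0) to func_g, the prescribed grad(u).n data;
# the faces listed in Dirichlet ([1,2,3]) are treated as Dirichlet boundaries instead.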
#AllDirichlet = True
# ...
# ...
try:
bc_dirichlet
except NameError:
bc_dirichlet = None
else:
pass
try:
bc_neumann
except NameError:
bc_neumann = None
else:
pass
try:
AllDirichlet
except NameError:
AllDirichlet = None
else:
pass
try:
Dirichlet
except NameError:
Dirichlet = None
else:
pass
try:
Metric
except NameError:
Metric = None
else:
pass
# ...
# ...
PDE = poisson(geometry=geo, bc_dirichlet=bc_dirichlet, bc_neumann=bc_neumann,
AllDirichlet=AllDirichlet, Dirichlet=Dirichlet,metric=Metric)
# ...
# ...
PDE.assembly(f=f)
PDE.solve()
# ...
# ...
normU = PDE.norm(exact=u)
print "norm U = ", normU
# ...
# ...
if PLOT:
PDE.plot() ; plt.colorbar(); plt.title('$u_h$')
plt.savefig(filename.split('.py')[0]+'.png', format='png')
plt.clf()
# ...
PDE.free()
| mit |
devanshdalal/scikit-learn | examples/gaussian_process/plot_gpr_noisy_targets.py | 64 | 3706 | """
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression example computed in two different ways:
1. A noise-free case
2. A noisy case with known noise-level per datapoint
In both cases, the kernel's parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``alpha`` is applied as a Tikhonov
regularization of the assumed covariance between the training points.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
# ----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
kernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x, return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'r.', markersize=10, label=u'Observations')
plt.plot(x, y_pred, 'b-', label=u'Prediction')
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
# ----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Instantiate a Gaussian Process model
gp = GaussianProcessRegressor(kernel=kernel, alpha=(dy / y) ** 2,
n_restarts_optimizer=10)
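# Note: alpha is added to the diagonal of the kernel (covariance) matrix during
# fitting, so each observation carries its own noise level / Tikhonov regularization,
# as described in the module docstring.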
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x, return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
plt.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
plt.plot(x, y_pred, 'b-', label=u'Prediction')
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
lordkman/burnman | examples/example_geotherms.py | 4 | 4049 | # This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
example_geotherms
-----------------
This example shows each of the geotherms currently possible with BurnMan.
These are:
1. Brown and Shankland, 1981 :cite:`Brown1981`
2. Anderson, 1982 :cite:`anderson1982earth`
3. Watson and Baxter, 2007 :cite:`Watson2007`
4. linear extrapolation
5. Read in from file from user
6. Adiabatic from potential temperature and choice of mineral
*Uses:*
* :func:`burnman.geotherm.brown_shankland`
* :func:`burnman.geotherm.anderson`
* input geotherm file *input_geotherm/example_geotherm.txt* (optional)
* :class:`burnman.composite.Composite` for adiabat
*Demonstrates:*
* the available geotherms
"""
from __future__ import absolute_import
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
# hack to allow scripts to be placed in subdirectories next to burnman:
if not os.path.exists('burnman') and os.path.exists('../burnman'):
sys.path.insert(1, os.path.abspath('..'))
import burnman
from burnman import minerals
if __name__ == "__main__":
# we want to evaluate several geotherms at these values
pressures = np.arange(9.0e9, 128e9, 3e9)
seismic_model = burnman.seismic.PREM()
depths = seismic_model.depth(pressures)
# load two builtin geotherms and evaluate the temperatures at all pressures
temperature1 = burnman.geotherm.brown_shankland(depths)
temperature2 = burnman.geotherm.anderson(depths)
# a geotherm is actually just a function that returns a list of temperatures given pressures in Pa
# so we can just write our own function
my_geotherm_function = lambda p: [1500 + (2500 - 1500) * x / 128e9 for x in p]
temperature3 = my_geotherm_function(pressures)
# what about a geotherm defined from datapoints given in a file (our
# inline)?
table = [[1e9, 1600], [30e9, 1700], [130e9, 2700]]
# this could also be loaded from a file, just uncomment this
# table = burnman.tools.read_table("input_geotherm/example_geotherm.txt")
table_pressure = np.array(table)[:, 0]
table_temperature = np.array(table)[:, 1]
my_geotherm_interpolate = lambda p: [np.interp(x, table_pressure,
table_temperature) for x in p]
temperature4 = my_geotherm_interpolate(pressures)
# finally, we can also calculate a self consistent
# geotherm for an assemblage of minerals
# based on self compression of the composite rock.
# First we need to define an assemblage
amount_perovskite = 0.8
fe_pv = 0.05
fe_pc = 0.2
pv = minerals.SLB_2011.mg_fe_perovskite()
pc = minerals.SLB_2011.ferropericlase()
pv.set_composition([1. - fe_pv, fe_pv, 0.])
pc.set_composition([1. - fe_pc, fe_pc])
example_rock = burnman.Composite(
[pv, pc], [amount_perovskite, 1.0 - amount_perovskite])
# next, define an anchor temperature at which we are starting.
# Perhaps 1500 K for the upper mantle
T0 = 1500.
# then generate temperature values using the self consistent function.
# This takes more time than the above methods
temperature5 = burnman.geotherm.adiabatic(pressures, T0, example_rock)
# you can also look at burnman/geotherm.py to see how the geotherms are
# implemented
plt.plot(pressures / 1e9, temperature1, '-r', label="Brown, Shankland")
plt.plot(pressures / 1e9, temperature2, '-c', label="Anderson")
plt.plot(pressures / 1e9, temperature3, '-b', label="handwritten linear")
plt.plot(pressures / 1e9, temperature4,
'-k', label="handwritten from table")
plt.plot(pressures / 1e9, temperature5, '-m',
label="Adiabat with pv (70%) and fp(30%)")
plt.legend(loc='lower right')
plt.xlim([8.5, 130])
plt.xlabel('Pressure/GPa')
plt.ylabel('Temperature')
plt.savefig("output_figures/example_geotherm.png")
plt.show()
| gpl-2.0 |
francesco-mannella/dmp-esn | parametric/parametric_dmp/bin/tr_datasets/e_cursive_curves_angles_start_none/results/plot.py | 18 | 1043 | #!/usr/bin/env python
import glob
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
pathname = os.path.dirname(sys.argv[0])
if pathname:
os.chdir(pathname)
n_dim = None
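# Load every matching file into a list of arrays: training data (tl*), test data (tt*),
# and the corresponding result trajectories for the training (rtl*) and test (rtt*) sets.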
trains = []
for fname in glob.glob("tl*"):
t = np.loadtxt(fname)
trains.append(t)
tests = []
for fname in glob.glob("tt*"):
t = np.loadtxt(fname)
tests.append(t)
trial_results= []
for fname in glob.glob("rtl*"):
t = np.loadtxt(fname)
trial_results.append(t)
test_results= []
for fname in glob.glob("rtt*"):
t = np.loadtxt(fname)
test_results.append(t)
fig = plt.figure()
ax = fig.add_subplot(111, aspect="equal")
for d in trains:
ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color="blue", lw=3, alpha=0.5)
for d in tests:
ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color="red", lw=3, alpha=0.5)
for d in trial_results:
ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color=[0,0,.5], lw=2)
for d in test_results:
ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color=[.5,0,0], lw=2)
plt.show()
| gpl-2.0 |
flowersteam/SESM | SESM/pykinect.py | 2 | 3387 | import zmq
import numpy
import threading
from collections import namedtuple
Point2D = namedtuple('Point2D', ('x', 'y'))
Point3D = namedtuple('Point3D', ('x', 'y', 'z'))
Quaternion = namedtuple('Quaternion', ('x', 'y', 'z', 'w'))
torso_joints = ('hip_center', 'spine', 'shoulder_center', 'head')
left_arm_joints = ('shoulder_left', 'elbow_left', 'wrist_left', 'hand_left')
right_arm_joints = ('shoulder_right', 'elbow_right', 'wrist_right', 'hand_right')
left_leg_joints = ('hip_left', 'knee_left', 'ankle_left', 'foot_left')
right_leg_joints = ('hip_right', 'knee_right', 'ankle_right', 'foot_right')
skeleton_joints = torso_joints + left_arm_joints + right_arm_joints + left_leg_joints + right_leg_joints
class Skeleton(namedtuple('Skeleton', ('timestamp', 'user_id') + skeleton_joints)):
joints = skeleton_joints
@property
def to_np(self):
l = []
for j in self.joints:
p = getattr(self, j).position
l.append((p.x, p.y, p.z))
return numpy.array(l)
Joint = namedtuple('Joint', ('position', 'orientation', 'pixel_coordinate'))
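# KinectSensor polls a remote skeleton server over a ZMQ REQ socket from a background
# daemon thread and exposes the most recent skeleton through the thread-safe
# tracked_skeleton property.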
class KinectSensor(object):
def __init__(self, addr, port):
self._lock = threading.Lock()
self._skeleton = None
context = zmq.Context()
self.socket = context.socket(zmq.REQ)
self.socket.connect('tcp://{}:{}'.format(addr, port))
t = threading.Thread(target=self.get_skeleton)
t.daemon = True
t.start()
@property
def tracked_skeleton(self):
with self._lock:
return self._skeleton
@tracked_skeleton.setter
def tracked_skeleton(self, skeleton):
with self._lock:
self._skeleton = skeleton
def get_skeleton(self):
while True:
self.socket.send('Hello')
md = self.socket.recv_json()
msg = self.socket.recv()
skeleton_array = numpy.frombuffer(buffer(msg), dtype=md['dtype'])
skeleton_array = skeleton_array.reshape(md['shape'])
joints = []
for i in range(len(skeleton_joints)):
x, y, z, w = skeleton_array[i][0:4]
position = Point3D(x / w, y / w, z / w)
pixel_coord = Point2D(*skeleton_array[i][4:6])
orientation = Quaternion(*skeleton_array[i][6:10])
joints.append(Joint(position, orientation, pixel_coord))
self.tracked_skeleton = Skeleton(md['timestamp'], md['user_index'], *joints)
def draw_position(skel, ax):
xy, zy = [], []
if not skel:
return
for j in skeleton_joints:
p = getattr(skel, j).position
xy.append((p.x, p.y))
zy.append((p.z, p.y))
ax.set_xlim(-2, 5)
ax.set_ylim(-1.5, 1.5)
ax.scatter(zip(*xy)[0], zip(*xy)[1], 30, 'b')
ax.scatter(zip(*zy)[0], zip(*zy)[1], 30, 'r')
if __name__ == '__main__':
import time
import matplotlib.pyplot as plt
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
kinect_sensor = KinectSensor('193.50.110.210', 9999)
import skelangle
kinect_angle = skelangle.AngleFromSkel()
try:
while True:
ax.clear()
draw_position(kinect_sensor.tracked_skeleton, ax)
plt.draw()
time.sleep(0.1)
except KeyboardInterrupt:
plt.close('all')
| gpl-3.0 |
gwparikh/cvgui | grouping_calibration.py | 2 | 9402 | #!/usr/bin/env python
import os, sys, subprocess
import argparse
import subprocess
import threading
import timeit
from multiprocessing import Queue, Lock
from configobj import ConfigObj
from numpy import loadtxt
from numpy.linalg import inv
import matplotlib.pyplot as plt
import moving
from cvguipy import trajstorage, cvgenetic, cvconfig
"""
Grouping Calibration By Genetic Algorithm.
This script uses a genetic algorithm to search for the best configuration.
It does not monitor RAM usage; therefore, CPU thrashing may occur when the number of parents (selection size) is too large.
"""
# class for genetic algorithm
class GeneticCompare(object):
def __init__(self, motalist, motplist, IDlist, cfg_list, lock):
self.motalist = motalist
self.motplist = motplist
self.IDlist = IDlist
self.cfg_list = cfg_list
self.lock = lock
# This is used to calculate the fitness of an individual in the genetic algorithm.
# It is modified to create the sqlite and cfg files before running computeClearMOT.
# NOTE: errors show up when the same ID is loaded twice
def computeMOT(self, i):
# create sqlite and cfg file with id i
cfg_name = config_files +str(i)+'.cfg'
sql_name = sqlite_files +str(i)+'.sqlite'
open(cfg_name,'w').close()
config = ConfigObj(cfg_name)
cfg_list.write_config(i ,config)
command = ['cp', 'tracking_only.sqlite', sql_name]
process = subprocess.Popen(command)
process.wait()
command = ['trajextract.py', args.inputVideo, '-o', args.homography, '-t', cfg_name, '-d', sql_name, '--gf']
# suppress output of grouping extraction
devnull = open(os.devnull, 'wb')
process = subprocess.Popen(command, stdout = devnull)
process.wait()
obj = trajstorage.CVsqlite(sql_name)
print "loading", i
obj.loadObjects()
motp, mota, mt, mme, fpt, gt = moving.computeClearMOT(cdb.annotations, obj.objects, args.matchDistance, firstFrame, lastFrame)
if motp is None:
motp = 0
self.lock.acquire()
self.IDlist.put(i)
self.motplist.put(motp)
self.motalist.put(mota)
obj.close()
if args.PrintMOTA:
print("ID: mota:{} motp:{}".format(mota, motp))
self.lock.release()
return mota
if __name__ == '__main__' :
parser = argparse.ArgumentParser(description="compare all sqlites that are created by cfg_combination.py to the Annotated version to find the ID of the best configuration")
parser.add_argument('inputVideo', help= "input video filename")
parser.add_argument('-r', '--configuration-file', dest='range_cfg', help= "the configuration-file contain the range of configuration")
parser.add_argument('-t', '--traffintel-config', dest='traffintelConfig', help= "the TrafficIntelligence file to use for running the first extraction.")
parser.add_argument('-m', '--mask-File', dest='maskFilename', help="Name of the mask-File for trajextract")
parser.add_argument('-d', '--database-file', dest ='databaseFile', help ="Name of the databaseFile.")
parser.add_argument('-o', '--homography-file', dest ='homography', help = "Name of the homography file.", required = True)
parser.add_argument('-md', '--matching-distance', dest='matchDistance', help = "matchDistance", default = 10, type = float)
parser.add_argument('-a', '--accuracy', dest = 'accuracy', help = "accuracy parameter for genetic algorithm", type = int)
parser.add_argument('-p', '--population', dest = 'population', help = "population parameter for genetic algorithm", required = True, type = int)
parser.add_argument('-np', '--num-of-parents', dest = 'num_of_parents', help = "Number of parents that are selected each generation", type = int)
parser.add_argument('-mota', '--print-MOTA', dest='PrintMOTA', action = 'store_true', help = "Print MOTA for each ID.")
args = parser.parse_args()
os.mkdir('cfg_files')
os.mkdir('sql_files')
sqlite_files = "sql_files/Sqlite_ID_"
config_files = "cfg_files/Cfg_ID_"
# ------------------initialize annotated version if it does not exist----- #
# inputVideo check
if not os.path.exists(args.inputVideo):
print("Input video {} does not exist! Exiting...".format(args.inputVideo))
sys.exit(1)
# configuration file check
if args.range_cfg is None:
config = ConfigObj('range.cfg')
else:
config = ConfigObj(args.range_cfg)
# get configuration and put them to a List
cfg_list = cvconfig.CVConfigList()
thread_cfgtolist = threading.Thread(target = cvconfig.config_to_list, args = (cfg_list, config))
thread_cfgtolist.start();
# check if dbfile name is entered
if args.databaseFile is None:
print("Database-file is not entered, running trajextract and cvplayer.")
if not os.path.exists(args.homography):
print("Homography file does not exist! Exiting...")
sys.exit(1)
else:
videofile=args.inputVideo
if 'avi' in videofile:
if args.maskFilename is not None:
command = ['trajextract.py',args.inputVideo,'-m', args.maskFilename,'-o', args.homography]
else:
command = ['trajextract.py',args.inputVideo,'-o', args.homography]
process = subprocess.Popen(command)
process.wait()
databaseFile = videofile.replace('avi','sqlite')
command = ['cvplayer.py',args.inputVideo,'-d',databaseFile,'-o',args.homography]
process = subprocess.Popen(command)
process.wait()
else:
print("Input video {} is not 'avi' type. Exiting...".format(args.inputVideo))
sys.exit(1)
else:
databaseFile = args.databaseFile
thread_cfgtolist.join()
# ------------------Done initialization for annotation-------------------- #
# create first tracking only database template.
print("creating the first tracking only database template.")
if args.maskFilename is not None:
command = map(str, ['trajextract.py',args.inputVideo, '-d', 'tracking_only.sqlite', '-t', args.traffintelConfig, '-o', args.homography, '-m', args.maskFilename, '--tf'])
else:
command = map(str, ['trajextract.py',args.inputVideo, '-d', 'tracking_only.sqlite', '-t', args.traffintelConfig, '-o', args.homography, '--tf'])
process = subprocess.Popen(command)
process.wait()
# ----start using genetic algorithm to search for best configuration-------#
start = timeit.default_timer()
dbfile = databaseFile;
homography = loadtxt(args.homography)
cdb = trajstorage.CVsqlite(dbfile)
cdb.open()
cdb.getLatestAnnotation()
cdb.createBoundingBoxTable(cdb.latestannotations, inv(homography))
cdb.loadAnnotaion()
for a in cdb.annotations:
a.computeCentroidTrajectory(homography)
print "Latest Annotaions in "+dbfile+": ", cdb.latestannotations
cdb.frameNumbers = cdb.getFrameList()
firstFrame = cdb.frameNumbers[0]
lastFrame = cdb.frameNumbers[-1]
foundmota = Queue()
foundmotp = Queue()
IDs = Queue()
lock = Lock()
Comp = GeneticCompare(foundmota, foundmotp, IDs, cfg_list, lock)
if args.accuracy != None:
GeneticCal = cvgenetic.CVGenetic(args.population, cfg_list, Comp.computeMOT, args.accuracy)
else:
GeneticCal = cvgenetic.CVGenetic(args.population, cfg_list, Comp.computeMOT)
if args.num_of_parents != None:
GeneticCal.run_thread(args.num_of_parents)
else:
GeneticCal.run_thread()
# transform queues to lists
foundmota = cvgenetic.Queue_to_list(foundmota)
foundmotp = cvgenetic.Queue_to_list(foundmotp)
IDs = cvgenetic.Queue_to_list(IDs)
for i in range(len(foundmotp)):
foundmotp[i] /= args.matchDistance
Best_mota = max(foundmota)
Best_ID = IDs[foundmota.index(Best_mota)]
print "Best multiple object tracking accuracy (MOTA)", Best_mota
print "ID:", Best_ID
stop = timeit.default_timer()
print str(stop-start) + "s"
total = []
for i in range(len(foundmota)):
total.append(foundmota[i]- 0.1 * foundmotp[i])
Best_total = max(total)
Best_total_ID = IDs[total.index(Best_total)]
# ------------------------------Done searching----------------------------#
# use matplotlib to plot a graph of all calculated IDs along with their mota
plt.figure(1)
plt.plot(foundmota ,IDs ,'bo')
plt.plot(foundmotp ,IDs ,'yo')
plt.plot(Best_mota, Best_ID, 'ro')
plt.axis([-1, 1, -1, cfg_list.get_total_combination()])
plt.xlabel('mota')
plt.ylabel('ID')
plt.title(b'Best MOTA: '+str(Best_mota) +'\nwith ID: '+str(Best_ID))
plotFile = os.path.splitext(dbfile)[0] + '_CalibrationResult_mota.png'
plt.savefig(plotFile)
plt.figure(2)
plt.plot(total, IDs, 'bo')
plt.plot(Best_total, Best_total_ID, 'ro')
plt.xlabel('mota + motp')
plt.ylabel('ID')
plt.title(b'Best total: '+str(Best_total) +'\nwith ID: '+str(Best_total_ID))
# save the plot
plotFile = os.path.splitext(dbfile)[0] + '_CalibrationResult_motp.png'
plt.savefig(plotFile)
plt.show()
cdb.close()
| mit |
keflavich/pyspeckit-obsolete | pyspeckit/spectrum/models/ammonia.py | 1 | 28836 | """
========================================
Ammonia inversion transition TKIN fitter
========================================
Ammonia inversion transition TKIN fitter translated from Erik Rosolowsky's
http://svn.ok.ubc.ca/svn/signals/nh3fit/
.. moduleauthor:: Adam Ginsburg <adam.g.ginsburg@gmail.com>
Module API
^^^^^^^^^^
"""
import numpy as np
from pyspeckit.mpfit import mpfit
from pyspeckit.spectrum.parinfo import ParinfoList,Parinfo
import fitter
import matplotlib.cbook as mpcb
import copy
import model
line_names = ['oneone','twotwo','threethree','fourfour']
freq_dict = {
'oneone': 23.694506e9,
'twotwo': 23.722633335e9,
'threethree': 23.8701296e9,
'fourfour': 24.1394169e9,
}
aval_dict = {
'oneone': 1.712e-7, #64*!pi**4/(3*h*c**3)*nu11**3*mu0**2*(1/2.)
'twotwo': 2.291e-7, #64*!pi**4/(3*h*c**3)*nu22**3*mu0**2*(2/3.)
'threethree': 2.625e-7, #64*!pi**4/(3*h*c**3)*nu33**3*mu0**2*(3/4.)
'fourfour': 3.167e-7, #64*!pi**4/(3*h*c**3)*nu44**3*mu0**2*(4/5.)
}
ortho_dict = {
'oneone': False,
'twotwo': False,
'threethree': True,
'fourfour': False,
}
n_ortho = np.arange(0,28,3) # 0..3..27
n_para = np.array([x for x in range(28) if x % 3 != 0])
voff_lines_dict = {
'oneone': [19.8513, 19.3159, 7.88669, 7.46967, 7.35132, 0.460409, 0.322042,
-0.0751680, -0.213003, 0.311034, 0.192266, -0.132382, -0.250923, -7.23349,
-7.37280, -7.81526, -19.4117, -19.5500],
'twotwo':[26.5263, 26.0111, 25.9505, 16.3917, 16.3793, 15.8642, 0.562503,
0.528408, 0.523745, 0.0132820, -0.00379100, -0.0132820, -0.501831,
-0.531340, -0.589080, -15.8547, -16.3698, -16.3822, -25.9505, -26.0111,
-26.5263],
'threethree':[29.195098, 29.044147, 28.941877, 28.911408, 21.234827,
21.214619, 21.136387, 21.087456, 1.005122, 0.806082, 0.778062,
0.628569, 0.016754, -0.005589, -0.013401, -0.639734, -0.744554,
-1.031924, -21.125222, -21.203441, -21.223649, -21.076291, -28.908067,
-28.938523, -29.040794, -29.191744],
'fourfour':[ 0. , -30.49783692, 30.49783692, 0., 24.25907811,
-24.25907811, 0. ]
}
tau_wts_dict = {
'oneone': [0.0740740, 0.148148, 0.0925930, 0.166667, 0.0185190, 0.0370370,
0.0185190, 0.0185190, 0.0925930, 0.0333330, 0.300000, 0.466667,
0.0333330, 0.0925930, 0.0185190, 0.166667, 0.0740740, 0.148148],
'twotwo': [0.00418600, 0.0376740, 0.0209300, 0.0372090, 0.0260470,
0.00186000, 0.0209300, 0.0116280, 0.0106310, 0.267442, 0.499668,
0.146512, 0.0116280, 0.0106310, 0.0209300, 0.00186000, 0.0260470,
0.0372090, 0.0209300, 0.0376740, 0.00418600],
'threethree': [0.012263, 0.008409, 0.003434, 0.005494, 0.006652, 0.008852,
0.004967, 0.011589, 0.019228, 0.010387, 0.010820, 0.009482, 0.293302,
0.459109, 0.177372, 0.009482, 0.010820, 0.019228, 0.004967, 0.008852,
0.006652, 0.011589, 0.005494, 0.003434, 0.008409, 0.012263],
'fourfour': [0.2431, 0.0162, 0.0162, 0.3008, 0.0163, 0.0163, 0.3911]}
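# voff_lines_dict gives the velocity offsets (km/s) of the hyperfine components of each
# inversion transition, and tau_wts_dict their relative weights; the two are combined
# below to build the opacity profile of each line.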
def ammonia(xarr, tkin=20, tex=None, ntot=1e14, width=1,
xoff_v=0.0, fortho=0.0, tau=None, fillingfraction=None, return_tau=False,
thin=False, verbose=False, return_components=False, debug=False ):
"""
Generate a model Ammonia spectrum based on input temperatures, column, and
gaussian parameters
ntot can be specified as a column density (e.g., 10^15) or a log-column-density (e.g., 15)
tex can be specified or can be assumed LTE if unspecified, if tex>tkin, or if "thin"
is specified
"thin" uses a different parametetrization and requires only the optical depth, width, offset,
and tkin to be specified. In the 'thin' approximation, tex is not used in computation of
the partition function - LTE is implicitly assumed
If tau is specified, ntot is NOT fit but is set to a fixed value
fillingfraction is an arbitrary scaling factor to apply to the model
fortho is the ortho/(ortho+para) fraction. Note that the default value (fortho=0) corresponds to all para.
xoff_v is the velocity offset in km/s
tau refers to the optical depth of the 1-1 line. The optical depths of the
other lines are fixed relative to tau_oneone
(not implemented) if tau is specified, ntot is ignored
"""
# Convert X-units to frequency in GHz
xarr = xarr.as_unit('GHz')
if tex is not None:
if tex > tkin: # cannot have Tex > Tkin
tex = tkin
elif thin: # tex is not used in this case
tex = tkin
else:
tex = tkin
if thin:
ntot = 1e15
elif 5 < ntot < 25:
# allow ntot to be specified as a logarithm. This is
# safe because ntot < 1e10 gives a spectrum of all zeros, and the
# plausible range of columns is not outside the specified range
ntot = 10**ntot
elif (25 < ntot < 1e5) or (ntot < 5):
# these are totally invalid for log/non-log
return 0
# fillingfraction is an arbitrary scaling for the data
# The model will be (normal model) * fillingfraction
if fillingfraction is None:
fillingfraction = 1.0
ckms = 2.99792458e5
ccms = ckms*1e5
g1 = 1
g2 = 1
h = 6.6260693e-27
kb = 1.3806505e-16
mu0 = 1.476e-18 # Dipole Moment in cgs (1.476 Debeye)
# Generate Partition Functions
nlevs = 51
jv=np.arange(nlevs)
ortho = jv % 3 == 0
para = ~ortho
Jpara = jv[para]
Jortho = jv[ortho]
Brot = 298117.06e6
Crot = 186726.36e6
runspec = np.zeros(len(xarr))
tau_dict = {}
para_count = 0
ortho_count = 1 # ignore 0-0
if tau is not None and thin:
"""
Use optical depth in the 1-1 line as a free parameter
The optical depths of the other lines are then set by the kinetic temperature
Tex is still a free parameter in the final spectrum calculation at the bottom
(technically, I think this process assumes LTE; Tex should come into play in
these equations, not just the final one)
"""
dT0 = 41.5 # Energy diff between (2,2) and (1,1) in K
trot = tkin/(1+tkin/dT0*np.log(1+0.6*np.exp(-15.7/tkin)))
tau_dict['oneone'] = tau
tau_dict['twotwo'] = tau*(23.722/23.694)**2*4/3.*5/3.*np.exp(-41.5/trot)
tau_dict['threethree'] = tau*(23.8701279/23.694)**2*3/2.*14./3.*np.exp(-101.1/trot)
tau_dict['fourfour'] = tau*(24.1394169/23.694)**2*8/5.*9/3.*np.exp(-177.34/trot)
else:
"""
Column density is the free parameter. It is used in conjunction with
the full partition function to compute the optical depth in each band
Given the complexity of these equations, it would be worth my while to
comment each step carefully.
"""
Zpara = (2*Jpara+1)*np.exp(-h*(Brot*Jpara*(Jpara+1)+
(Crot-Brot)*Jpara**2)/(kb*tkin))
Zortho = 2*(2*Jortho+1)*np.exp(-h*(Brot*Jortho*(Jortho+1)+
(Crot-Brot)*Jortho**2)/(kb*tkin))
for linename in line_names:
if ortho_dict[linename]:
orthoparafrac = fortho
Z = Zortho
count = ortho_count
ortho_count += 1
else:
orthoparafrac = 1.0-fortho
Z = Zpara
count = para_count # need to treat partition function separately
para_count += 1
tau_dict[linename] = (ntot * orthoparafrac * Z[count]/(Z.sum()) / ( 1
+ np.exp(-h*freq_dict[linename]/(kb*tkin) )) * ccms**2 /
(8*np.pi*freq_dict[linename]**2) * aval_dict[linename]*
(1-np.exp(-h*freq_dict[linename]/(kb*tex))) /
(width/ckms*freq_dict[linename]*np.sqrt(2*np.pi)) )
# allow tau(11) to be specified instead of ntot
# in the thin case, this is not needed: ntot plays no role
# this process allows you to specify tau without using the approximate equations specified
# above. It should remove ntot from the calculations anyway...
if tau is not None and not thin:
tau11_temp = tau_dict['oneone']
# re-scale all optical depths so that tau is as specified, but the relative taus
# are set by the kinetic temperature and partition functions
for linename,t in tau_dict.iteritems():
tau_dict[linename] = t * tau/tau11_temp
components =[]
for linename in line_names:
voff_lines = np.array(voff_lines_dict[linename])
tau_wts = np.array(tau_wts_dict[linename])
lines = (1-voff_lines/ckms)*freq_dict[linename]/1e9
tau_wts = tau_wts / (tau_wts).sum()
nuwidth = np.abs(width/ckms*lines)
nuoff = xoff_v/ckms*lines
# tau array
tauprof = np.zeros(len(xarr))
for kk,no in enumerate(nuoff):
tauprof += (tau_dict[linename] * tau_wts[kk] *
np.exp(-(xarr+no-lines[kk])**2 / (2.0*nuwidth[kk]**2)) *
fillingfraction)
components.append( tauprof )
T0 = (h*xarr*1e9/kb) # "temperature" of wavelength
if tau is not None and thin:
#runspec = tauprof+runspec
# is there ever a case where you want to ignore the optical depth function? I think no
runspec = (T0/(np.exp(T0/tex)-1)-T0/(np.exp(T0/2.73)-1))*(1-np.exp(-tauprof))+runspec
else:
runspec = (T0/(np.exp(T0/tex)-1)-T0/(np.exp(T0/2.73)-1))*(1-np.exp(-tauprof))+runspec
if runspec.min() < 0:
raise ValueError("Model dropped below zero. That is not possible normally. Here are the input values: "+
("tex: %f " % tex) +
("tkin: %f " % tkin) +
("ntot: %f " % ntot) +
("width: %f " % width) +
("xoff_v: %f " % xoff_v) +
("fortho: %f " % fortho)
)
if verbose or debug:
print "tkin: %g tex: %g ntot: %g width: %g xoff_v: %g fortho: %g fillingfraction: %g" % (tkin,tex,ntot,width,xoff_v,fortho,fillingfraction)
if return_components:
return (T0/(np.exp(T0/tex)-1)-T0/(np.exp(T0/2.73)-1))*(1-np.exp(-1*np.array(components)))
if return_tau:
return tau_dict
return runspec
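# Minimal usage sketch (illustrative only, not part of the module; assumes `xarr` is a
# pyspeckit spectroscopic axis convertible to GHz):
#   spec = ammonia(xarr, tkin=20, tex=7, ntot=14.5, width=0.5, xoff_v=0.0, fortho=0.5)
#   taus = ammonia(xarr, tkin=20, ntot=14.5, width=0.5, return_tau=True)  # per-line opacities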
class ammonia_model(model.SpectralModel):
def __init__(self,npeaks=1,npars=6,multisingle='multi',**kwargs):
self.npeaks = npeaks
self.npars = npars
self._default_parnames = ['tkin','tex','ntot','width','xoff_v','fortho']
self.parnames = copy.copy(self._default_parnames)
# all fitters must have declared modelfuncs, which should take the fitted pars...
self.modelfunc = ammonia
self.n_modelfunc = self.n_ammonia
# for fitting ammonia simultaneously with a flat background
self.onepeakammonia = fitter.vheightmodel(ammonia)
#self.onepeakammoniafit = self._fourparfitter(self.onepeakammonia)
if multisingle in ('multi','single'):
self.multisingle = multisingle
else:
raise Exception("multisingle must be multi or single")
self.default_parinfo = None
self.default_parinfo, kwargs = self._make_parinfo(**kwargs)
# enforce ammonia-specific parameter limits
for par in self.default_parinfo:
if 'tex' in par.parname.lower():
par.limited = (True,par.limited[1])
par.limits = (max(par.limits[0],2.73), par.limits[1])
if 'tkin' in par.parname.lower():
par.limited = (True,par.limited[1])
par.limits = (max(par.limits[0],2.73), par.limits[1])
if 'width' in par.parname.lower():
par.limited = (True,par.limited[1])
par.limits = (max(par.limits[0],0), par.limits[1])
if 'fortho' in par.parname.lower():
par.limited = (True,True)
if par.limits[1] != 0:
par.limits = (max(par.limits[0],0), min(par.limits[1],1))
else:
par.limits = (max(par.limits[0],0), 1)
if 'ntot' in par.parname.lower():
par.limited = (True,par.limited[1])
par.limits = (max(par.limits[0],0), par.limits[1])
self.parinfo = copy.copy(self.default_parinfo)
self.modelfunc_kwargs = kwargs
# lower case? self.modelfunc_kwargs.update({'parnames':self.parinfo.parnames})
def __call__(self,*args,**kwargs):
#if 'use_lmfit' in kwargs: kwargs.pop('use_lmfit')
use_lmfit = kwargs.pop('use_lmfit') if 'use_lmfit' in kwargs else self.use_lmfit
if use_lmfit:
return self.lmfitter(*args,**kwargs)
if self.multisingle == 'single':
return self.onepeakammoniafit(*args,**kwargs)
elif self.multisingle == 'multi':
return self.multinh3fit(*args,**kwargs)
def n_ammonia(self, pars=None, parnames=None, **kwargs):
"""
Returns a function that sums over N ammonia line profiles, where N is the length of
tkin,tex,ntot,width,xoff_v,fortho *OR* N = len(pars) / 6
The background "height" is assumed to be zero (you must "baseline" your
spectrum before fitting)
*pars* [ list ]
a list with len(pars) = (6-nfixed)n, assuming
tkin,tex,ntot,width,xoff_v,fortho repeated
*parnames* [ list ]
len(parnames) must = len(pars). parnames determine how the ammonia
function parses the arguments
"""
if hasattr(pars,'values'):
# important to treat as Dictionary, since lmfit params & parinfo both have .items
parnames,parvals = zip(*pars.items())
parnames = [p.lower() for p in parnames]
parvals = [p.value for p in parvals]
elif parnames is None:
parvals = pars
parnames = self.parnames
else:
parvals = pars
if len(pars) != len(parnames):
# this should only be needed when other codes are changing the number of peaks
# during a copy, as opposed to letting them be set by a __call__
# (n_modelfuncs = n_ammonia can be called directly)
# n_modelfuncs doesn't care how many peaks there are
if len(pars) % len(parnames) == 0:
parnames = [p for ii in range(len(pars)/len(parnames)) for p in parnames]
npars = len(parvals) / self.npeaks
else:
raise ValueError("Wrong array lengths passed to n_ammonia!")
else:
npars = len(parvals) / self.npeaks
self._components = []
def L(x):
v = np.zeros(len(x))
for jj in xrange(self.npeaks):
modelkwargs = kwargs.copy()
for ii in xrange(npars):
name = parnames[ii+jj*npars].strip('0123456789').lower()
modelkwargs.update({name:parvals[ii+jj*npars]})
v += ammonia(x,**modelkwargs)
return v
return L
def components(self, xarr, pars, hyperfine=False):
"""
Ammonia components don't follow the default, since in Galactic astronomy the hyperfine components should be well-separated.
If you want to see the individual components overlaid, you'll need to pass hyperfine to the plot_fit call
"""
comps=[]
for ii in xrange(self.npeaks):
if hyperfine:
modelkwargs = dict(zip(self.parnames[ii*self.npars:(ii+1)*self.npars],pars[ii*self.npars:(ii+1)*self.npars]))
comps.append( ammonia(xarr,return_components=True,**modelkwargs) )
else:
modelkwargs = dict(zip(self.parnames[ii*self.npars:(ii+1)*self.npars],pars[ii*self.npars:(ii+1)*self.npars]))
comps.append( [ammonia(xarr,return_components=False,**modelkwargs)] )
modelcomponents = np.concatenate(comps)
return modelcomponents
def multinh3fit(self, xax, data, npeaks=1, err=None,
params=(20,20,14,1.0,0.0,0.5),
parnames=None,
fixed=(False,False,False,False,False,False),
limitedmin=(True,True,True,True,False,True),
limitedmax=(False,False,False,False,False,True), minpars=(2.73,2.73,0,0,0,0),
parinfo=None,
maxpars=(0,0,0,0,0,1), quiet=True, shh=True, veryverbose=False, **kwargs):
"""
Fit multiple nh3 profiles (multiple can be 1)
Inputs:
xax - x axis
data - y axis
npeaks - How many nh3 profiles to fit? Default 1 (this could supersede onedgaussfit)
err - error corresponding to data
These parameters need to have length = 6*npeaks. If npeaks > 1 and length = 6, they will
be replicated npeaks times, otherwise they will be reset to defaults:
params - Fit parameters: [tkin, tex, ntot (or tau), width, offset, ortho fraction] * npeaks
If len(params) % 6 == 0, npeaks will be set to len(params) / 6
fixed - Is parameter fixed?
limitedmin/minpars - set lower limits on each parameter (default: width>0, Tex and Tkin > Tcmb)
limitedmax/maxpars - set upper limits on each parameter
parnames - default parameter names, important for setting kwargs in model ['tkin','tex','ntot','width','xoff_v','fortho']
quiet - should MPFIT output each iteration?
shh - output final parameters?
Returns:
Fit parameters
Model
Fit errors
chi2
"""
if parinfo is None:
self.npars = len(params) / npeaks
if len(params) != npeaks and (len(params) / self.npars) > npeaks:
npeaks = len(params) / self.npars
self.npeaks = npeaks
if isinstance(params,np.ndarray): params=params.tolist()
# this is actually a hack, even though it's decently elegant
# somehow, parnames was being changed WITHOUT being passed as a variable
# this doesn't make sense - at all - but it happened.
# (it is possible for self.parnames to have npars*npeaks elements where
# npeaks > 1 coming into this function even though only 6 pars are specified;
# _default_parnames is the workaround)
if parnames is None: parnames = copy.copy(self._default_parnames)
partype_dict = dict(zip(['params','parnames','fixed','limitedmin','limitedmax','minpars','maxpars'],
[params,parnames,fixed,limitedmin,limitedmax,minpars,maxpars]))
# make sure all various things are the right length; if they're not, fix them using the defaults
for partype,parlist in partype_dict.iteritems():
if len(parlist) != self.npars*self.npeaks:
# if you leave the defaults, or enter something that can be multiplied by npars to get to the
# right number of gaussians, it will just replicate
if len(parlist) == self.npars:
partype_dict[partype] *= npeaks
elif len(parlist) > self.npars:
# DANGER: THIS SHOULD NOT HAPPEN!
print "WARNING! Input parameters were longer than allowed for variable ",parlist
partype_dict[partype] = partype_dict[partype][:self.npars]
elif parlist==params: # this instance shouldn't really be possible
partype_dict[partype] = [20,20,1e10,1.0,0.0,0.5] * npeaks
elif parlist==fixed:
partype_dict[partype] = [False] * len(params)
elif parlist==limitedmax: # only fortho, fillingfraction have upper limits
partype_dict[partype] = (np.array(parnames) == 'fortho') + (np.array(parnames) == 'fillingfraction')
elif parlist==limitedmin: # no physical values can be negative except velocity
partype_dict[partype] = (np.array(parnames) != 'xoff_v')
elif parlist==minpars: # all have minima of zero except kinetic temperature, which can't be below CMB. Excitation temperature technically can be, but not in this model
partype_dict[partype] = ((np.array(parnames) == 'tkin') + (np.array(parnames) == 'tex')) * 2.73
elif parlist==maxpars: # fractions have upper limits of 1.0
partype_dict[partype] = ((np.array(parnames) == 'fortho') + (np.array(parnames) == 'fillingfraction')).astype('float')
elif parlist==parnames: # assumes the right number of parnames (essential)
partype_dict[partype] = list(parnames) * self.npeaks
if len(parnames) != len(partype_dict['params']):
raise ValueError("Wrong array lengths AFTER fixing them")
# used in components. Is this just a hack?
self.parnames = partype_dict['parnames']
parinfo = [ {'n':ii, 'value':partype_dict['params'][ii],
'limits':[partype_dict['minpars'][ii],partype_dict['maxpars'][ii]],
'limited':[partype_dict['limitedmin'][ii],partype_dict['limitedmax'][ii]], 'fixed':partype_dict['fixed'][ii],
'parname':partype_dict['parnames'][ii]+str(ii/self.npars),
'mpmaxstep':float(partype_dict['parnames'][ii] in ('tex','tkin')), # must force small steps in temperature (True = 1.0)
'error': 0}
for ii in xrange(len(partype_dict['params'])) ]
# hack: remove 'fixed' pars
parinfo_with_fixed = parinfo
parinfo = [p for p in parinfo_with_fixed if not p['fixed']]
fixed_kwargs = dict((p['parname'].strip("0123456789").lower(),p['value']) for p in parinfo_with_fixed if p['fixed'])
# don't do this - it breaks the NEXT call because npars != len(parnames) self.parnames = [p['parname'] for p in parinfo]
# this is OK - not a permanent change
parnames = [p['parname'] for p in parinfo]
# not OK self.npars = len(parinfo)/self.npeaks
parinfo = ParinfoList([Parinfo(p) for p in parinfo], preserve_order=True)
#import pdb; pdb.set_trace()
else:
self.parinfo = ParinfoList([Parinfo(p) for p in parinfo], preserve_order=True)
parinfo_with_fixed = None
fixed_kwargs = {}
fitfun_kwargs = dict(kwargs.items()+fixed_kwargs.items())
npars = len(parinfo)/self.npeaks
# (fortho0 is not fortho)
# this doesn't work if parinfo_with_fixed is not None:
# this doesn't work for p in parinfo_with_fixed:
# this doesn't work # users can change the defaults while holding them fixed
# this doesn't work if p['fixed']:
# this doesn't work kwargs.update({p['parname']:p['value']})
def mpfitfun(x,y,err):
if err is None:
def f(p,fjac=None): return [0,(y-self.n_ammonia(pars=p, parnames=parinfo.parnames, **fitfun_kwargs)(x))]
else:
def f(p,fjac=None): return [0,(y-self.n_ammonia(pars=p, parnames=parinfo.parnames, **fitfun_kwargs)(x))/err]
return f
if veryverbose:
print "GUESSES: "
print "\n".join(["%s: %s" % (p['parname'],p['value']) for p in parinfo])
mp = mpfit(mpfitfun(xax,data,err),parinfo=parinfo,quiet=quiet)
mpp = mp.params
if mp.perror is not None: mpperr = mp.perror
else: mpperr = mpp*0
chi2 = mp.fnorm
if mp.status == 0:
raise Exception(mp.errmsg)
for i,p in enumerate(mpp):
parinfo[i]['value'] = p
parinfo[i]['error'] = mpperr[i]
if not shh:
print "Fit status: ",mp.status
print "Fit message: ",mp.errmsg
print "Final fit values: "
for i,p in enumerate(mpp):
print parinfo[i]['parname'],p," +/- ",mpperr[i]
print "Chi2: ",mp.fnorm," Reduced Chi2: ",mp.fnorm/len(data)," DOF:",len(data)-len(mpp)
if any(['tex' in s for s in parnames]) and any(['tkin' in s for s in parnames]):
texnum = (i for i,s in enumerate(parnames) if 'tex' in s)
tkinnum = (i for i,s in enumerate(parnames) if 'tkin' in s)
for txn,tkn in zip(texnum,tkinnum):
if mpp[txn] > mpp[tkn]: mpp[txn] = mpp[tkn] # force Tex>Tkin to Tex=Tkin (already done in n_ammonia)
self.mp = mp
if parinfo_with_fixed is not None:
# set self.parinfo, preserving the 'fixed' parameters
# ORDER MATTERS!
for p in parinfo:
parinfo_with_fixed[p['n']] = p
self.parinfo = ParinfoList([Parinfo(p) for p in parinfo_with_fixed], preserve_order=True)
else:
self.parinfo = parinfo
self.parinfo = ParinfoList([Parinfo(p) for p in parinfo], preserve_order=True)
# I don't THINK these are necessary?
#self.parinfo = parinfo
#self.parinfo = ParinfoList([Parinfo(p) for p in self.parinfo])
# need to restore the fixed parameters....
# though the above commented out section indicates that I've done and undone this dozens of times now
# (a test has been added to test_nh3.py)
# this was NEVER included or tested because it breaks the order
#for par in parinfo_with_fixed:
# if par.parname not in self.parinfo.keys():
# self.parinfo.append(par)
self.mpp = self.parinfo.values
self.mpperr = self.parinfo.errors
self.mppnames = self.parinfo.names
self.model = self.n_ammonia(pars=self.mpp, parnames=self.mppnames, **kwargs)(xax)
#if self.model.sum() == 0:
# print "DON'T FORGET TO REMOVE THIS ERROR!"
# raise ValueError("Model is zeros.")
indiv_parinfo = [self.parinfo[jj*self.npars:(jj+1)*self.npars] for jj in xrange(len(self.parinfo)/self.npars)]
modelkwargs = [
dict([(p['parname'].strip("0123456789").lower(),p['value']) for p in pi])
for pi in indiv_parinfo]
self.tau_list = [ammonia(xax,return_tau=True,**mk) for mk in modelkwargs]
return self.mpp,self.model,self.mpperr,chi2
def moments(self, Xax, data, negamp=None, veryverbose=False, **kwargs):
"""
Returns a very simple and likely incorrect guess
"""
# TKIN, TEX, ntot, width, center, ortho fraction
return [20,10, 1e15, 1.0, 0.0, 1.0]
def annotations(self):
from decimal import Decimal # for formatting
tex_key = {'tkin':'T_K','tex':'T_{ex}','ntot':'N','fortho':'F_o','width':'\\sigma','xoff_v':'v','fillingfraction':'FF','tau':'\\tau_{1-1}'}
# small hack below: don't quantize if error > value. We want to see the values.
label_list = []
for pinfo in self.parinfo:
parname = tex_key[pinfo['parname'].strip("0123456789").lower()]
parnum = int(pinfo['parname'][-1])
if pinfo['fixed']:
formatted_value = "%s" % pinfo['value']
pm = ""
formatted_error=""
else:
formatted_value = Decimal("%g" % pinfo['value']).quantize(Decimal("%0.2g" % (min(pinfo['error'],pinfo['value']))))
pm = "$\\pm$"
formatted_error = Decimal("%g" % pinfo['error']).quantize(Decimal("%0.2g" % pinfo['error']))
label = "$%s(%i)$=%8s %s %8s" % (parname, parnum, formatted_value, pm, formatted_error)
label_list.append(label)
labels = tuple(mpcb.flatten(label_list))
return labels
class ammonia_model_vtau(ammonia_model):
def __init__(self,**kwargs):
super(ammonia_model_vtau,self).__init__()
self.parnames = ['tkin','tex','tau','width','xoff_v','fortho']
def moments(self, Xax, data, negamp=None, veryverbose=False, **kwargs):
"""
Returns a very simple and likely incorrect guess
"""
# TKIN, TEX, ntot, width, center, ortho fraction
return [20,10, 1, 1.0, 0.0, 1.0]
def __call__(self,*args,**kwargs):
if self.multisingle == 'single':
return self.onepeakammoniafit(*args,**kwargs)
elif self.multisingle == 'multi':
return self.multinh3fit(*args,**kwargs)
| mit |
jakevdp/seaborn | doc/sphinxext/ipython_directive.py | 37 | 37557 | # -*- coding: utf-8 -*-
"""
Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives). For example, to enable syntax highlighting
and the IPython directive::
extensions = ['IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive']
The IPython directive outputs code-blocks with the language 'ipython'. So
if you do not have the syntax highlighting extension enabled as well, then
all rendered code-blocks will be uncolored. By default this directive assumes
that your prompts are unchanged IPython ones, but this can be customized.
The configurable options that can be placed in conf.py are:
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
The string to represent the IPython prompt in the generated ReST. The
default is 'Out [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_mplbackend:
The string which specifies if the embedded Sphinx shell should import
Matplotlib and set the backend. The value specifies a backend that is
passed to `matplotlib.use()` before any lines in `ipython_execlines` are
executed. If not specified in conf.py, then the default value of 'agg' is
used. To use the IPython directive without matplotlib as a dependency, set
the value to `None`. It may end up that matplotlib is still imported
if the user specifies so in `ipython_execlines` or makes use of the
@savefig pseudo decorator.
ipython_execlines:
A list of strings to be exec'd in the embedded Sphinx shell. Typical
usage is to make certain packages always available. Set this to an empty
list if you wish to have no imports always available. If specified in
conf.py as `None`, then it has the effect of making no imports available.
If omitted from conf.py altogether, then the default value of
['import numpy as np', 'import matplotlib.pyplot as plt'] is used.
ipython_holdcount
When the @suppress pseudo-decorator is used, the execution count can be
incremented or not. The default behavior is to hold the execution count,
corresponding to a value of `True`. Set this to `False` to increment
the execution count after each suppressed command.
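For instance, a project's ``conf.py`` might configure the directive like this
(illustrative values only)::

    ipython_savefig_dir = 'savefig'
    ipython_execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
    ipython_holdcount = True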
As an example, to use the IPython directive when `matplotlib` is not available,
one sets the backend to `None`::
ipython_mplbackend = None
An example usage of the directive is:
.. code-block:: rst
.. ipython::
In [1]: x = 1
In [2]: y = x**2
In [3]: print(y)
See http://matplotlib.org/sampledoc/ipython_directive.html for additional
documentation.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: orignal author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
from __future__ import print_function
from __future__ import unicode_literals
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import os
import re
import sys
import tempfile
import ast
from pandas.compat import zip, range, map, lmap, u, cStringIO as StringIO
import warnings
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
# Our own
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
from IPython.utils.py3compat import PY3
if PY3:
from io import StringIO
text_type = str
else:
from StringIO import StringIO
text_type = unicode
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
nextline = nextline[Nc:]
if nextline and nextline[0] == ' ':
nextline = nextline[1:]
inputline += '\n' + nextline
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
class DecodingStringIO(StringIO, object):
def __init__(self,buf='',encodings=('utf8',), *args, **kwds):
super(DecodingStringIO, self).__init__(buf, *args, **kwds)
self.set_encodings(encodings)
def set_encodings(self, encodings):
self.encodings = encodings
def write(self,data):
if isinstance(data, text_type):
return super(DecodingStringIO, self).write(data)
else:
for enc in self.encodings:
try:
data = data.decode(enc)
return super(DecodingStringIO, self).write(data)
except :
pass
# default to brute utf8 if no encoding succeeded
return super(DecodingStringIO, self).write(data.decode('utf8', 'replace'))
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self, exec_lines=None,state=None):
self.cout = DecodingStringIO(u'')
if exec_lines is None:
exec_lines = []
self.state = state
# Create config object for IPython
config = Config()
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize global ipython, but don't start its mainloop.
# This will persist across different EmbededSphinxShell instances.
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done after instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# Optionally, provide more detailed information to shell.
self.directive = None
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
# Prepopulate the namespace.
for line in exec_lines:
self.process_input_line(line, store_history=False)
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
try:
source_raw = splitter.source_raw_reset()[1]
except:
# recent ipython #4504
source_raw = splitter.raw_reset()
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""
Process data block for INPUT token.
"""
decorator, input, rest = data
image_file = None
image_directive = None
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = (decorator is not None and \
decorator.startswith('@doctest')) or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_okexcept = decorator=='@okexcept' or self.is_okexcept
is_okwarning = decorator=='@okwarning' or self.is_okwarning
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
# set the encodings to be used by DecodingStringIO
# to convert the execution output into unicode if
        # needed. This attribute is set by IPythonDirective.run()
        # based on the specified block options, defaulting to ['utf8'].
self.cout.set_encodings(self.output_encoding)
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
# Hold the execution count, if requested to do so.
if is_suppress and self.hold_count:
store_history = False
else:
store_history = True
# Note: catch_warnings is not thread safe
with warnings.catch_warnings(record=True) as ws:
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i == 0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
# context information
filename = self.state.document.current_source
lineno = self.state.document.current_line
# output any exceptions raised during execution to stdout
# unless :okexcept: has been specified.
if not is_okexcept and "Traceback" in output:
s = "\nException in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write(output)
sys.stdout.write('<<<' + ('-' * 73) + '\n\n')
# output any warning raised during execution to stdout
# unless :okwarning: has been specified.
if not is_okwarning:
for w in ws:
s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write('-' * 76 + '\n')
s=warnings.formatwarning(w.message, w.category,
w.filename, w.lineno, w.line)
sys.stdout.write(s)
sys.stdout.write('<<<' + ('-' * 73) + '\n')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, decorator, image_file,
image_directive)
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, decorator, image_file):
"""
Process data block for OUTPUT token.
"""
TAB = ' ' * 4
if is_doctest and output is not None:
found = output
found = found.strip()
submitted = data.strip()
if self.directive is None:
source = 'Unavailable'
content = 'Unavailable'
else:
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
# Make sure the output contains the output prompt.
ind = found.find(output_prompt)
if ind < 0:
e = ('output does not contain output prompt\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'Input line(s):\n{TAB}{2}\n\n'
'Output line(s):\n{TAB}{3}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), TAB=TAB)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
# Handle the actual doctest comparison.
if decorator.strip() == '@doctest':
# Standard doctest
if found != submitted:
e = ('doctest failure\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'On input line(s):\n{TAB}{2}\n\n'
'we found output:\n{TAB}{3}\n\n'
'instead of the expected:\n{TAB}{4}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), repr(submitted), TAB=TAB)
raise RuntimeError(e)
else:
self.custom_doctest(decorator, input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = ('plt.gcf().savefig("%s", bbox_inches="tight", '
'dpi=100)' % image_file)
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin % lineno
output_prompt = self.promptout % lineno
image_file = None
image_directive = None
for token, data in block:
if token == COMMENT:
out_data = self.process_comment(data)
elif token == INPUT:
(out_data, input_lines, output, is_doctest, decorator,
image_file, image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token == OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
decorator, image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
"""
Ensures that pyplot has been imported into the embedded IPython shell.
Also, makes sure to set the backend appropriately if not set already.
"""
# We are here if the @figure pseudo decorator was used. Thus, it's
# possible that we could be here even if python_mplbackend were set to
# `None`. That's also strange and perhaps worthy of raising an
# exception, but for now, we just set the backend to 'agg'.
if not self._pyplot_imported:
if 'matplotlib.backends' not in sys.modules:
# Then ipython_matplotlib was set to None but there was a
# call to the @figure decorator (and ipython_execlines did
# not set a backend).
#raise Exception("No backend was set, but @figure was used!")
import matplotlib
matplotlib.use('agg')
# Always import pyplot into embedded shell.
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
self._pyplot_imported = True
def process_pure_python(self, content):
"""
        content is a list of strings. It is the unedited directive content.
        This runs it line by line in the InteractiveShell, prepends
        prompts as needed (capturing stderr and stdout), then returns
        the content as a list, as if it were ipython code
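        For example (illustrative only), directive content such as
            x = 1
            print(x)
        comes back as something like
            In [N]: x = 1
            In [N+1]: print(x)
        with blank lines appended after complete statements so that
        block_parser can re-parse the result.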
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
# if the next line is indented, it should be part of multiline
if len(content) > lineno + 1:
nextline = content[lineno + 1]
if len(nextline) - len(nextline.lstrip()) > 3:
continue
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
def custom_doctest(self, decorator, input_lines, found, submitted):
"""
Perform a specialized doctest.
"""
from .custom_doctests import doctests
args = decorator.split()
doctest_type = args[1]
if doctest_type in doctests:
doctests[doctest_type](self, args, input_lines, found, submitted)
else:
e = "Invalid option to @doctest: {0}".format(doctest_type)
raise Exception(e)
class IPythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
    final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
'okexcept': directives.flag,
'okwarning': directives.flag,
'output_encoding': directives.unchanged_required
}
shell = None
seen_docs = set()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
mplbackend = config.ipython_mplbackend
exec_lines = config.ipython_execlines
hold_count = config.ipython_holdcount
return (savefig_dir, source_dir, rgxin, rgxout,
promptin, promptout, mplbackend, exec_lines, hold_count)
def setup(self):
# Get configuration values.
(savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,
mplbackend, exec_lines, hold_count) = self.get_config_options()
if self.shell is None:
# We will be here many times. However, when the
# EmbeddedSphinxShell is created, its interactive shell member
# is the same for each instance.
if mplbackend:
import matplotlib
# Repeated calls to use() will not hurt us since `mplbackend`
# is the same each time.
matplotlib.use(mplbackend)
# Must be called after (potentially) importing matplotlib and
# setting its backend since exec_lines might import pylab.
self.shell = EmbeddedSphinxShell(exec_lines, self.state)
# Store IPython directive to enable better error messages
self.shell.directive = self
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
if not self.state.document.current_source in self.seen_docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
self.shell.IP.prompt_manager.width = 0
self.seen_docs.add(self.state.document.current_source)
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
self.shell.hold_count = hold_count
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
self.shell.is_okexcept = 'okexcept' in options
self.shell.is_okwarning = 'okwarning' in options
self.shell.output_encoding = [options.get('output_encoding', 'utf8')]
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython', '']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
if len(lines)>2:
if debug:
print('\n'.join(lines))
else:
# This has to do with input, not output. But if we comment
# these lines out, then no IPython code will appear in the
# final output.
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
# cleanup
self.teardown()
return []
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IPythonDirective)
app.add_config_value('ipython_savefig_dir', None, 'env')
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_promptin', 'In [%d]:', 'env')
app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')
# We could just let matplotlib pick whatever is specified as the default
# backend in the matplotlibrc file, but this would cause issues if the
# backend didn't work in headless environments. For this reason, 'agg'
# is a good default backend choice.
app.add_config_value('ipython_mplbackend', 'agg', 'env')
# If the user sets this config value to `None`, then EmbeddedSphinxShell's
# __init__ method will treat it as [].
execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
app.add_config_value('ipython_execlines', execlines, 'env')
app.add_config_value('ipython_holdcount', True, 'env')
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
@doctest float
In [154]: 0.1 + 0.2
Out[154]: 0.3
@doctest float
In [155]: np.arange(16).reshape(4,4)
Out[155]:
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
In [1]: x = np.arange(16, dtype=float).reshape(4,4)
In [2]: x[0,0] = np.inf
In [3]: x[0,1] = np.nan
@doctest float
In [4]: x
Out[4]:
array([[ inf, nan, 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
""",
]
    # skip the first example, which depends on a local file:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
IPythonDirective('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print('All OK? Check figures in _static/')
| bsd-3-clause |
jrleja/bsfh | misc/timings_pyfsps.py | 3 | 4274 | #compare a lookup table of spectra at ages and metallicities to
#calls to fsps.sps.get_spectrum() for different metallicities
import time, os, subprocess, re, sys
import numpy as np
#import matplotlib.pyplot as pl
import fsps
from prospect import sources as sps_basis
from prospect.models import sedmodel
def run_command(cmd):
"""
Open a child process, and return its exit status and stdout.
"""
child = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out = [s for s in child.stdout]
w = child.wait()
return os.WEXITSTATUS(w), out
# Check to make sure that the required environment variable is present.
try:
ev = os.environ["SPS_HOME"]
except KeyError:
    raise ImportError("You need to have the SPS_HOME environment variable set")
# Check the SVN revision number.
cmd = ["svnversion", ev]
stat, out = run_command(" ".join(cmd))
fsps_vers = int(re.match("^([0-9])+", out[0]).group(0))
sps = fsps.StellarPopulation(zcontinuous=True)
print('FSPS version = {}'.format(fsps_vers))
print('Zs={0}, N_lambda={1}'.format(sps.zlegend, len(sps.wavelengths)))
print('single age')
def spec_from_fsps(z, t, s):
t0 = time.time()
sps.params['logzsol'] = z
sps.params['sigma_smooth'] = s
sps.params['tage'] = t
wave, spec = sps.get_spectrum(peraa=True, tage = sps.params['tage'])
#print(spec.shape)
return time.time()-t0
def mags_from_fsps(z, t, s):
t0 = time.time()
sps.params['zred']=t
sps.params['logzsol'] = z
sps.params['sigma_smooth'] = s
sps.params['tage'] = t
mags = sps.get_mags(tage = sps.params['tage'], redshift=0.0)
#print(spec.shape)
return time.time()-t0
def spec_from_ztinterp(z, t, s):
t0 = time.time()
sps.params['logzsol'] = z
sps.params['sigma_smooth'] = s
sps.params['tage'] = t
sps.params['imf3'] = s
spec, m, l = sps.ztinterp(sps.params['logzsol'], sps.params['tage'], peraa=True)
#print(spec.shape)
return time.time()-t0
if sys.argv[1] == 'mags':
from_fsps = mags_from_fsps
print('timing get_mags')
print('nbands = {}'.format(len(sps.get_mags(tage=1.0))))
elif sys.argv[1] == 'spec':
from_fsps = spec_from_fsps
print('timing get_spectrum')
elif sys.argv[1] == 'ztinterp':
from_fsps = spec_from_ztinterp
print('timing get_spectrum')
elif sys.argv[1] == 'sedpy':
from sedpy import observate
nbands = len(sps.get_mags(tage=1.0))
fnames = nbands * ['sdss_r0']
filters = observate.load_filters(fnames)
def mags_from_sedpy(z, t, s):
t0 = time.time()
sps.params['logzsol'] = z
sps.params['sigma_smooth'] = s
sps.params['tage'] = t
wave, spec = sps.get_spectrum(peraa=True,
tage = sps.params['tage'])
mags = observate.getSED(wave, spec, filters)
return time.time()-t0
from_fsps = mags_from_sedpy
sps.params['add_neb_emission'] = False
sps.params['smooth_velocity'] = True
sps.params['sfh'] = 0
ntry = 30
zz = np.random.uniform(-1,0,ntry)
tt = np.random.uniform(0.1,4,ntry)
ss = np.random.uniform(1,2.5,ntry)
#make sure all z's already compiled
_ =[from_fsps(z, 1.0, 0.0) for z in [-1, -0.8, -0.6, -0.4, -0.2, 0.0]]
all_dur = []
print('no neb emission:')
dur_many = np.zeros(ntry)
for i in xrange(ntry):
dur_many[i] = from_fsps(zz[i], tt[i], ss[i])
print('<t/call>={0}s, sigma_t={1}s'.format(dur_many.mean(), dur_many.std()))
all_dur += [dur_many]
print('no neb emission, no smooth:')
dur_many = np.zeros(ntry)
for i in xrange(ntry):
dur_many[i] = from_fsps(zz[i], tt[i], 0.0)
print('<t/call>={0}s, sigma_t={1}s'.format(dur_many.mean(), dur_many.std()))
all_dur += [dur_many]
sps.params['add_neb_emission'] = True
print('neb emission:')
dur_many = np.zeros(ntry)
for i in xrange(ntry):
dur_many[i] = from_fsps(zz[i], tt[i], ss[i])
print('<t/call>={0}s, sigma_t={1}s'.format(dur_many.mean(), dur_many.std()))
all_dur += [dur_many]
print('neb emission, no smooth:')
dur_many = np.zeros(ntry)
for i in xrange(ntry):
dur_many[i] = from_fsps(zz[i], tt[i], 0.0)
print('<t/call>={0}s, sigma_t={1}s'.format(dur_many.mean(), dur_many.std()))
all_dur += [dur_many]
| mit |
ClinicalGraphics/scikit-image | doc/examples/xx_applications/plot_morphology.py | 6 | 8329 | """
=======================
Morphological Filtering
=======================
Morphological image processing is a collection of non-linear operations related
to the shape or morphology of features in an image, such as boundaries,
skeletons, etc. In any given technique, we probe an image with a small shape or
template called a structuring element, which defines the region of interest or
neighborhood around a pixel.
In this document we outline the following basic morphological operations:
1. Erosion
2. Dilation
3. Opening
4. Closing
5. White Tophat
6. Black Tophat
7. Skeletonize
8. Convex Hull
To get started, let's load an image using ``io.imread``. Note that morphology
functions only work on gray-scale or binary images, so we set ``as_grey=True``.
"""
import matplotlib.pyplot as plt
from skimage.data import data_dir
from skimage.util import img_as_ubyte
from skimage import io
phantom = img_as_ubyte(io.imread(data_dir+'/phantom.png', as_grey=True))
fig, ax = plt.subplots()
ax.imshow(phantom, cmap=plt.cm.gray)
"""
.. image:: PLOT2RST.current_figure
Let's also define a convenience function for plotting comparisons:
"""
def plot_comparison(original, filtered, filter_name):
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4), sharex=True, sharey=True)
ax1.imshow(original, cmap=plt.cm.gray)
ax1.set_title('original')
ax1.axis('off')
ax1.set_adjustable('box-forced')
ax2.imshow(filtered, cmap=plt.cm.gray)
ax2.set_title(filter_name)
ax2.axis('off')
ax2.set_adjustable('box-forced')
"""
Erosion
=======
Morphological ``erosion`` sets a pixel at (i, j) to the *minimum over all
pixels in the neighborhood centered at (i, j)*. The structuring element,
``selem``, passed to ``erosion`` is a boolean array that describes this
neighborhood. Below, we use ``disk`` to create a circular structuring element,
which we use for most of the following examples.
"""
from skimage.morphology import erosion, dilation, opening, closing, white_tophat
from skimage.morphology import black_tophat, skeletonize, convex_hull_image
from skimage.morphology import disk
selem = disk(6)
eroded = erosion(phantom, selem)
plot_comparison(phantom, eroded, 'erosion')
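"""
The strength of the effect depends on the size of the structuring element.
For a quick comparison, here is the same erosion with a larger disk
(``disk(12)`` is an arbitrary illustrative choice):
"""
eroded_more = erosion(phantom, disk(12))
plot_comparison(phantom, eroded_more, 'erosion with disk(12)')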
"""
.. image:: PLOT2RST.current_figure
Notice how the white boundary of the image disappears or gets eroded as we
increase the size of the disk. Also notice the increase in size of the two
black ellipses in the center and the disappearance of the 3 light grey
patches in the lower part of the image.
Dilation
========
Morphological ``dilation`` sets a pixel at (i, j) to the *maximum over all
pixels in the neighborhood centered at (i, j)*. Dilation enlarges bright
regions and shrinks dark regions.
"""
dilated = dilation(phantom, selem)
plot_comparison(phantom, dilated, 'dilation')
"""
.. image:: PLOT2RST.current_figure
Notice how the white boundary of the image thickens, or gets dilated, as we
increase the size of the disk. Also notice the decrease in size of the two
black ellipses in the centre, and the thickening of the light grey circle in
the center and the 3 patches in the lower part of the image.
Opening
=======
Morphological ``opening`` on an image is defined as an *erosion followed by a
dilation*. Opening can remove small bright spots (i.e. "salt") and connect
small dark cracks.
"""
opened = opening(phantom, selem)
plot_comparison(phantom, opened, 'opening')
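"""
To see the "salt" removal in action, we can sprinkle a few isolated bright
pixels onto the image and open the result; the speckles vanish while the
larger structures survive (the noise pattern below is an arbitrary
illustrative construction):
"""
import numpy as np
rng = np.random.RandomState(0)
noisy = phantom.copy()
rows = rng.randint(0, phantom.shape[0], 200)
cols = rng.randint(0, phantom.shape[1], 200)
noisy[rows, cols] = 255  # isolated bright pixels ("salt")
plot_comparison(noisy, opening(noisy, selem), 'opening removes salt')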
"""
.. image:: PLOT2RST.current_figure
Since ``opening`` an image starts with an erosion operation, light regions that
are *smaller* than the structuring element are removed. The dilation operation
that follows ensures that light regions that are *larger* than the structuring
element retain their original size. Notice how the light and dark shapes in the
center retain their original thickness, but the 3 lighter patches at the bottom get
completely eroded. The size dependence is highlighted by the outer white ring:
The parts of the ring thinner than the structuring element were completely
erased, while the thicker region at the top retains its original thickness.
Closing
=======
Morphological ``closing`` on an image is defined as a *dilation followed by an
erosion*. Closing can remove small dark spots (i.e. "pepper") and connect
small bright cracks.
To illustrate this more clearly, let's add a small crack to the white border:
"""
phantom = img_as_ubyte(io.imread(data_dir+'/phantom.png', as_grey=True))
phantom[10:30, 200:210] = 0
closed = closing(phantom, selem)
plot_comparison(phantom, closed, 'closing')
"""
.. image:: PLOT2RST.current_figure
Since ``closing`` an image starts with a dilation operation, dark regions
that are *smaller* than the structuring element are removed. The dilation
operation that follows ensures that dark regions that are *larger* than the
structuring element retain their original size. Notice how the white ellipses
at the bottom get connected because of dilation, but other dark regions retain
their original sizes. Also notice how the crack we added is mostly removed.
White tophat
============
The ``white_tophat`` of an image is defined as the *image minus its
morphological opening*. This operation returns the bright spots of the image
that are smaller than the structuring element.
To make things interesting, we'll add bright and dark spots to the image:
"""
phantom = img_as_ubyte(io.imread(data_dir+'/phantom.png', as_grey=True))
phantom[340:350, 200:210] = 255
phantom[100:110, 200:210] = 0
w_tophat = white_tophat(phantom, selem)
plot_comparison(phantom, w_tophat, 'white tophat')
"""
.. image:: PLOT2RST.current_figure
As you can see, the 10-pixel wide white square is highlighted since it is
smaller than the structuring element. Also, the thin, white edges around most
of the ellipse are retained because they're smaller than the structuring
element, but the thicker region at the top disappears.
Black tophat
============
The ``black_tophat`` of an image is defined as its morphological **closing
minus the original image**. This operation returns the *dark spots of the
image that are smaller than the structuring element*.
"""
b_tophat = black_tophat(phantom, selem)
plot_comparison(phantom, b_tophat, 'black tophat')
"""
.. image:: PLOT2RST.current_figure
As you can see, the 10-pixel wide black square is highlighted since it is
smaller than the structuring element.
Duality
-------
As you should have noticed, many of these operations are simply the reverse
of another operation. This duality can be summarized as follows:
1. Erosion <-> Dilation
2. Opening <-> Closing
3. White tophat <-> Black tophat
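For a symmetric structuring element, the first of these dualities can be
checked numerically: eroding an image is equivalent to complementing it,
dilating, and complementing the result (a small illustrative check;
``duality_holds`` is not used elsewhere in this example):
"""
import numpy as np
duality_holds = np.array_equal(erosion(phantom, selem),
                               255 - dilation(255 - phantom, selem))
print('erosion is the dual of dilation:', duality_holds)
"""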
Skeletonize
===========
Thinning is used to reduce each connected component in a binary image to a
*single-pixel wide skeleton*. It is important to note that this is performed
on binary images only.
"""
from skimage import img_as_bool
horse = ~img_as_bool(io.imread(data_dir+'/horse.png', as_grey=True))
sk = skeletonize(horse)
plot_comparison(horse, sk, 'skeletonize')
"""
.. image:: PLOT2RST.current_figure
As the name suggests, this technique is used to thin the image down to a
1-pixel-wide skeleton by applying thinning successively.
Convex hull
===========
The ``convex_hull_image`` is the *set of pixels included in the smallest
convex polygon that surrounds all white pixels in the input image*. Again note
that this is also performed on binary images.
"""
hull1 = convex_hull_image(horse)
plot_comparison(horse, hull1, 'convex hull')
"""
.. image:: PLOT2RST.current_figure
As the figure illustrates, ``convex_hull_image`` gives the smallest convex
polygon that completely covers the white (``True``) pixels in the image.
If we add a small grain to the image, we can see how the convex hull adapts to
enclose that grain:
"""
import numpy as np
horse2 = np.copy(horse)
horse2[45:50, 75:80] = 1
hull2 = convex_hull_image(horse2)
plot_comparison(horse2, hull2, 'convex hull')
"""
.. image:: PLOT2RST.current_figure
Additional Resources
====================
1. `MathWorks tutorial on morphological processing
<http://www.mathworks.com/help/images/morphology-fundamentals-dilation-and-erosion.html>`_
2. `Auckland university's tutorial on Morphological Image Processing
<http://www.cs.auckland.ac.nz/courses/compsci773s1c/lectures/ImageProcessing-html/topic4.htm>`_
3. http://en.wikipedia.org/wiki/Mathematical_morphology
"""
plt.show()
| bsd-3-clause |
allanino/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_tkagg.py | 69 | 24593 | # Todd Miller jmiller@stsci.edu
from __future__ import division
import os, sys, math
import Tkinter as Tk, FileDialog
import tkagg # Paint image to Tk photo blitter extension
from backend_agg import FigureCanvasAgg
import os.path
import matplotlib
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
import matplotlib.windowing as windowing
from matplotlib.widgets import SubplotTool
import matplotlib.cbook as cbook
rcParams = matplotlib.rcParams
verbose = matplotlib.verbose
backend_version = Tk.TkVersion
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
cursord = {
cursors.MOVE: "fleur",
cursors.HAND: "hand2",
cursors.POINTER: "arrow",
cursors.SELECT_REGION: "tcross",
}
def round(x):
return int(math.floor(x+0.5))
def raise_msg_to_str(msg):
"""msg is a return arg from a raise. Join with new lines"""
if not is_string_like(msg):
msg = '\n'.join(map(str, msg))
return msg
def error_msg_tkpaint(msg, parent=None):
import tkMessageBox
tkMessageBox.showerror("matplotlib", msg)
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.show()
def show():
"""
Show all the figures and enter the gtk mainloop
This should be the last line of your script. This function sets
interactive mode to True, as detailed on
http://matplotlib.sf.net/interactive.html
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
import matplotlib
matplotlib.interactive(True)
if rcParams['tk.pythoninspect']:
os.environ['PYTHONINSPECT'] = '1'
if show._needmain:
Tk.mainloop()
show._needmain = False
show._needmain = True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
_focus = windowing.FocusManager()
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
window = Tk.Tk()
canvas = FigureCanvasTkAgg(figure, master=window)
figManager = FigureManagerTkAgg(canvas, num, window)
if matplotlib.is_interactive():
figManager.show()
return figManager
class FigureCanvasTkAgg(FigureCanvasAgg):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
}
def __init__(self, figure, master=None, resize_callback=None):
FigureCanvasAgg.__init__(self, figure)
self._idle = True
t1,t2,w,h = self.figure.bbox.bounds
w, h = int(w), int(h)
self._tkcanvas = Tk.Canvas(
master=master, width=w, height=h, borderwidth=4)
self._tkphoto = Tk.PhotoImage(
master=self._tkcanvas, width=w, height=h)
self._tkcanvas.create_image(w/2, h/2, image=self._tkphoto)
self._resize_callback = resize_callback
self._tkcanvas.bind("<Configure>", self.resize)
self._tkcanvas.bind("<Key>", self.key_press)
self._tkcanvas.bind("<Motion>", self.motion_notify_event)
self._tkcanvas.bind("<KeyRelease>", self.key_release)
for name in "<Button-1>", "<Button-2>", "<Button-3>":
self._tkcanvas.bind(name, self.button_press_event)
for name in "<ButtonRelease-1>", "<ButtonRelease-2>", "<ButtonRelease-3>":
self._tkcanvas.bind(name, self.button_release_event)
# Mouse wheel on Linux generates button 4/5 events
for name in "<Button-4>", "<Button-5>":
self._tkcanvas.bind(name, self.scroll_event)
# Mouse wheel for windows goes to the window with the focus.
# Since the canvas won't usually have the focus, bind the
# event to the window containing the canvas instead.
# See http://wiki.tcl.tk/3893 (mousewheel) for details
root = self._tkcanvas.winfo_toplevel()
root.bind("<MouseWheel>", self.scroll_event_windows)
self._master = master
self._tkcanvas.focus_set()
# a dict from func-> cbook.Scheduler threads
self.sourced = dict()
# call the idle handler
def on_idle(*ignore):
self.idle_event()
return True
# disable until you figure out how to handle threads and interrupts
#t = cbook.Idle(on_idle)
#self._tkcanvas.after_idle(lambda *ignore: t.start())
def resize(self, event):
width, height = event.width, event.height
if self._resize_callback is not None:
self._resize_callback(event)
# compute desired figure size in inches
dpival = self.figure.dpi
winch = width/dpival
hinch = height/dpival
self.figure.set_size_inches(winch, hinch)
self._tkcanvas.delete(self._tkphoto)
self._tkphoto = Tk.PhotoImage(
master=self._tkcanvas, width=width, height=height)
self._tkcanvas.create_image(width/2,height/2,image=self._tkphoto)
self.resize_event()
self.show()
def draw(self):
FigureCanvasAgg.draw(self)
tkagg.blit(self._tkphoto, self.renderer._renderer, colormode=2)
self._master.update_idletasks()
def blit(self, bbox=None):
tkagg.blit(self._tkphoto, self.renderer._renderer, bbox=bbox, colormode=2)
self._master.update_idletasks()
show = draw
def draw_idle(self):
'update drawing area only if idle'
d = self._idle
self._idle = False
def idle_draw(*args):
self.draw()
self._idle = True
if d: self._tkcanvas.after_idle(idle_draw)
def get_tk_widget(self):
"""returns the Tk widget used to implement FigureCanvasTkAgg.
Although the initial implementation uses a Tk canvas, this routine
is intended to hide that fact.
"""
return self._tkcanvas
def motion_notify_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
def button_press_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if sys.platform=='darwin':
# 2 and 3 were reversed on the OSX platform I
# tested under tkagg
if num==2: num=3
elif num==3: num=2
FigureCanvasBase.button_press_event(self, x, y, num, guiEvent=event)
def button_release_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if sys.platform=='darwin':
# 2 and 3 were reversed on the OSX platform I
# tested under tkagg
if num==2: num=3
elif num==3: num=2
FigureCanvasBase.button_release_event(self, x, y, num, guiEvent=event)
def scroll_event(self, event):
x = event.x
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if num==4: step = -1
elif num==5: step = +1
else: step = 0
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
def scroll_event_windows(self, event):
"""MouseWheel event processor"""
# need to find the window that contains the mouse
w = event.widget.winfo_containing(event.x_root, event.y_root)
if w == self._tkcanvas:
x = event.x_root - w.winfo_rootx()
y = event.y_root - w.winfo_rooty()
y = self.figure.bbox.height - y
step = event.delta/120.
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
def _get_key(self, event):
val = event.keysym_num
if val in self.keyvald:
key = self.keyvald[val]
elif val<256:
key = chr(val)
else:
key = None
return key
def key_press(self, event):
key = self._get_key(event)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
def key_release(self, event):
key = self._get_key(event)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
def flush_events(self):
self._master.update()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerTkAgg(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The tk.Toolbar
window : The tk.Window
"""
def __init__(self, canvas, num, window):
FigureManagerBase.__init__(self, canvas, num)
self.window = window
self.window.withdraw()
self.window.wm_title("Figure %d" % num)
self.canvas = canvas
self._num = num
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self.window.minsize(int(w*3/4),int(h*3/4))
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbar( canvas, self.window )
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2TkAgg( canvas, self.window )
else:
self.toolbar = None
if self.toolbar is not None:
self.toolbar.update()
self.canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
self._shown = False
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
# attach a show method to the figure for pylab ease of use
self.canvas.figure.show = lambda *args: self.show()
def resize(self, event):
width, height = event.width, event.height
self.toolbar.configure(width=width) # , height=height)
def show(self):
"""
this function doesn't segfault but causes the
PyEval_RestoreThread: NULL state bug on win32
"""
def destroy(*args):
self.window = None
Gcf.destroy(self._num)
if not self._shown: self.canvas._tkcanvas.bind("<Destroy>", destroy)
_focus = windowing.FocusManager()
if not self._shown:
self.window.deiconify()
# anim.py requires this
if sys.platform=='win32' : self.window.update()
else:
self.canvas.draw()
self._shown = True
def destroy(self, *args):
if Gcf.get_num_fig_managers()==0 and not matplotlib.is_interactive():
if self.window is not None:
self.window.quit()
if self.window is not None:
#self.toolbar.destroy()
self.window.destroy()
pass
self.window = None
def set_window_title(self, title):
self.window.wm_title(title)
class AxisMenu:
def __init__(self, master, naxes):
self._master = master
self._naxes = naxes
self._mbar = Tk.Frame(master=master, relief=Tk.RAISED, borderwidth=2)
self._mbar.pack(side=Tk.LEFT)
self._mbutton = Tk.Menubutton(
master=self._mbar, text="Axes", underline=0)
self._mbutton.pack(side=Tk.LEFT, padx="2m")
self._mbutton.menu = Tk.Menu(self._mbutton)
self._mbutton.menu.add_command(
label="Select All", command=self.select_all)
self._mbutton.menu.add_command(
label="Invert All", command=self.invert_all)
self._axis_var = []
self._checkbutton = []
for i in range(naxes):
self._axis_var.append(Tk.IntVar())
self._axis_var[i].set(1)
self._checkbutton.append(self._mbutton.menu.add_checkbutton(
label = "Axis %d" % (i+1),
variable=self._axis_var[i],
command=self.set_active))
self._mbutton.menu.invoke(self._mbutton.menu.index("Select All"))
self._mbutton['menu'] = self._mbutton.menu
self._mbar.tk_menuBar(self._mbutton)
self.set_active()
def adjust(self, naxes):
if self._naxes < naxes:
for i in range(self._naxes, naxes):
self._axis_var.append(Tk.IntVar())
self._axis_var[i].set(1)
self._checkbutton.append( self._mbutton.menu.add_checkbutton(
label = "Axis %d" % (i+1),
variable=self._axis_var[i],
command=self.set_active))
elif self._naxes > naxes:
for i in range(self._naxes-1, naxes-1, -1):
del self._axis_var[i]
self._mbutton.menu.forget(self._checkbutton[i])
del self._checkbutton[i]
self._naxes = naxes
self.set_active()
def get_indices(self):
a = [i for i in range(len(self._axis_var)) if self._axis_var[i].get()]
return a
def set_active(self):
self._master.set_active(self.get_indices())
def invert_all(self):
for a in self._axis_var:
a.set(not a.get())
self.set_active()
def select_all(self):
for a in self._axis_var:
a.set(1)
self.set_active()
class NavigationToolbar(Tk.Frame):
"""
    Public attributes
      canvas - the FigureCanvasTkAgg instance
      window - the Tk window containing the toolbar
"""
def _Button(self, text, file, command):
file = os.path.join(rcParams['datapath'], 'images', file)
im = Tk.PhotoImage(master=self, file=file)
b = Tk.Button(
master=self, text=text, padx=2, pady=2, image=im, command=command)
b._ntimage = im
b.pack(side=Tk.LEFT)
return b
def __init__(self, canvas, window):
self.canvas = canvas
self.window = window
xmin, xmax = canvas.figure.bbox.intervalx
height, width = 50, xmax-xmin
Tk.Frame.__init__(self, master=self.window,
width=width, height=height,
borderwidth=2)
self.update() # Make axes menu
self.bLeft = self._Button(
text="Left", file="stock_left.ppm",
command=lambda x=-1: self.panx(x))
self.bRight = self._Button(
text="Right", file="stock_right.ppm",
command=lambda x=1: self.panx(x))
self.bZoomInX = self._Button(
text="ZoomInX",file="stock_zoom-in.ppm",
command=lambda x=1: self.zoomx(x))
self.bZoomOutX = self._Button(
text="ZoomOutX", file="stock_zoom-out.ppm",
command=lambda x=-1: self.zoomx(x))
self.bUp = self._Button(
text="Up", file="stock_up.ppm",
command=lambda y=1: self.pany(y))
self.bDown = self._Button(
text="Down", file="stock_down.ppm",
command=lambda y=-1: self.pany(y))
self.bZoomInY = self._Button(
text="ZoomInY", file="stock_zoom-in.ppm",
command=lambda y=1: self.zoomy(y))
self.bZoomOutY = self._Button(
text="ZoomOutY",file="stock_zoom-out.ppm",
command=lambda y=-1: self.zoomy(y))
self.bSave = self._Button(
text="Save", file="stock_save_as.ppm",
command=self.save_figure)
self.pack(side=Tk.BOTTOM, fill=Tk.X)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def panx(self, direction):
for a in self._active:
a.xaxis.pan(direction)
self.canvas.draw()
def pany(self, direction):
for a in self._active:
a.yaxis.pan(direction)
self.canvas.draw()
def zoomx(self, direction):
for a in self._active:
a.xaxis.zoom(direction)
self.canvas.draw()
def zoomy(self, direction):
for a in self._active:
a.yaxis.zoom(direction)
self.canvas.draw()
def save_figure(self):
fs = FileDialog.SaveFileDialog(master=self.window,
title='Save the figure')
try:
self.lastDir
except AttributeError:
self.lastDir = os.curdir
fname = fs.go(dir_or_file=self.lastDir) # , pattern="*.png")
if fname is None: # Cancel
return
self.lastDir = os.path.dirname(fname)
try:
self.canvas.print_figure(fname)
except IOError, msg:
err = '\n'.join(map(str, msg))
msg = 'Failed to save %s: Error msg was\n\n%s' % (
fname, err)
error_msg_tkpaint(msg)
def update(self):
_focus = windowing.FocusManager()
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
if not hasattr(self, "omenu"):
self.set_active(range(naxes))
self.omenu = AxisMenu(master=self, naxes=naxes)
else:
self.omenu.adjust(naxes)
class NavigationToolbar2TkAgg(NavigationToolbar2, Tk.Frame):
"""
    Public attributes
      canvas - the FigureCanvasTkAgg instance
      window - the Tk window containing the toolbar
"""
def __init__(self, canvas, window):
self.canvas = canvas
self.window = window
self._idle = True
#Tk.Frame.__init__(self, master=self.canvas._tkcanvas)
NavigationToolbar2.__init__(self, canvas)
def destroy(self, *args):
del self.message
Tk.Frame.destroy(self, *args)
def set_message(self, s):
self.message.set(s)
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y0 = height-y0
y1 = height-y1
try: self.lastrect
except AttributeError: pass
else: self.canvas._tkcanvas.delete(self.lastrect)
self.lastrect = self.canvas._tkcanvas.create_rectangle(x0, y0, x1, y1)
#self.canvas.draw()
def release(self, event):
try: self.lastrect
except AttributeError: pass
else:
self.canvas._tkcanvas.delete(self.lastrect)
del self.lastrect
def set_cursor(self, cursor):
self.window.configure(cursor=cursord[cursor])
def _Button(self, text, file, command):
file = os.path.join(rcParams['datapath'], 'images', file)
im = Tk.PhotoImage(master=self, file=file)
b = Tk.Button(
master=self, text=text, padx=2, pady=2, image=im, command=command)
b._ntimage = im
b.pack(side=Tk.LEFT)
return b
def _init_toolbar(self):
xmin, xmax = self.canvas.figure.bbox.intervalx
height, width = 50, xmax-xmin
Tk.Frame.__init__(self, master=self.window,
width=width, height=height,
borderwidth=2)
self.update() # Make axes menu
self.bHome = self._Button( text="Home", file="home.ppm",
command=self.home)
self.bBack = self._Button( text="Back", file="back.ppm",
command = self.back)
self.bForward = self._Button(text="Forward", file="forward.ppm",
command = self.forward)
self.bPan = self._Button( text="Pan", file="move.ppm",
command = self.pan)
self.bZoom = self._Button( text="Zoom",
file="zoom_to_rect.ppm",
command = self.zoom)
self.bsubplot = self._Button( text="Configure Subplots", file="subplots.ppm",
command = self.configure_subplots)
self.bsave = self._Button( text="Save", file="filesave.ppm",
command = self.save_figure)
self.message = Tk.StringVar(master=self)
self._message_label = Tk.Label(master=self, textvariable=self.message)
self._message_label.pack(side=Tk.RIGHT)
self.pack(side=Tk.BOTTOM, fill=Tk.X)
def configure_subplots(self):
toolfig = Figure(figsize=(6,3))
window = Tk.Tk()
canvas = FigureCanvasTkAgg(toolfig, master=window)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
def save_figure(self):
from tkFileDialog import asksaveasfilename
from tkMessageBox import showerror
filetypes = self.canvas.get_supported_filetypes().copy()
default_filetype = self.canvas.get_default_filetype()
# Tk doesn't provide a way to choose a default filetype,
# so we just have to put it first
default_filetype_name = filetypes[default_filetype]
del filetypes[default_filetype]
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
sorted_filetypes.insert(0, (default_filetype, default_filetype_name))
tk_filetypes = [
(name, '*.%s' % ext) for (ext, name) in sorted_filetypes]
fname = asksaveasfilename(
master=self.window,
title='Save the figure',
filetypes = tk_filetypes,
defaultextension = self.canvas.get_default_filetype()
)
if fname == "" or fname == ():
return
else:
try:
# This method will handle the delegation to the correct type
self.canvas.print_figure(fname)
except Exception, e:
showerror("Error saving file", str(e))
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
_focus = windowing.FocusManager()
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
#if not hasattr(self, "omenu"):
# self.set_active(range(naxes))
# self.omenu = AxisMenu(master=self, naxes=naxes)
#else:
# self.omenu.adjust(naxes)
NavigationToolbar2.update(self)
def dynamic_update(self):
'update drawing area only if idle'
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
FigureManager = FigureManagerTkAgg
| agpl-3.0 |
mhoffman/kmos | kmos/cli.py | 1 | 16514 | #!/usr/bin/env python
"""Entry point module for the command-line
interface. The kmos executable should be
on the program path, import this modules
main function and run it.
To call a kmos command as you would from the shell,
use ::
kmos.cli.main('...')
Every command can be shortened as long as it is non-ambiguous, e.g. ::
kmos ex <xml-file>
instead of ::
kmos export <xml-file>
etc.
"""
# Copyright 2009-2013 Max J. Hoffmann (mjhoffmann@gmail.com)
# This file is part of kmos.
#
# kmos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kmos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kmos. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os
import shutil
usage = {}
usage['all'] = """kmos help all
Display documentation for all commands.
"""
usage['benchmark'] = """kmos benchmark
Run 1 mio. kMC steps on model in current directory
and report runtime.
"""
usage['build'] = """kmos build
Build kmc_model.%s from *f90 files in the
current directory.
Additional Parameters ::
-d/--debug
Turn on assertion statements in F90 code
-n/--no-compiler-optimization
Do not send optimizing flags to compiler.
""" % ('pyd' if os.name == 'nt' else 'so')
usage['help'] = """kmos help <command>
Print usage information for the given command.
"""
usage['export'] = """kmos export <xml-file> [<export-path>]
Take a kmos xml-file and export all generated
source code to the export-path. There try to
build the kmc_model.%s.
Additional Parameters ::
-s/--source-only
Export source only and don't build binary
-b/--backend (local_smart|lat_int)
Choose backend. Default is "local_smart".
lat_int is EXPERIMENTAL and not made
for production, yet.
-d/--debug
Turn on assertion statements in F90 code.
(Only active in compile step)
--acf
Build the modules base_acf.f90 and proclist_acf.f90. Default is false.
        Both modules contain functions to calculate the ACF (autocorrelation function) and the MSD (mean squared displacement).
-n/--no-compiler-optimization
Do not send optimizing flags to compiler.
""" % ('pyd' if os.name == 'nt' else 'so')
usage['settings-export'] = """kmos settings-export <xml-file> [<export-path>]
Take a kmos xml-file and export kmc_settings.py
to the export-path.
"""
usage['edit'] = """kmos edit <xml-file>
Open the kmos xml-file in a GUI to edit
the model.
"""
usage['import'] = """kmos import <xml-file>
Take a kmos xml-file and open an ipython shell
with the project_tree imported as pt.
"""
usage['rebuild'] = """kmos rebuild
Export code and rebuild binary module from XML
information included in kmc_settings.py in
current directory.
Additional Parameters ::
-d/--debug
Turn on assertion statements in F90 code
"""
usage['shell'] = """kmos shell
Open an interactive shell and create a KMC_Model in it
run == shell
"""
usage['run'] = """kmos run
Open an interactive shell and create a KMC_Model in it
run == shell
"""
usage['version'] = """kmos version
Print version number and exit.
"""
usage['view'] = """kmos view
Take a kmc_model.%s and kmc_settings.py in the
same directory and start to simulate the
model visually.
Additional Parameters ::
-v/--steps-per-frame <number>
Number of steps per frame
""" % ('pyd' if os.name == 'nt' else 'so')
usage['xml'] = """kmos xml
Print xml representation of model to stdout
"""
def get_options(args=None, get_parser=False):
import optparse
import os
from glob import glob
import kmos
parser = optparse.OptionParser(
'Usage: %prog [help] ('
+ '|'.join(sorted(usage.keys()))
+ ') [options]',
version=kmos.__version__)
parser.add_option('-s', '--source-only',
dest='source_only',
action='store_true',
default=False)
parser.add_option('-p', '--path-to-f2py',
dest='path_to_f2py',
default='f2py')
parser.add_option('-b', '--backend',
dest='backend',
default='local_smart')
parser.add_option('-a', '--avoid-default-state',
dest='avoid_default_state',
action='store_true',
default=False,
)
parser.add_option('-v', '--steps-per-frame',
dest='steps_per_frame',
type='int',
default='50000')
parser.add_option('-d', '--debug',
default=False,
dest='debug',
action='store_true')
parser.add_option('-n', '--no-compiler-optimization',
default=False,
dest='no_optimize',
action='store_true')
parser.add_option('-o', '--overwrite',
default=False,
action='store_true')
parser.add_option('-l', '--variable-length',
dest='variable_length',
default=95,
type='int')
parser.add_option('-c', '--catmap',
default=False,
action='store_true')
parser.add_option('--acf',
dest='acf',
action='store_true',
default=False,
)
try:
from numpy.distutils.fcompiler import get_default_fcompiler
from numpy.distutils import log
log.set_verbosity(-1, True)
fcompiler = get_default_fcompiler()
except:
fcompiler = 'gfortran'
parser.add_option('-f', '--fcompiler',
dest='fcompiler',
default=os.environ.get('F2PY_FCOMPILER', fcompiler))
if args is not None:
options, args = parser.parse_args(args.split())
else:
options, args = parser.parse_args()
if len(args) < 1:
parser.error('Command expected')
if get_parser:
return options, args, parser
else:
return options, args
def match_keys(arg, usage, parser):
"""Try to match part of a command against
the set of commands from usage. Throws
an error if not successful.
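    For example (an illustrative call, not part of the dispatch logic),
    match_keys('ex', usage, parser) resolves to 'export', whereas
    match_keys('e', usage, parser) raises a parser error because it is
    ambiguous between 'edit' and 'export'.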
"""
possible_args = [key for key in usage if key.startswith(arg)]
if len(possible_args) == 0:
parser.error('Command "%s" not understood.' % arg)
elif len(possible_args) > 1:
parser.error(('Command "%s" ambiguous.\n'
'Could be one of %s\n\n') % (arg, possible_args))
else:
return possible_args[0]
def main(args=None):
"""The CLI main entry point function.
    The optional argument args can be used to
    directly supply command line arguments like
$ kmos <args>
otherwise args will be taken from STDIN.
"""
from glob import glob
options, args, parser = get_options(args, get_parser=True)
global model, pt, np, cm_model
if not args[0] in usage.keys():
args[0] = match_keys(args[0], usage, parser)
if args[0] == 'benchmark':
from sys import path
path.append(os.path.abspath(os.curdir))
nsteps = 1000000
from time import time
from kmos.run import KMC_Model
model = KMC_Model(print_rates=False, banner=False)
time0 = time()
try:
model.proclist.do_kmc_steps(nsteps)
except: # kmos < 0.3 had no model.proclist.do_kmc_steps
model.do_steps(nsteps)
needed_time = time() - time0
print('Using the [%s] backend.' % model.get_backend())
print('%s steps took %.2f seconds' % (nsteps, needed_time))
print('Or %.2e steps/s' % (1e6 / needed_time))
model.deallocate()
elif args[0] == 'build':
from kmos.utils import build
build(options)
elif args[0] == 'edit':
from kmos import gui
gui.main()
elif args[0] == 'settings-export':
import kmos.types
import kmos.io
from kmos.io import ProcListWriter
if len(args) < 2:
parser.error('XML file and export path expected.')
if len(args) < 3:
out_dir = '%s_%s' % (os.path.splitext(args[1])[0], options.backend)
print('No export path provided. Exporting to %s' % out_dir)
args.append(out_dir)
xml_file = args[1]
export_dir = args[2]
project = kmos.types.Project()
project.import_file(xml_file)
writer = ProcListWriter(project, export_dir)
writer.write_settings()
elif args[0] == 'export':
import kmos.types
import kmos.io
from kmos.utils import build
if len(args) < 2:
parser.error('XML file and export path expected.')
if len(args) < 3:
out_dir = '%s_%s' % (os.path.splitext(args[1])[0], options.backend)
print('No export path provided. Exporting to %s' % out_dir)
args.append(out_dir)
xml_file = args[1]
export_dir = os.path.join(args[2], 'src')
project = kmos.types.Project()
project.import_file(xml_file)
project.shorten_names(max_length=options.variable_length)
kmos.io.export_source(project,
export_dir,
options=options)
if ((os.name == 'posix'
and os.uname()[0] in ['Linux', 'Darwin'])
or os.name == 'nt') \
and not options.source_only:
os.chdir(export_dir)
build(options)
for out in glob('kmc_*'):
if os.path.exists('../%s' % out) :
if options.overwrite :
overwrite = 'y'
else:
overwrite = raw_input(('Should I overwrite existing %s ?'
'[y/N] ') % out).lower()
if overwrite.startswith('y') :
print('Overwriting {out}'.format(**locals()))
os.remove('../%s' % out)
shutil.move(out, '..')
else :
print('Skipping {out}'.format(**locals()))
else:
shutil.move(out, '..')
    elif args[0] == 'settings-export':  # NOTE: unreachable; shadowed by the identical branch above
import kmos.io
pt = kmos.io.import_file(args[1])
if len(args) < 3:
out_dir = os.path.splitext(args[1])[0]
print('No export path provided. Exporting kmc_settings.py to %s'
% out_dir)
args.append(out_dir)
if not os.path.exists(args[2]):
os.mkdir(args[2])
elif not os.path.isdir(args[2]):
raise UserWarning("Cannot overwrite %s; Exiting;" % args[2])
writer = kmos.io.ProcListWriter(pt, args[2])
writer.write_settings()
elif args[0] == 'help':
if len(args) < 2:
parser.error('Which help do you want?')
if args[1] == 'all':
for command in sorted(usage):
print(usage[command])
elif args[1] in usage:
print('Usage: %s\n' % usage[args[1]])
else:
arg = match_keys(args[1], usage, parser)
print('Usage: %s\n' % usage[arg])
elif args[0] == 'import':
import kmos.io
if not len(args) >= 2:
raise UserWarning('XML file name expected.')
pt = kmos.io.import_xml_file(args[1])
if len(args) == 2:
            sh(banner='Note: pt = kmos.io.import_xml_file(\'%s\')' % args[1])
elif len(args) == 3: # if optional 3rd argument is given, store model there and exit
pt.save(args[2])
elif args[0] == 'rebuild':
from time import sleep
print('Will rebuild model from kmc_settings.py in current directory')
        print('Please do not interrupt the'
              ' build process, as you will most likely')
        print('lose the current model files.')
sleep(2.)
from sys import path
path.append(os.path.abspath(os.curdir))
from tempfile import mktemp
if not os.path.exists('kmc_model.so') \
and not os.path.exists('kmc_model.pyd'):
raise Exception('No kmc_model.so found.')
if not os.path.exists('kmc_settings.py'):
raise Exception('No kmc_settings.py found.')
from kmos.run import KMC_Model
model = KMC_Model(print_rates=False, banner=False)
tempfile = mktemp()
f = file(tempfile, 'w')
f.write(model.xml())
f.close()
for kmc_model in glob('kmc_model.*'):
os.remove(kmc_model)
os.remove('kmc_settings.py')
main('export %s -b %s .' % (tempfile, options.backend))
os.remove(tempfile)
model.deallocate()
elif args[0] in ['run', 'shell']:
from sys import path
path.append(os.path.abspath(os.curdir))
from kmos.run import KMC_Model
# useful to have in interactive mode
import numpy as np
try:
from matplotlib import pyplot as plt
except:
plt = None
if options.catmap:
import catmap
import catmap.cli.kmc_runner
seed = catmap.cli.kmc_runner.get_seed_from_path('.')
cm_model = catmap.ReactionModel(setup_file='{seed}.mkm'.format(**locals()))
catmap_message = '\nSide-loaded catmap_model {seed}.mkm into cm_model = ReactionModel(setup_file="{seed}.mkm")'.format(**locals())
else:
catmap_message = ''
try:
model = KMC_Model(print_rates=False)
except:
print("Warning: could not import kmc_model!"
" Please make sure you are in the right directory")
sh(banner='Note: model = KMC_Model(print_rates=False){catmap_message}'.format(**locals()))
try:
model.deallocate()
except:
print("Warning: could not deallocate model. Was is allocated?")
elif args[0] == 'version':
from kmos import VERSION
print(VERSION)
elif args[0] == 'view':
from sys import path
path.append(os.path.abspath(os.curdir))
from kmos import view
view.main(steps_per_frame=options.steps_per_frame)
elif args[0] == 'xml':
from sys import path
path.append(os.path.abspath(os.curdir))
from kmos.run import KMC_Model
model = KMC_Model(banner=False, print_rates=False)
print(model.xml())
else:
parser.error('Command "%s" not understood.' % args[0])
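

# Programmatic invocation sketch: main() accepts the command line as a single
# string (hypothetical example; 'help all' simply prints every usage entry):
def _example_main():
    main('help all')
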
def sh(banner):
"""Wrapper around interactive ipython shell
    that factors out IPython version dependencies.
"""
from distutils.version import LooseVersion
import IPython
if hasattr(IPython, 'release'):
try:
from IPython.terminal.embed import InteractiveShellEmbed
InteractiveShellEmbed(banner1=banner)()
except ImportError:
try:
from IPython.frontend.terminal.embed \
import InteractiveShellEmbed
InteractiveShellEmbed(banner1=banner)()
except ImportError:
from IPython.Shell import IPShellEmbed
IPShellEmbed(banner=banner)()
else:
from IPython.Shell import IPShellEmbed
IPShellEmbed(banner=banner)()
| gpl-3.0 |
gandalfcode/gandalf | tests/paper_tests/binaryorbit.py | 1 | 3711 | #==============================================================================
# binaryorbit.py
# Run the binary orbit test using initial conditions specified in the
# file 'binaryorbit.dat'.
#==============================================================================
from gandalf.analysis.facade import *
from gandalf.analysis.data_fetcher import *
from gandalf.analysis.compute import particle_data
from gandalf.analysis.SimBuffer import SimBuffer, BufferException
import time
import matplotlib.pyplot as plt
import numpy as np
import math
from matplotlib import rc
from mpl_toolkits.axes_grid1 import AxesGrid
#--------------------------------------------------------------------------------------------------
rc('font', **{'family': 'normal', 'weight' : 'bold', 'size' : 16})
rc('text', usetex=True)
# Binary parameters
m1 = 0.5
m2 = 0.5
abin = 1.0
ebin = 0.5
etot0 = -0.5*m1*m2/abin
period = 2.0*math.pi*math.sqrt(abin*abin*abin/(m1 + m2))
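# The line above is Kepler's third law in code units (G = 1):
#     P = 2*pi*sqrt(abin**3 / (m1 + m2))
# so for abin = 1 and m1 + m2 = 1 the orbital period is P = 2*pi ~ 6.28
# time units, which sets the time-scale of the orbits integrated below.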
xmin = -0.6
xmax = 2.1
ymin = -0.85
ymax = 0.85
xsize = xmax - xmin
ysize = ymax - ymin
CreateTimeData('x',particle_data,quantity='x')
CreateTimeData('y',particle_data,quantity='y')
# Leapfrog KDK
kdksim = newsim('binaryorbit.dat')
kdksim.SetParam('nbody','lfkdk')
setupsim()
run()
x_kdk = get_time_data("t","x")
y_kdk = get_time_data("t","y")
# Leapfrog DKD
dkdsim = newsim('binaryorbit.dat')
dkdsim.SetParam('nbody','lfdkd')
setupsim()
run()
x_dkd = get_time_data("t","x")
y_dkd = get_time_data("t","y")
# 4th-order Hermite
hermite4sim = newsim('binaryorbit.dat')
hermite4sim.SetParam('nbody','hermite4')
setupsim()
run()
x_hermite4 = get_time_data("t","x")
y_hermite4 = get_time_data("t","y")
# 4th-order Hermite TS
hermite4tssim = newsim('binaryorbit.dat')
hermite4tssim.SetParam('nbody','hermite4ts')
hermite4tssim.SetParam('Npec',5)
setupsim()
run()
x_4ts = get_time_data("t","x")
y_4ts = get_time_data("t","y")
# 6th-order Hermite
#hermite6tssim = newsim('binaryorbit.dat')
#hermite6tssim.SetParam('nbody','hermite6ts')
#hermite6tssim.SetParam('Npec',5)
#setupsim()
#run()
#x_6ts = get_time_data("t","x")
#y_6ts = get_time_data("t","y")
# Create matplotlib figure object with shared x-axis
#--------------------------------------------------------------------------------------------------
#fig, axarr = plt.subplots(2, 1, sharex='col', sharey='row', figsize=(10,4))
fig, axarr = plt.subplots(4, 1, figsize=(6,11), sharex='col', sharey='row')
fig.subplots_adjust(hspace=0.001, wspace=0.001)
fig.subplots_adjust(bottom=0.06, top=0.98, left=0.14, right=0.98)
axarr[0].set_ylabel(r"$y$")
axarr[0].set_ylim([ymin, ymax])
axarr[0].set_xlim([xmin, xmax])
axarr[0].plot(x_kdk.y_data, y_kdk.y_data, color="black", linestyle='-', label='Leapfrog KDK', lw=1.0)
axarr[0].text(xmin + 0.02*xsize, ymax - 0.1*ysize, "(a) Leapfrog-KDK", fontsize=12)
axarr[1].set_ylabel(r"$y$")
axarr[1].set_ylim([ymin, ymax])
axarr[1].plot(x_dkd.y_data, y_dkd.y_data, color="black", linestyle='-', label='Leapfrog DKD', lw=1.0)
axarr[1].text(xmin + 0.02*xsize, ymax - 0.1*ysize, "(b) Leapfrog-DKD", fontsize=12)
axarr[2].set_ylabel(r"$y$")
axarr[2].set_ylim([ymin, ymax])
axarr[2].plot(x_hermite4.y_data, y_hermite4.y_data, color="black", linestyle='-', label='4H', lw=1.0)
axarr[2].text(xmin + 0.02*xsize, ymax - 0.1*ysize, "(c) 4th-order Hermite", fontsize=12)
axarr[3].set_xlabel(r"$x$")
axarr[3].set_ylabel(r"$y$")
axarr[3].set_ylim([ymin, ymax])
axarr[3].plot(x_4ts.y_data, y_4ts.y_data, color="black", linestyle='-', label='4TS', lw=1.0)
axarr[3].text(xmin + 0.02*xsize, ymax - 0.1*ysize, "(d) 4th-order Hermite TS", fontsize=12)
plt.show()
fig.savefig('binaryorbit.pdf', dpi=50)
# Prevent program from closing before showing plot window
block()
| gpl-2.0 |
hilaskis/UAV_MissionPlanner | Lib/site-packages/numpy/linalg/linalg.py | 53 | 61098 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh','lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError']
import sys
from numpy.core import array, asarray, zeros, empty, transpose, \
intc, single, double, csingle, cdouble, inexact, complexfloating, \
newaxis, ravel, all, Inf, dot, add, multiply, identity, sqrt, \
maximum, flatnonzero, diagonal, arange, fastCopyAndTranspose, sum, \
isfinite, size, finfo, absolute, log, exp
from numpy.lib import triu
from numpy.linalg import lapack_lite
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError, 'Singular matrix'
numpy.linalg.linalg.LinAlgError: Singular matrix
"""
pass
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
# NOTE: identical to the _complex_types_map defined above; redundant re-definition.
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError, '%d-dimensional array given. Array must be \
two-dimensional' % len(a.shape)
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError, 'Array must be square'
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError, "Array must not contain infs or NaNs"
def _assertNonEmpty(*arrays):
for a in arrays:
if size(a) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a,wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = range(0, an)
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : array_like, shape (M, M)
Coefficient matrix.
b : array_like, shape (M,) or (M, N)
Ordinate or "dependent variable" values.
Returns
-------
x : ndarray, shape (M,) or (M, N) depending on b
Solution to the system a x = b
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
`solve` is a wrapper for the LAPACK routines `dgesv`_ and
`zgesv`_, the former being used if `a` is real-valued, the latter if
it is complex-valued. The solution to the system of linear equations
is computed using an LU decomposition [1]_ with partial pivoting and
row interchanges.
.. _dgesv: http://www.netlib.org/lapack/double/dgesv.f
.. _zgesv: http://www.netlib.org/lapack/complex16/zgesv.f
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> (np.dot(a, x) == b).all()
True
"""
a, _ = _makearray(a)
b, wrap = _makearray(b)
one_eq = len(b.shape) == 1
if one_eq:
b = b[:, newaxis]
_assertRank2(a, b)
_assertSquareness(a)
n_eq = a.shape[0]
n_rhs = b.shape[1]
if n_eq != b.shape[0]:
raise LinAlgError, 'Incompatible dimensions'
t, result_t = _commonType(a, b)
# lapack_routine = _findLapackRoutine('gesv', t)
if isComplexType(t):
lapack_routine = lapack_lite.zgesv
else:
lapack_routine = lapack_lite.dgesv
a, b = _fastCopyAndTranspose(t, a, b)
a, b = _to_native_byte_order(a, b)
pivots = zeros(n_eq, fortran_int)
results = lapack_routine(n_eq, n_rhs, a, n_eq, pivots, b, n_eq, 0)
if results['info'] > 0:
raise LinAlgError, 'Singular matrix'
if one_eq:
return wrap(b.ravel().astype(result_t))
else:
return wrap(b.transpose().astype(result_t))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[:ind] + a.shape[ind:]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError, "Invalid ind argument."
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : array_like, shape (M, M)
Matrix to be inverted.
Returns
-------
ainv : ndarray or matrix, shape (M, M)
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is singular or not square.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = LA.inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = LA.inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
"""
a, wrap = _makearray(a)
return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : array_like, shape (M, M)
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : ndarray, or matrix object if `a` is, shape (M, M)
Lower-triangular Cholesky factor of a.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
m = a.shape[0]
n = a.shape[1]
if isComplexType(t):
lapack_routine = lapack_lite.zpotrf
else:
lapack_routine = lapack_lite.dpotrf
results = lapack_routine(_L, n, a, m, 0)
if results['info'] > 0:
raise LinAlgError, 'Matrix is not positive definite - \
Cholesky decomposition cannot be computed'
s = triu(a, k=0).transpose()
if (s.dtype != result_t):
s = s.astype(result_t)
return wrap(s)
# QR decompostion
def qr(a, mode='full'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like
Matrix to be factored, of shape (M, N).
mode : {'full', 'r', 'economic'}, optional
Specifies the values to be returned. 'full' is the default.
        Economic mode is slightly faster than 'r' mode if only `r` is needed.
Returns
-------
q : ndarray of float or complex, optional
The orthonormal matrix, of shape (M, K). Only returned if
``mode='full'``.
r : ndarray of float or complex, optional
The upper-triangular matrix, of shape (K, N) with K = min(M, N).
Only returned when ``mode='full'`` or ``mode='r'``.
a2 : ndarray of float or complex, optional
        Array of shape (M, N), only returned when ``mode='economic'``.
The diagonal and the upper triangle of `a2` contains `r`, while
the rest of the matrix is undefined.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved, so if `a` is of type `matrix`,
all the return values will be matrices too.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
a, wrap = _makearray(a)
_assertRank2(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
# economic mode. Isn't actually economic.
if mode[0] == 'e':
if t != result_t :
a = a.astype(result_t)
return a.T
# generate r
r = _fastCopyAndTranspose(result_t, a[:,:mn])
for i in range(mn):
r[i,:i].fill(0.0)
# 'r'-mode, that is, calculate only r
if mode[0] == 'r':
return r
# from here on: build orthonormal matrix q from a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mn, mn, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mn, mn, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
q = _fastCopyAndTranspose(result_t, a[:mn,:])
return wrap(q), wrap(r)
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : array_like, shape (M, M)
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
This is a simple interface to the LAPACK routines dgeev and zgeev
that sets those routines' flags to return only the eigenvalues of
general real and complex arrays, respectively.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
dummy = zeros((1,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeev
w = zeros((n,), t)
rwork = zeros((n,), real_t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, w,
dummy, 1, dummy, 1, work, -1, rwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, w,
dummy, 1, dummy, 1, work, lwork, rwork, 0)
else:
lapack_routine = lapack_lite.dgeev
wr = zeros((n,), t)
wi = zeros((n,), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, wr, wi,
dummy, 1, dummy, 1, work, -1, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, wr, wi,
dummy, 1, dummy, 1, work, lwork, 0)
if all(wi == 0.):
w = wr
result_t = _realType(result_t)
else:
w = wr+1j*wi
result_t = _complexType(result_t)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
return w.astype(result_t)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : array_like, shape (M, M)
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, not necessarily ordered, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
This is a simple interface to the LAPACK routines dsyevd and zheevd
that sets those routines' flags to return only the eigenvalues of
real symmetric and complex Hermitian arrays, respectively.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288+0.j, 5.82842712+0.j])
"""
UPLO = asbytes(UPLO)
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
liwork = 5*n+3
iwork = zeros((liwork,), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zheevd
w = zeros((n,), real_t)
lwork = 1
work = zeros((lwork,), t)
lrwork = 1
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, -1,
rwork, -1, iwork, liwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
lrwork = int(rwork[0])
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, lwork,
rwork, lrwork, iwork, liwork, 0)
else:
lapack_routine = lapack_lite.dsyevd
w = zeros((n,), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, -1,
iwork, liwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, lwork,
iwork, liwork, 0)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
return w.astype(result_t)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : array_like, shape (M, M)
A square array of real or complex elements.
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered, nor are they
necessarily real for real arrays (though for real arrays
complex-valued eigenvalues should occur in conjugate pairs).
v : ndarray, shape (M, M)
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
eigvals : eigenvalues of a non-symmetric array.
Notes
-----
This is a simple interface to the LAPACK routines dgeev and zgeev
which compute the eigenvalues and eigenvectors of, respectively,
general real- and complex-valued square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
    `v` satisfy the equations ``dot(a, v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
_assertFinite(a)
a, t, result_t = _convertarray(a) # convert to double or cdouble type
a = _to_native_byte_order(a)
real_t = _linalgRealType(t)
n = a.shape[0]
dummy = zeros((1,), t)
if isComplexType(t):
# Complex routines take different arguments
lapack_routine = lapack_lite.zgeev
w = zeros((n,), t)
v = zeros((n, n), t)
lwork = 1
work = zeros((lwork,), t)
rwork = zeros((2*n,), real_t)
results = lapack_routine(_N, _V, n, a, n, w,
dummy, 1, v, n, work, -1, rwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(_N, _V, n, a, n, w,
dummy, 1, v, n, work, lwork, rwork, 0)
else:
lapack_routine = lapack_lite.dgeev
wr = zeros((n,), t)
wi = zeros((n,), t)
vr = zeros((n, n), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, _V, n, a, n, wr, wi,
dummy, 1, vr, n, work, -1, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_N, _V, n, a, n, wr, wi,
dummy, 1, vr, n, work, lwork, 0)
if all(wi == 0.0):
w = wr
v = vr
result_t = _realType(result_t)
else:
w = wr+1j*wi
v = array(vr, w.dtype)
ind = flatnonzero(wi != 0.0) # indices of complex e-vals
for i in range(len(ind)//2):
v[ind[2*i]] = vr[ind[2*i]] + 1j*vr[ind[2*i+1]]
v[ind[2*i+1]] = vr[ind[2*i]] - 1j*vr[ind[2*i+1]]
result_t = _complexType(result_t)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
vt = v.transpose().astype(result_t)
return w.astype(result_t), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : array_like, shape (M, M)
A complex Hermitian or real symmetric matrix.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, not necessarily ordered.
v : ndarray, or matrix object if `a` is, shape (M, M)
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
This is a simple interface to the LAPACK routines dsyevd and zheevd,
which compute the eigenvalues and eigenvectors of real symmetric and
complex Hermitian arrays, respectively.
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = asbytes(UPLO)
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
liwork = 5*n+3
iwork = zeros((liwork,), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zheevd
w = zeros((n,), real_t)
lwork = 1
work = zeros((lwork,), t)
lrwork = 1
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, -1,
rwork, -1, iwork, liwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
lrwork = int(rwork[0])
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, lwork,
rwork, lrwork, iwork, liwork, 0)
else:
lapack_routine = lapack_lite.dsyevd
w = zeros((n,), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, -1,
iwork, liwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, lwork,
iwork, liwork, 0)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
at = a.transpose().astype(result_t)
return w.astype(_realType(result_t)), wrap(at)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : ndarray
Unitary matrix. The shape of `u` is (`M`, `M`) or (`M`, `K`)
depending on value of ``full_matrices``.
s : ndarray
The singular values, sorted so that ``s[i] >= s[i+1]``. `s` is
a 1-d array of length min(`M`, `N`).
v : ndarray
Unitary matrix of shape (`N`, `N`) or (`K`, `N`), depending on
``full_matrices``.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
    ((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertNonEmpty(a)
m, n = a.shape
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
s = zeros((min(n, m),), real_t)
if compute_uv:
if full_matrices:
nu = m
nvt = n
option = _A
else:
nu = min(n, m)
nvt = min(n, m)
option = _S
u = zeros((nu, m), t)
vt = zeros((n, nvt), t)
else:
option = _N
nu = 1
nvt = 1
u = empty((1, 1), t)
vt = empty((1, 1), t)
iwork = zeros((8*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgesdd
rwork = zeros((5*min(m, n)*min(m, n) + 5*min(m, n),), real_t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgesdd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError, 'SVD did not converge'
s = s.astype(_realType(result_t))
if compute_uv:
u = u.transpose().astype(result_t)
vt = vt.transpose().astype(result_t)
return wrap(u), s, wrap(vt)
else:
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : array_like, shape (M, N)
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x,compute_uv=False)
return s[0]/s[-1]
else:
return norm(x,p)*norm(inv(x),p)
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the
array that are greater than `tol`.
Parameters
----------
M : array_like
array of <=2 dimensions
tol : {None, float}
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * eps``.
Notes
-----
Golub and van Loan [1]_ define "numerical rank deficiency" as using
tol=eps*S[0] (where S[0] is the maximum singular value and thus the
2-norm of the matrix). This is one definition of rank deficiency,
and the one we use here. When floating point roundoff is the main
concern, then "numerical rank deficiency" is a reasonable choice. In
some cases you may prefer other definitions. The most useful measure
of the tolerance depends on the operations you intend to use on your
matrix. For example, if your data come from uncertain measurements
with uncertainties greater than floating point epsilon, choosing a
tolerance near that uncertainty may be preferable. The tolerance
may be absolute if the uncertainties are absolute rather than
relative.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*.
Baltimore: Johns Hopkins University Press, 1996.
Examples
--------
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * finfo(S.dtype).eps
return sum(S > tol)
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : ndarray, shape (N, M)
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
_assertNonEmpty(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.;
res = dot(transpose(vt), multiply(s[:, newaxis],transpose(u)))
return wrap(res)
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
    If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : array_like, shape (M, M)
Input array.
Returns
-------
sign : float or complex
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : float
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to `sign * np.exp(logdet)`.
Notes
-----
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
.. versionadded:: 2.0.0.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
See Also
--------
det
"""
a = asarray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
if isComplexType(t):
lapack_routine = lapack_lite.zgetrf
else:
lapack_routine = lapack_lite.dgetrf
pivots = zeros((n,), fortran_int)
results = lapack_routine(n, n, a, n, pivots, 0)
info = results['info']
if (info < 0):
raise TypeError, "Illegal input to Fortran routine"
elif (info > 0):
return (t(0.0), _realType(t)(-Inf))
sign = 1. - 2. * (add.reduce(pivots != arange(1, n + 1)) % 2)
d = diagonal(a)
absd = absolute(d)
sign *= multiply.reduce(d / absd)
log(absd, absd)
logdet = add.reduce(absd, axis=-1)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : array_like, shape (M, M)
Input array.
Returns
-------
det : ndarray
Determinant of `a`.
Notes
-----
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
See Also
--------
    slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
"""
sign, logdet = slogdet(a)
return sign * exp(logdet)
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : array_like, shape (M, N)
"Coefficient" matrix.
b : array_like, shape (M,) or (M, K)
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : ndarray, shape (N,) or (N, K)
Least-squares solution. The shape of `x` depends on the shape of
`b`.
residues : ndarray, shape (), (1,), or (K,)
Sums of residues; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or > M, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : ndarray, shape (min(M,N),)
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print m, c
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError, 'Incompatible dimensions'
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0],:n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError, 'SVD did not converge in Linear Least Squares'
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
st = s[:min(n, m)].copy().astype(result_real_t)
return wrap(x), wrap(resids), results['rank'], st
def norm(x, ord=None):
"""
Matrix or vector norm.
This function is able to return one of seven different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like, shape (M,) or (M, N)
Input array.
ord : {non-zero int, inf, -inf, 'fro'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
Returns
-------
n : float
Norm of the matrix or vector.
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4
>>> LA.norm(b, np.inf)
9
>>> LA.norm(a, -np.inf)
0
>>> LA.norm(b, -np.inf)
2
>>> LA.norm(a, 1)
20
>>> LA.norm(b, 1)
7
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
"""
x = asarray(x)
if ord is None: # check the default case first and handle it immediately
return sqrt(add.reduce((x.conj() * x).ravel().real))
nd = x.ndim
if nd == 1:
if ord == Inf:
return abs(x).max()
elif ord == -Inf:
return abs(x).min()
elif ord == 0:
return (x != 0).sum() # Zero norm
elif ord == 1:
return abs(x).sum() # special case for speedup
elif ord == 2:
return sqrt(((x.conj()*x).real).sum()) # special case for speedup
else:
try:
ord + 1
except TypeError:
raise ValueError, "Invalid norm order for vectors."
return ((abs(x)**ord).sum())**(1.0/ord)
elif nd == 2:
if ord == 2:
return svd(x, compute_uv=0).max()
elif ord == -2:
return svd(x, compute_uv=0).min()
elif ord == 1:
return abs(x).sum(axis=0).max()
elif ord == Inf:
return abs(x).sum(axis=1).max()
elif ord == -1:
return abs(x).sum(axis=0).min()
elif ord == -Inf:
return abs(x).sum(axis=1).min()
elif ord in ['fro','f']:
return sqrt(add.reduce((x.conj() * x).real.ravel()))
else:
raise ValueError, "Invalid norm order for matrices."
else:
raise ValueError, "Improper number of dimensions to norm."
| gpl-2.0 |
mueller-lab/PyFRAP | pyfrp/modules/pyfrp_optimization_module.py | 2 | 6867 | #=====================================================================================================================================
#Copyright
#=====================================================================================================================================
#Copyright (C) 2014 Alexander Blaessle, Patrick Mueller and the Friedrich Miescher Laboratory of the Max Planck Society
#This software is distributed under the terms of the GNU General Public License.
#This file is part of PyFRAP.
#PyFRAP is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#===========================================================================================================================================================================
#Module Description
#===========================================================================================================================================================================
"""Optimization module for PyFRAP toolbox.
Currently contains all functions necessary to transform a constrained FRAP optimization problem into
an unconstrained one, making it suitable for the Nelder-Mead optimization algorithm.
"""
#===========================================================================================================================================================================
#Importing necessary modules
#===========================================================================================================================================================================
#Numpy/Scipy
import numpy as np
#PyFRAP
import pyfrp_fit_module
from pyfrp_term_module import *
#===========================================================================================================================================================================
#Module Functions
#===========================================================================================================================================================================
def constrObjFunc(x,fit,debug,ax,returnFit):
"""Objective function when using Constrained Nelder-Mead.
Calls :py:func:`pyfrp.modules.pyfrp_optimization_module.xTransform` to transform x into
constrained version, then uses :py:func:`pyfrp.modules.pyfrp_fit_module.FRAPObjFunc` to
find SSD.
Args:
x (list): Input vector, consisting of [D,(prod),(degr)].
fit (pyfrp.subclasses.pyfrp_fit): Fit object.
debug (bool): Display debugging output and plots.
ax (matplotlib.axes): Axes to display plots in.
returnFit (bool): Return fit instead of SSD.
Returns:
float: SSD of fit. If ``returnFit==True``, the fit object itself is returned instead.
"""
LBs, UBs = buildBoundLists(fit)
x=xTransform(x,LBs,UBs)
ssd=pyfrp_fit_module.FRAPObjFunc(x,fit,debug,ax,returnFit)
return ssd
def xTransform(x,LB,UB):
"""Transforms ``x`` into constrained form, obeying upper
bounds ``UB`` and lower bounds ``LB``.
.. note:: Will add tiny offset to LB(D), to avoid singularities.
Idea taken from http://www.mathworks.com/matlabcentral/fileexchange/8277-fminsearchbnd--fminsearchcon
Args:
x (list): Input vector, consisting of [D,(prod),(degr)].
LB (list): List of lower bounds for ``D,prod,degr``.
UB (list): List of upper bounds for ``D,prod,degr``.
Returns:
list: Transformed x-values.
"""
#Make sure everything is float
x=np.asarray(x,dtype=np.float64)
LB=np.asarray(LB,dtype=np.float64)
UB=np.asarray(UB,dtype=np.float64)
#Check if LB_D==0, then add a little noise to it so we do not end up with xtrans[D]==0 and later have singularities when scaling tvec
if LB[0]==0:
LB[0]=1E-10
#Determine number of parameters to be fitted
nparams=len(x)
#Make empty vector
xtrans = np.zeros(np.shape(x))
# k allows some variables to be fixed, thus dropped from the
# optimization.
k=0
for i in range(nparams):
#Upper bound only
if UB[i]!=None and LB[i]==None:
xtrans[i]=UB[i]-x[k]**2
k=k+1
#Lower bound only
elif UB[i]==None and LB[i]!=None:
xtrans[i]=LB[i]+x[k]**2
k=k+1
#Both bounds
elif UB[i]!=None and LB[i]!=None:
xtrans[i] = (np.sin(x[k])+1.)/2.*(UB[i] - LB[i]) + LB[i]
xtrans[i] = max([LB[i],min([UB[i],xtrans[i]])])
k=k+1
#No bounds
elif UB[i]==None and LB[i]==None:
xtrans[i] = x[k]
k=k+1
#Note: The original file has here another case for fixed variable, but since we made the decision earlier which when we call frap_fitting, we don't need this here.
return xtrans
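# Example (illustrative sketch only): with both bounds set, xTransform maps an
# unconstrained variable through a sine so any real input lands inside [LB, UB].
# The numbers below were worked out from the code above:
#
#   xTransform([0.0], [0.0], [10.0])      # sin(0)=0  -> midpoint, ~[5.0]
#   xTransform([np.pi/2.], [0.0], [10.0]) # sin(pi/2)=1 -> upper bound, ~[10.0]
#
# (The tiny offset added when LB[0]==0 shifts the result by ~1E-10.)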
def transformX0(x0,LB,UB):
"""Transforms ``x0`` into constrained form, obeying upper
bounds ``UB`` and lower bounds ``LB``.
Idea taken from http://www.mathworks.com/matlabcentral/fileexchange/8277-fminsearchbnd--fminsearchcon
Args:
x0 (list): Input initial vector, consisting of [D,(prod),(degr)].
LB (list): List of lower bounds for ``D,prod,degr``.
UB (list): List of upper bounds for ``D,prod,degr``.
Returns:
list: Transformed x-values.
"""
x0u = list(x0)
nparams=len(x0)
k=0
for i in range(nparams):
#Upper bound only
if UB[i]!=None and LB[i]==None:
if UB[i]<=x0[i]:
x0u[k]=0
else:
x0u[k]=np.sqrt(UB[i]-x0[i])
k=k+1
#Lower bound only
elif UB[i]==None and LB[i]!=None:
if LB[i]>=x0[i]:
x0u[k]=0
else:
x0u[k]=np.sqrt(x0[i]-LB[i])
k=k+1
#Both bounds
elif UB[i]!=None and LB[i]!=None:
if UB[i]<=x0[i]:
x0u[k]=np.pi/2
elif LB[i]>=x0[i]:
x0u[k]=-np.pi/2
else:
x0u[k] = 2*(x0[i] - LB[i])/(UB[i]-LB[i]) - 1
#shift by 2*pi to avoid problems at zero in fminsearch; otherwise the initial simplex is vanishingly small
x0u[k] = 2*np.pi+np.arcsin(max([-1,min(1,x0u[k])]))
k=k+1
#No bounds
elif UB[i]==None and LB[i]==None:
x0u[k] = x0[i]
k=k+1
return x0u
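# Example (illustrative sketch, not part of the shipped PyFRAP API): how the
# helpers in this module are typically chained around SciPy's Nelder-Mead
# solver. ``fit`` is a pyfrp_fit object and ``x0`` an initial guess
# [D,(prod),(degr)]; the use of scipy.optimize.fmin and its arguments are
# assumptions made for this example only.
#
#   import scipy.optimize as sciopt
#
#   LBs, UBs = buildBoundLists(fit)            # defined below
#   x0u = transformX0(x0, LBs, UBs)            # initial guess -> unconstrained space
#   resU = sciopt.fmin(constrObjFunc, x0u, args=(fit, False, None, False))
#   res = xTransform(resU, LBs, UBs)           # optimum mapped back into the bounds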
def buildBoundLists(fit):
"""Builds list of lower bounds and upper bounds.
Args:
fit (pyfrp.subclasses.pyfrp_fit): Fit object.
Returns:
tuple: Tuple containing:
* LBs (list): List of lower bounds.
* UBs (list): List of upper bounds.
"""
LBs=[fit.LBD]+int(fit.fitProd)*[fit.LBProd]+int(fit.fitDegr)*[fit.LBDegr]+len(fit.ROIsFitted)*[fit.LBEqu]
UBs=[fit.UBD]+int(fit.fitProd)*[fit.UBProd]+int(fit.fitDegr)*[fit.UBDegr]+len(fit.ROIsFitted)*[fit.UBEqu]
return LBs,UBs
| gpl-3.0 |
koobonil/Boss2D | Boss2D/addon/_old/webrtc-qt5.11.2_for_boss/tools_webrtc/cpu/cpu_mon.py | 6 | 2057 | #!/usr/bin/env python
#
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
import psutil
import sys
import numpy
from matplotlib import pyplot
class CpuSnapshot(object):
def __init__(self, label):
self.label = label
self.samples = []
def Capture(self, sample_count):
print ('Capturing %d CPU samples for %s...' %
((sample_count - len(self.samples)), self.label))
while len(self.samples) < sample_count:
self.samples.append(psutil.cpu_percent(1.0, False))
def Text(self):
return ('%s: avg=%s, median=%s, min=%s, max=%s' %
(self.label, numpy.average(self.samples),
numpy.median(self.samples),
numpy.min(self.samples), numpy.max(self.samples)))
def Max(self):
return numpy.max(self.samples)
def GrabCpuSamples(sample_count):
print 'Label for snapshot (enter to quit): '
label = raw_input().strip()
if len(label) == 0:
return None
snapshot = CpuSnapshot(label)
snapshot.Capture(sample_count)
return snapshot
def main():
print 'How many seconds to capture per snapshot (enter for 60)?'
sample_count = raw_input().strip()
if len(sample_count) > 0 and int(sample_count) > 0:
sample_count = int(sample_count)
else:
print 'Defaulting to 60 samples.'
sample_count = 60
snapshots = []
while True:
snapshot = GrabCpuSamples(sample_count)
if snapshot is None:
break
snapshots.append(snapshot)
if len(snapshots) == 0:
print 'no samples captured'
return -1
pyplot.title('CPU usage')
for s in snapshots:
pyplot.plot(s.samples, label=s.Text(), linewidth=2)
pyplot.legend()
pyplot.show()
return 0
if __name__ == '__main__':
sys.exit(main())
| mit |
aemerick/galaxy_analysis | method_paper_plots/star_abundances.py | 1 | 26128 | from galaxy_analysis.plot.plot_styles import *
import matplotlib.pyplot as plt
import glob
import deepdish as dd
import yt
from galaxy_analysis.utilities import utilities
import numpy as np
from matplotlib.ticker import NullFormatter
from galaxy_analysis.particle_analysis.abundances import single_MDF
#
from galaxy_analysis.analysis import Galaxy
from mpl_toolkits.axes_grid1 import make_axes_locatable
import h5py
# grab the most recent file
workdir = '/mnt/ceph/users/emerick/enzo_runs/pleiades/starIC/run11_30km/final_sndriving/'
#workdir = '/home/emerick/work/enzo_runs/pleiades/starIC/run11_30km/final_sndriving/'
data_files = np.sort(glob.glob(workdir + 'DD????'))
name = data_files[-1].split('final_sndriving/')[1]
gal = Galaxy(name, wdir = workdir)
#
#
#
def plot_alpha_vs_fe():
fig,ax = plt.subplots()
fig.set_size_inches(8,7)
ptype = gal.df['particle_type']
fe_over_h = gal.df[('io','particle_Fe_over_H')]
alpha = gal.df[('io','particle_alpha_over_Fe')]
age = (gal.ds.current_time - gal.df[('io','creation_time')]).convert_to_units('Myr')
age = age - np.min(age)
p = ax.scatter(fe_over_h[ptype==11], alpha[ptype==11],
s = point_size, lw = 2, c = age[ptype==11], cmap = 'plasma_r', alpha = 0.75)
p.set_clim([0.0, np.max(age)])
cb = fig.colorbar(p)
cb.set_label(r'Stellar Age (Myr)')
ax.set_xlim(-9,-1)
ax.set_ylim(-1.75,1.75)
ax.set_xlabel(r'[Fe/H]')
ax.set_ylabel(r'[$\rm \alpha$/Fe]')
plt.minorticks_on()
plt.tight_layout()
fig.savefig('alpha_over_fe.png')
plt.close()
return
def plot_alpha_vs_fe_movie():
times = np.arange(0, 245, 1)
for i, t in enumerate(times):
plot_alpha_vs_fe_with_histograms(t_f = t, image_num = i)
def plot_alpha_vs_fe_with_histograms(t_f = None, image_num = 0):
sep = 0.02
left, width = 0.125, 0.65
bottom, height = 0.1, 0.65
left_h = left + width + sep
bottom_h = bottom + height + sep
rect_scatter = [left,bottom,width,height]
# rect_colorbar =
# rect_histx = [left, bottom_h, width, 0.95 - bottom_h - (left-bottom)]
# rect_histy = [left_h, bottom, 0.95 - left_h, height]
# fig,ax = plt.subplots()
fig = plt.figure(1, figsize=(8,8))
# fig.set_size_inches(8,8)
ax_scatter = plt.axes(rect_scatter)
# ax_hist_x = plt.axes(rect_histx)
# ax_hist_y = plt.axes(rect_histy)
# ax_color = plt.axes(rect_colorbar)
ptype = gal.df['particle_type']
fe_over_h = gal.df[('io','particle_Fe_over_H')]
alpha = gal.df[('io','particle_alpha_over_Fe')]
creation_time = gal.df[('io','creation_time')].convert_to_units('Myr')
age = (gal.ds.current_time - creation_time)
if t_f is None: # plot normally all MS stars
age = age - np.min(age)
# scatter plot
p = ax_scatter.scatter(fe_over_h[ptype==11], alpha[ptype==11],
s = point_size, lw = 2, c = age[ptype==11], cmap = 'plasma_r', alpha = 0.75)
p.set_clim([0.0, np.max(age)])
else:
min_clim = 0.0
max_clim = np.max( age - np.min(age))
particle_lifetimes = gal.df[('io','particle_model_lifetime')].convert_to_units('Myr')
selection = (t_f >= creation_time) * ( t_f < creation_time + particle_lifetimes)
age = t_f - creation_time
if np.size(fe_over_h[selection]) < 1:
plot_fe_over_h = np.ones(np.size(fe_over_h))*(-10000) # make dummy values so plot still displays, but is empty
plot_alpha = np.ones(np.size(alpha))*(-10000)
plot_age = np.ones(np.size(age))*(-10000)
else:
plot_fe_over_h = fe_over_h[selection]
plot_alpha = alpha[selection]
plot_age = age[selection]
p = ax_scatter.scatter(plot_fe_over_h, plot_alpha, s = point_size, lw = 2,
c = plot_age, cmap = 'plasma_r', alpha = 0.75)
p.set_clim([min_clim,max_clim])
cb = fig.colorbar(p, ax = ax_scatter, orientation = 'horizontal', pad = 0.125, fraction = 0.046,
aspect = 40)
cb.set_label(r'Stellar Age (Myr)')
#
#
ax_scatter.set_xlim(-9,-1)
ax_scatter.set_ylim(-1.75,1.75)
ax_scatter.tick_params(axis='x',which='minor',bottom='on')
ax_scatter.tick_params(axis='y',which='minor',bottom='on')
ax_scatter.set_xlabel(r'[Fe/H]')
ax_scatter.set_ylabel(r'[$\rm \alpha$/Fe]')
plt.minorticks_on()
ax_scatter.plot( ax_scatter.get_xlim(), [0.0,0.0], lw = line_width, color = 'black', ls = '--')
#
# find main plot and construct histograms
#
divider = make_axes_locatable(ax_scatter)
left, bottom, width, height = divider.get_position()
# width, height = divider.get_horizontal(), divider.get_vertical()
sep = 0.01
thickness = np.min( np.array([0.95 - left - width - sep, 0.95 - bottom - height - sep]))
rect_histx = [left, bottom + height + sep, width, thickness]
rect_histy = [left + width + sep, bottom, thickness, height]
ax_hist_x = plt.axes(rect_histx)
ax_hist_y = plt.axes(rect_histy)
nbins = 100
hist,bins = np.histogram(fe_over_h, bins = nbins)
weights = np.ones(np.size(fe_over_h)) * (1.0 / (1.0*np.max(hist)))
ax_hist_x.hist(fe_over_h, color = 'C0', bins = nbins, weights = weights)
if not (t_f is None):
if np.max(plot_fe_over_h) > -1000:
hist,bins = np.histogram(plot_fe_over_h, bins = nbins)
weights = np.ones(np.size(plot_fe_over_h)) * (1.0 / (1.0*np.max(hist)))
ax_hist_x.hist(plot_fe_over_h, color = 'black', bins = nbins, weights = weights,
histtype = 'step', lw = 2.0)
# plot_histogram(ax_hist_x, bins, hist / (1.0*np.max(hist)), color = 'black')
plt.minorticks_on()
# hist,bins = np.histogram(alpha, bins = 24)
# plot_histogram(ax_hist_y, bins, hist / (1.0*np.max(hist)), color = 'black', orientation = 'horizontal')
nbins = 50
hist,bins = np.histogram(alpha, bins = nbins)
weights = np.ones(np.size(fe_over_h)) * (1.0 / (1.0*np.max(hist)))
ax_hist_y.hist(alpha, orientation='horizontal', color = 'C0', bins = nbins, weights = weights)
if not (t_f is None):
if np.max(plot_alpha) > -1000:
hist,bins = np.histogram(plot_alpha, bins = nbins)
weights = np.ones(np.size(plot_alpha)) * (1.0 / (1.0*np.max(hist)))
ax_hist_y.hist(plot_alpha, orientation = 'horizontal', color = 'black', bins = nbins,
weights = weights, histtype='step', lw = 2.0)
ax_hist_x.xaxis.set_major_formatter(NullFormatter())
ax_hist_y.yaxis.set_major_formatter(NullFormatter())
ax_hist_x.set_xlim(ax_scatter.get_xlim())
ax_hist_y.set_ylim(ax_scatter.get_ylim())
ticks = [0.0,0.25,0.5,0.75,1.0]
ax_hist_x.set_yticks(ticks)
ax_hist_y.set_xticks(ticks)
ax_hist_y.set_xticklabels(ticks, rotation = 270)
plt.minorticks_on()
# plt.tight_layout()
if t_f is None:
fig.savefig('alpha_over_fe_hist.png')
else:
fig.savefig('alpha_movie/alpha_over_fe_hist_%0004i.png'%(image_num))
plt.close()
return
def plot_panel(A = 'Fe', B = 'Fe', C = 'H', color = True):
"""
Make panel plots of [X/A] vs. [B/C], where X loops over all available elements
and A, B, C are fixed for all panels and chosen by the user. The default plots
[X/Fe] vs. [Fe/H]. Default behavior is to color points by stellar age.
"""
filename = workdir + '/abundances/abundances/abundances.h5'
hdf5_data = h5py.File(filename, 'r')
dfiles = hdf5_data.keys()
dfile = dfiles[-1] # do this with most recent data file
data = dd.io.load(filename, '/' + str(dfile))
elements = utilities.sort_by_anum([x for x in data['abundances'].keys() if (not 'alpha' in x)])
elements = elements + ['alpha']
age = data['Time'] - data['creation_time'] # age of all particles in this data set
for base in ['H','Fe']:
fig, ax = plt.subplots(4,4, sharex = True, sharey = True)
fig.set_size_inches(4*4,4*4)
fig.subplots_adjust(hspace=0.0, wspace = 0.0)
if base == 'Fe':
bins = np.arange(-3,3.1,0.1)
else:
bins = np.arange(-9,0,0.1)
i,j = 0,0
for e in elements:
if (A == e): # skip
continue
index = (i,j)
y = np.array(data['abundances'][e][A])
x = np.array(data['abundances'][B][C])
p = ax[index].scatter(x, y, s = point_size*0.5,
lw = 2, c = age, cmap = 'plasma_r', alpha = 0.75)
p.set_clim([0.0, np.max(age)])
xy = (0.8,0.8)
ax[index].annotate(e, xy=xy, xytext=xy, xycoords = 'axes fraction',
textcoords = 'axes fraction')
# cb = fig.colorbar(p)
# cb.set_label(r'Stellar Age (Myr)')
j = j + 1
if j >= 4:
j = 0
i = i + 1
for i in np.arange(4):
ax[(3,i)].set_xlabel(r'log([' + B + '/' + C + '])')
ax[(i,0)].set_ylabel(r'log([X/' + A + '])')
if C == 'H':
ax[(i,0)].set_xlim(-10.25, 0.125)
else:
ax[(i,0)].set_xlim(-3.25, 3.25)
if A == 'H':
ax[(0,i)].set_ylim(-10.25, 0.125)
else:
ax[(0,i)].set_ylim(-3.25, 3.25)
for j in np.arange(4):
ax[(j,i)].plot([-10,10], [0.0,0.0], lw = 0.5 * line_width, ls = ':', color = 'black')
plt.minorticks_on()
fig.savefig('X_over_' + A +'_vs_' + B + '_over_' + C + '_panel.png')
plt.close()
return
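# Example usage (sketch): the defaults reproduce [X/Fe] vs. [Fe/H]; other
# combinations follow the commented calls in __main__ below, e.g.
#
#   plot_panel()                       # [X/Fe] vs. [Fe/H]
#   plot_panel(A='Mg', B='Fe', C='H')  # [X/Mg] vs. [Fe/H]
#   plot_panel(A='O',  B='O',  C='Fe') # [X/O]  vs. [O/Fe]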
def plot_spatial_profiles(field = 'metallicity', abundance = False,
bins = None, spatial_type = 'cylindrical_radius'):
filename = workdir + '/abundances/abundances/abundances.h5'
hdf5_data = h5py.File(filename, 'r')
dfiles = hdf5_data.keys()
dfile = dfiles[-1] # do this with most recent data file
data = dd.io.load(filename, '/' + str(dfile))
elements = utilities.sort_by_anum([x for x in data['abundances'].keys() if (not 'alpha' in x)])
elements = elements + ['alpha']
if spatial_type == 'cylindrical_radius':
bin_field = np.sqrt(data['kinematics']['x']**2 + data['kinematics']['y']**2)
xlabel = r'Radius (pc)'
elif spatial_type == 'z':
bin_field = np.abs( data['kinematics']['z'] )
xlabel = r'Z (pc)'
if bins is None:
bins = np.linspace(np.floor(np.min(bin_field)), np.ceil(np.max(bin_field)), 100)
centers = 0.5 * (bins[1:] + bins[:-1])
nbins = np.size(bins)
hist_index = np.digitize(bin_field, bins = bins)
median, q1, q3 = np.zeros(nbins-1), np.zeros(nbins-1), np.zeros(nbins-1)
if field == 'metallicity':
# make a single plot
# bin the data
for i in np.arange(nbins-1):
x = data['metallicity'][hist_index == i + 1]
median[i] = np.median(x)
if np.size(x) > 1:
q1[i] = np.percentile(x, 25.0)
q3[i] = np.percentile(x, 75.0)
elif np.size(x) == 1:
q1[i] = median[i]
q3[i] = median[i]
# now plot
fig, ax = plt.subplots()
fig.set_size_inches(8,8)
plot_histogram(ax, bins, median, lw = line_width, color = 'black', ls = '-')
ax.fill_between(centers, q1, q3, lw = 1.5, color = 'grey')
ax.set_ylabel(r'Metallicity Fraction')
ax.set_xlabel(xlabel)
ax.set_xlim( np.min(bins), np.max(bins))
plt.tight_layout()
plt.minorticks_on()
fig.savefig('metallicity_' + spatial_type + '_profile.png')
plt.close()
elif abundance:
fig, ax = plt.subplots(4,4, sharex = True, sharey = True)
fig.set_size_inches(16,16)
fig.subplots_adjust(hspace = 0.0, wspace = 0.0)
axi, axj = 0,0
for e in elements:
if field == e:
continue
index = (axi,axj)
for i in np.arange(nbins-1):
x = np.array(data['abundances'][e][field])
x = x[ hist_index == (i + 1)]
if np.size(x) > 0:
median[i] = np.median(x)
q1[i] = np.percentile(x, 25)
q3[i] = np.percentile(x, 75)
else:
median[i] = None; q1[i] = None; q3[i] = None
ax[index].annotate(e, xy=(0.8,0.8),xytext=(0.8,0.8),
xycoords='axes fraction',textcoords = 'axes fraction')
plot_histogram(ax[index], bins, median, lw = line_width, color = 'black', ls = '-')
ax[index].fill_between(centers,q1,q3,lw=1.5,color='grey')
axj = axj+1
if axj>=4:
axj = 0
axi = axi + 1
for i in np.arange(4):
ax[(3,i)].set_xlabel(xlabel)
ax[(i,0)].set_ylabel(r'log[X/' + field +'])')
if field == 'H':
ax[(0,i)].set_ylim(-10.25,0.125)
else:
ax[(0,i)].set_ylim(-3.25,3.25)
for j in np.arange(4):
ax[(j,i)].plot([bins[0],bins[-1]], [0.0,0.0], lw = 0.5 * line_width, ls = '--',color ='black')
ax[(i,0)].set_xlim(np.min(bins), np.max(bins))
plt.minorticks_on()
fig.savefig(field + '_' + spatial_type + '_profile_panel.png')
plt.close()
return
def plot_MDF(plot_base = ['H','Fe']):
"""
Make a panel plot of the metallicity distribution functions (MDFs) of all
elemental abundance ratios with respect to the chosen base element(s).
"""
if (not (type(plot_base) is list)):
plot_base = [plot_base]
filename = workdir + '/abundances/abundances/abundances.h5'
hdf5_data = h5py.File(filename, 'r')
dfiles = hdf5_data.keys()
dfile = dfiles[-1] # do this with most recent data file
data = dd.io.load(filename, '/' + str(dfile))
elements = utilities.sort_by_anum([x for x in data['abundances'].keys() if (not 'alpha' in x)])
elements = elements + ['alpha']
for base in plot_base:
fig, ax = plt.subplots(4,4, sharex = True, sharey = True)
fig.set_size_inches(4*4,4*4)
fig.subplots_adjust(hspace=0.0, wspace = 0.0)
if base == 'Fe':
bins = np.arange(-3,3.1,0.1)
else:
bins = np.arange(-9,0,0.1)
i,j = 0,0
for e in elements:
if (base == e):
continue
index = (i,j)
points = np.array(data['abundances'][e][base])
single_MDF(points, bins = bins, norm = 'peak', ax = ax[index],
label = False, lw = line_width)
x = np.max(bins) - (0.25/6.0 * (bins[-1] - bins[0]))
y = 0.9
ax[index].annotate(e, xy = (x,y), xytext =(x,y))
ax[index].plot([0,0], [0.0,1.0], ls = ':', lw = 0.5 * line_width, color = 'black')
j = j + 1
if j >= 4:
j = 0
i = i + 1
for i in np.arange(4):
ax[(3,i)].set_xlabel(r'log([X/' + base + '])')
ax[(i,0)].set_ylabel(r'N/N$_{\rm peak}$')
if base == 'H':
ax[(i,0)].set_xlim(-10.25, 0.125)
elif base == 'Fe':
ax[(i,0)].set_xlim(-3.25, 3.25)
plt.minorticks_on()
fig.savefig(base + '_MDF.png')
plt.close()
return
def plot_time_evolution():
"""
Make a panel plot of the time evolution of all elemental abundance ratios
with respect to both H and Fe (as separate plots)
"""
filename = workdir + '/abundances/abundances/abundances.h5'
hdf5_data = h5py.File(filename, 'r')
dfiles = hdf5_data.keys()
dfile = dfiles[-1] # do this with most recent data file
data = dd.io.load(filename, '/' + str(dfile))
elements = utilities.sort_by_anum([x for x in data['abundances'].keys() if (not 'alpha' in x)])
elements = elements + ['alpha']
for time_type in ['cumulative','10Myr']:
for base in ['H','Fe']:
fig, ax = plt.subplots(4,4, sharex = True, sharey = True)
fig.set_size_inches(4*4,4*4)
fig.subplots_adjust(hspace=0.0, wspace = 0.0)
i,j = 0,0
for e in elements:
if (base == e):
continue
print("plotting " + e + "/" + base + " time evolution")
index = (i,j)
t = data['statistics'][time_type]['bins']
y = data['statistics'][time_type][e][base]['median']
Q1 = data['statistics'][time_type][e][base]['Q1']
Q3 = data['statistics'][time_type][e][base]['Q3']
select = (y*0 == 0) # remove nan values
t = t[select]
t = t - t[0]
ax[index].plot( t, y[select], lw = line_width, ls = '-', color = 'black', label = r' ' + e +' ')
ax[index].fill_between(t, Q1[select], Q3[select], color = 'black', alpha = 0.5, lw = 0.5 * line_width)
ax[index].set_xlim(0.0, np.max(t))
ax[index].plot( [0.0,1000.0], [0.0,0.0], ls = ':', color = 'black', lw = line_width)
ax[index].legend(loc = 'upper right')
j = j + 1
if j >= 4:
j = 0
i = i + 1
for i in np.arange(4):
ax[(3,i)].set_xlabel(r'Time (Myr)')
ax[(i,0)].set_ylabel(r'[X/' + base +']')
if base == 'H':
ax[(i,0)].set_ylim(-12.25, 0.125)
elif base == 'Fe':
ax[(i,0)].set_ylim(-3.25, 3.25)
# for j in np.arange(3):
# ax[(j,i)].set_xticklabels([])
# ax[(i,j+1)].set_yticklabels([])
# ax[(3,i)].set_xticklabels(np.arange(0,np.max(t)+20,20))
# if base == 'Fe':
# ax[(i,0)].set_yticklabels([-3,-2,-1,0,1,2,3,])
# else:
# ax[(i,0)].set_yticklabels([-12, -10, -8, -6, -4, -2, 0])
plt.minorticks_on()
fig.savefig('stellar_x_over_' + base + '_' + time_type +'_evolution.png')
plt.close()
return
def plot_mass_fraction_time_evolution():
"""
Make a panel plot of the time evolution of the stellar mass fractions of
all elements.
"""
filename = workdir + '/abundances/abundances/abundances.h5'
hdf5_data = h5py.File(filename, 'r')
dfiles = hdf5_data.keys()
dfile = dfiles[-1] # do this with most recent data file
data = dd.io.load(filename, '/' + str(dfile))
elements = utilities.sort_by_anum([x for x in data['abundances'].keys() if (not 'alpha' in x)])
# elements = elements + ['alpha']
for time_type in ['cumulative','10Myr']:
fig, ax = plt.subplots(4,4, sharex = True, sharey = True)
fig.set_size_inches(4*4,4*4)
fig.subplots_adjust(hspace=0.0, wspace = 0.0)
i,j = 0,0
for e in elements:
print("plotting " + e + "mass fraction time evolution")
index = (i,j)
t = data['mass_fraction_statistics'][time_type]['bins']
y = data['mass_fraction_statistics'][time_type][e]['median']
Q1 = data['mass_fraction_statistics'][time_type][e]['Q1']
Q3 = data['mass_fraction_statistics'][time_type][e]['Q3']
select = (y*0 == 0) # remove nan values
t = t[select]
t = t - t[0]
ax[index].plot( t, y[select], lw = line_width, ls = '-', color = 'black', label = r' ' + e +' ')
ax[index].fill_between(t, Q1[select], Q3[select], color = 'black', alpha = 0.5, lw = 0.5 * line_width)
ax[index].set_xlim(0.0, np.max(t))
ax[index].plot( [0.0,1000.0], [0.0,0.0], ls = ':', color = 'black', lw = line_width)
ax[index].legend(loc = 'upper right')
j = j + 1
if j >= 4:
j = 0
i = i + 1
for i in np.arange(4):
ax[(3,i)].set_xlabel(r'Time (Myr)')
ax[(i,0)].set_ylabel(r'log(X Mass Fraction)')
ax[(i,0)].set_ylim(1.0E-10, 1.0E-4)
ax[(i,0)].semilogy()
# for j in np.arange(3):
# ax[(j,i)].set_xticklabels([])
# ax[(i,j+1)].set_yticklabels([])
# ax[(3,i)].set_xticklabels(np.arange(0,np.max(t)+20,20))
# if base == 'Fe':
# ax[(i,0)].set_yticklabels([-3,-2,-1,0,1,2,3,])
# else:
# ax[(i,0)].set_yticklabels([-12, -10, -8, -6, -4, -2, 0])
plt.minorticks_on()
fig.savefig('stellar_mass_fraction_' + time_type +'_evolution.png')
plt.close()
return
def plot_ratios_with_histograms(X='alpha',A='Fe',B='Fe',C='H'):
filename = workdir + '/abundances/abundances/abundances.h5'
hdf5_data = h5py.File(filename, 'r')
dfiles = hdf5_data.keys()
dfile = dfiles[-1] # do this with most recent data file
data = dd.io.load(filename, '/' + str(dfile))
elements = utilities.sort_by_anum([x for x in data['abundances'].keys() if (not 'alpha' in x)])
elements = elements + ['alpha'] + ['H']
age = data['Time'] - data['creation_time'] # age of all particles in this data set
# --------------------
check_elements = [x for x in [X,A,B,C] if (not (x in elements))]
if len(check_elements) > 0:
print(check_elements, " not in elements list")
print("available: ", elements)
raise ValueError
sep = 0.02
left, width = 0.125, 0.65
bottom, height = 0.1, 0.65
left_h = left + width + sep
bottom_h = bottom + height + sep
rect_scatter = [left,bottom,width,height]
# rect_colorbar =
# rect_histx = [left, bottom_h, width, 0.95 - bottom_h - (left-bottom)]
# rect_histy = [left_h, bottom, 0.95 - left_h, height]
# fig,ax = plt.subplots()
fig = plt.figure(1, figsize=(8,8))
# fig.set_size_inches(8,8)
ax_scatter = plt.axes(rect_scatter)
# ax_hist_x = plt.axes(rect_histx)
# ax_hist_y = plt.axes(rect_histy)
# ax_color = plt.axes(rect_colorbar)
x_values = data['abundances'][B][C]
y_values = data['abundances'][X][A]
age = age - np.min(age) # normalize
# scatter plot
p = ax_scatter.scatter(x_values, y_values,
s = point_size, lw = 2, c = age, cmap = 'plasma_r', alpha = 0.75)
p.set_clim([0.0, np.max(age)])
cb = fig.colorbar(p, ax = ax_scatter, orientation = 'horizontal', pad = 0.125, fraction = 0.046,
aspect = 40)
cb.set_label(r'Stellar Age (Myr)')
#
#
#
ax_scatter.set_xlim(-9,-1)
ax_scatter.set_ylim(-1.75,1.75)
ax_scatter.tick_params(axis='x',which='minor',bottom='on')
ax_scatter.tick_params(axis='y',which='minor',bottom='on')
ax_scatter.set_xlabel(r'log([' + B + '/' + C + '])')
ax_scatter.set_ylabel(r'log([' + X + '/' + A + '])')
plt.minorticks_on()
#
# find main plot and construct histograms
#
divider = make_axes_locatable(ax_scatter)
left, bottom, width, height = divider.get_position()
# width, height = divider.get_horizontal(), divider.get_vertical()
sep = 0.01
thickness = np.min( np.array([0.95 - left - width - sep, 0.95 - bottom - height - sep]))
rect_histx = [left, bottom + height + sep, width, thickness]
rect_histy = [left + width + sep, bottom, thickness, height]
ax_hist_x = plt.axes(rect_histx)
ax_hist_y = plt.axes(rect_histy)
# construct the histogram for the horizontal axis (goes up top)
nbins = 100
hist,bins = np.histogram(x_values, bins = nbins)
weights = np.ones(np.size(x_values)) * (1.0 / (1.0*np.max(hist)))
ax_hist_x.hist(x_values, color = 'C0', bins = nbins, weights = weights)
# plot_histogram(ax_hist_x, bins, hist / (1.0*np.max(hist)), color = 'black')
plt.minorticks_on()
# hist,bins = np.histogram(alpha, bins = 24)
# plot_histogram(ax_hist_y, bins, hist / (1.0*np.max(hist)), color = 'black', orientation = 'horizontal')
# now do the same for the vertical axis histogram
nbins = 50
hist,bins = np.histogram(y_values, bins = nbins)
weights = np.ones(np.size(y_values)) * (1.0 / (1.0*np.max(hist)))
ax_hist_y.hist(y_values, orientation='horizontal', color = 'C0', bins = nbins, weights = weights)
ax_hist_x.xaxis.set_major_formatter(NullFormatter())
ax_hist_y.yaxis.set_major_formatter(NullFormatter())
ax_hist_x.set_xlim(ax_scatter.get_xlim())
ax_hist_y.set_ylim(ax_scatter.get_ylim())
ticks = [0.0,0.25,0.5,0.75,1.0]
ax_hist_x.set_yticks(ticks)
ax_hist_y.set_xticks(ticks)
ax_hist_y.set_xticklabels(ticks, rotation = 270)
plt.minorticks_on()
# plt.tight_layout()
fig.savefig(X + '_over_' + A + '_vs_' + B + '_over_' + C + '_hist.png')
plt.close()
return
if __name__ == '__main__':
plot_mass_fraction_time_evolution() #
# plot_ratios_with_histograms('C','O','Fe','H') # C/O vs Fe/H
# plot_ratios_with_histograms('alpha','Mg','Mg','H')
# plot_ratios_with_histograms('alpha','Fe','Fe','H')
# plot_panel() # default [X/Fe] vs [Fe/H]
# plot_panel(A = 'Mg', B = 'Fe', C = 'H')
# plot_panel(A = 'Mg', B = 'Mg', C = 'Fe')
# plot_panel(A = 'O', B = 'Fe', C = 'H')
# plot_panel(A = 'O', B = 'O', C = 'Fe')
# plot_panel(A = 'Ba', B = 'Ba', C = 'Fe')
# plot_MDF(plot_base = ['H','Fe','O','Ba'])
# plot_time_evolution()
# plot_alpha_vs_fe_with_histograms()
# plot_alpha_vs_fe()
# plot_alpha_vs_fe_movie()
# plot_spatial_profiles(bins=np.arange(0,505,10))
# plot_spatial_profiles(field = 'Fe',abundance=True, bins = np.arange(0,505,10))
# plot_spatial_profiles(field = 'H', abundance=True, bins = np.arange(0,505,10))
| mit |
sernst/cauldron | cauldron/session/display/__init__.py | 1 | 23013 | import json as _json_io
import textwrap
import typing
from datetime import timedelta
import cauldron as _cd
from cauldron import environ
from cauldron import render
from cauldron.render import plots as render_plots
from cauldron.render import texts as render_texts
from cauldron.session import report
def _get_report() -> 'report.Report':
"""Fetches the report associated with the currently running step."""
return _cd.project.get_internal_project().current_step.report
def inspect(source: dict):
"""
Inspects the data and structure of the source dictionary object and
adds the results to the display for viewing.
:param source:
A dictionary object to be inspected.
:return:
"""
r = _get_report()
r.append_body(render.inspect(source))
def header(header_text: str, level: int = 1, expand_full: bool = False):
"""
Adds a text header to the display with the specified level.
:param header_text:
The text to display in the header.
:param level:
The level of the header, which corresponds to the html header
levels, such as <h1>, <h2>, ...
:param expand_full:
Whether or not the header will expand to fill the width of the entire
notebook page, or be constrained by automatic maximum page width. The
default value of False lines the header up with text displays.
"""
r = _get_report()
r.append_body(render.header(
header_text,
level=level,
expand_full=expand_full
))
def text(value: str, preformatted: bool = False):
"""
Adds text to the display. If the text is not preformatted, it will be
displayed in paragraph format. Preformatted text will be displayed
inside a pre tag with a monospace font.
:param value:
The text to display.
:param preformatted:
Whether or not to preserve the whitespace display of the text.
"""
if preformatted:
result = render_texts.preformatted_text(value)
else:
result = render_texts.text(value)
r = _get_report()
r.append_body(result)
r.stdout_interceptor.write_source(
'{}\n'.format(textwrap.dedent(value))
)
def markdown(
source: str = None,
source_path: str = None,
preserve_lines: bool = False,
font_size: float = None,
**kwargs
):
"""
Renders the specified source string or source file using markdown and
adds the resulting HTML to the notebook display.
:param source:
A markdown formatted string.
:param source_path:
A file containing markdown text.
:param preserve_lines:
If True, all line breaks will be treated as hard breaks. Use this
for pre-formatted markdown text where newlines should be retained
during rendering.
:param font_size:
Specifies a relative font size adjustment. The default value is 1.0,
which preserves the inherited font size values. Set it to a value
below 1.0 for smaller font-size rendering and greater than 1.0 for
larger font size rendering.
:param kwargs:
Any variable replacements to make within the string using Jinja2
templating syntax.
"""
r = _get_report()
result = render_texts.markdown(
source=source,
source_path=source_path,
preserve_lines=preserve_lines,
font_size=font_size,
**kwargs
)
r.library_includes += result['library_includes']
r.append_body(result['body'])
r.stdout_interceptor.write_source(
'{}\n'.format(textwrap.dedent(result['rendered']))
)
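# Example (sketch of typical notebook usage; assumes the conventional
# ``import cauldron as cd`` alias inside a step file):
#
#   cd.display.markdown(
#       """
#       # Results for {{ name }}
#       The run finished in **{{ seconds }}** seconds.
#       """,
#       name='trial-a',
#       seconds=42
#   )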
def json(**kwargs):
"""
Adds the specified data to the the output display window with the
specified key. This allows the user to make available arbitrary
JSON-compatible data to the display for runtime use.
:param kwargs:
Each keyword argument is added to the CD.data object with the
specified key and value.
"""
r = _get_report()
r.append_body(render.json(**kwargs))
r.stdout_interceptor.write_source(
'{}\n'.format(_json_io.dumps(kwargs, indent=2))
)
def plotly(
data: typing.Union[dict, list, typing.Any] = None,
layout: typing.Union[dict, typing.Any] = None,
scale: float = 0.5,
figure: typing.Union[dict, typing.Any] = None,
static: bool = False
):
"""
Creates a Plotly plot in the display with the specified data and
layout.
:param data:
The Plotly trace data to be plotted.
:param layout:
The layout data used for the plot.
:param scale:
The display scale with units of fractional screen height. A value
of 0.5 constrains the output to a maximum height equal to half the
height of browser window when viewed. Values below 1.0 are usually
recommended so the entire output can be viewed without scrolling.
:param figure:
In cases where you need to create a figure instead of separate data
and layout information, you can pass the figure here and leave the
data and layout values as None.
:param static:
If true, the plot will be created without interactivity.
This is useful if you have a lot of plots in your notebook.
"""
r = _get_report()
if not figure and not isinstance(data, (list, tuple)):
data = [data]
if 'plotly' not in r.library_includes:
r.library_includes.append('plotly')
r.append_body(render.plotly(
data=data,
layout=layout,
scale=scale,
figure=figure,
static=static
))
r.stdout_interceptor.write_source('[ADDED] Plotly plot\n')
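# Example (sketch; assumes the plotly package and the usual
# ``import cauldron as cd`` alias are available):
#
#   import plotly.graph_objs as go
#
#   trace = go.Scatter(x=[0, 1, 2], y=[3, 1, 2], mode='lines+markers')
#   cd.display.plotly(
#       data=[trace],
#       layout=go.Layout(title='Example Trace'),
#       scale=0.4
#   )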
def table(
data_frame,
scale: float = 0.7,
include_index: bool = False,
max_rows: int = 500,
sample_rows: typing.Optional[int] = None,
formats: typing.Union[
str,
typing.Callable[[typing.Any], str],
typing.Dict[
str,
typing.Union[str, typing.Callable[[typing.Any], str]]
]
] = None
):
"""
Adds the specified data frame to the display in a nicely formatted
scrolling table.
:param data_frame:
The pandas data frame to be rendered to a table.
:param scale:
The display scale with units of fractional screen height. A value
of 0.5 constrains the output to a maximum height equal to half the
height of browser window when viewed. Values below 1.0 are usually
recommended so the entire output can be viewed without scrolling.
:param include_index:
Whether or not the index column should be included in the displayed
output. The index column is not included by default because it is
often unnecessary extra information in the display of the data.
:param max_rows:
This argument exists to prevent accidentally writing very large data
frames to a table, which can cause the notebook display to become
sluggish or unresponsive. If you want to display large tables, you need
only increase the value of this argument.
:param sample_rows:
When set to a positive integer value, the DataFrame will be randomly
sampled to the specified number of rows when displayed in the table.
If the value here is larger than the number of rows in the DataFrame,
the sampling will have no effect and the entire DataFrame will be
displayed instead.
:param formats:
An optional dictionary that, when specified, should contain a mapping
between column names and formatting strings to apply to that column
for display purposes. For example, ``{'foo': '{:,.2f}%'}`` would
transform a column ``foo = [12.2121, 34.987123, 42.72839]`` to
display as ``foo = [12.21%, 34.99%, 42.73%]``. The formatters should
follow the standard Python string formatting guidelines the same as
the ``str.format()`` command having the value of the column as the only
positional argument in the format arguments. A string value can also
be specified for uniform formatting of all columns (or if displaying
a series with only a single value).
"""
r = _get_report()
r.append_body(render.table(
data_frame=data_frame,
scale=scale,
include_index=include_index,
max_rows=max_rows,
sample_rows=sample_rows,
formats=formats
))
r.stdout_interceptor.write_source('[ADDED] Table\n')
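# Example (sketch; assumes pandas and the usual ``import cauldron as cd``):
#
#   import pandas as pd
#
#   df = pd.DataFrame({'foo': [12.2121, 34.987123, 42.72839]})
#   cd.display.table(df, scale=0.5, formats={'foo': '{:,.2f}%'})
#   # the foo column is then shown as 12.21%, 34.99%, 42.73%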
def svg(svg_dom: str, filename: str = None):
"""
Adds the specified SVG string to the display. If a filename is
included, the SVG data will also be saved to that filename within the
project results folder.
:param svg_dom:
The SVG string data to add to the display.
:param filename:
An optional filename where the SVG data should be saved within
the project results folder.
"""
r = _get_report()
r.append_body(render.svg(svg_dom))
r.stdout_interceptor.write_source('[ADDED] SVG\n')
if not filename:
return
if not filename.endswith('.svg'):
filename += '.svg'
r.files[filename] = svg_dom
def jinja(path: str, **kwargs):
"""
Renders the specified Jinja2 template to HTML and adds the output to the
display.
:param path:
The fully-qualified path to the template to be rendered.
:param kwargs:
Any keyword arguments that will be use as variable replacements within
the template.
"""
r = _get_report()
r.append_body(render.jinja(path, **kwargs))
r.stdout_interceptor.write_source('[ADDED] Jinja2 rendered HTML\n')
def whitespace(lines: float = 1.0):
"""
Adds the specified number of lines of whitespace.
:param lines:
The number of lines of whitespace to show.
"""
r = _get_report()
r.append_body(render.whitespace(lines))
r.stdout_interceptor.write_source('\n')
def image(
filename: str,
width: int = None,
height: int = None,
justify: str = 'left'
):
"""
Adds an image to the display. The image must be located within the
assets directory of the Cauldron notebook's folder.
:param filename:
Name of the file within the assets directory,
:param width:
Optional width in pixels for the image.
:param height:
Optional height in pixels for the image.
:param justify:
One of 'left', 'center' or 'right', which specifies how the image
is horizontally justified within the notebook display.
"""
r = _get_report()
path = '/'.join(['reports', r.project.uuid, 'latest', 'assets', filename])
r.append_body(render.image(path, width, height, justify))
r.stdout_interceptor.write_source('[ADDED] Image\n')
def html(dom: str):
"""
A string containing a valid HTML snippet.
:param dom:
The HTML string to add to the display.
"""
r = _get_report()
r.append_body(render.html(dom))
r.stdout_interceptor.write_source('[ADDED] HTML\n')
def workspace(show_values: bool = True, show_types: bool = True):
"""
Adds a list of the shared variables currently stored in the project
workspace.
:param show_values:
When true the values for each variable will be shown in addition to
their name.
:param show_types:
When true the data types for each shared variable will be shown in
addition to their name.
"""
r = _get_report()
data = {}
for key, value in r.project.shared.fetch(None).items():
if key.startswith('__cauldron_'):
continue
data[key] = value
r.append_body(render.status(data, values=show_values, types=show_types))
def pyplot(
figure=None,
scale: float = 0.8,
clear: bool = True,
aspect_ratio: typing.Union[list, tuple] = None
):
"""
Creates a matplotlib plot in the display for the specified figure. The size
of the plot is determined automatically to best fit the notebook.
:param figure:
The matplotlib figure to plot. If omitted, the currently active
figure will be used.
:param scale:
The display scale with units of fractional screen height. A value
of 0.5 constrains the output to a maximum height equal to half the
height of browser window when viewed. Values below 1.0 are usually
recommended so the entire output can be viewed without scrolling.
:param clear:
Clears the figure after it has been rendered. This is useful to
prevent persisting old plot data between repeated runs of the
project files. This can be disabled if the plot is going to be
used later in the project files.
:param aspect_ratio:
The aspect ratio for the displayed plot as a two-element list or
tuple. The first element is the width and the second element the
height. The units are "inches," which is an important consideration
for the display of text within the figure. If no aspect ratio is
specified, the currently assigned values to the plot will be used
instead.
"""
r = _get_report()
r.append_body(render_plots.pyplot(
figure,
scale=scale,
clear=clear,
aspect_ratio=aspect_ratio
))
r.stdout_interceptor.write_source('[ADDED] PyPlot plot\n')
def bokeh(model, scale: float = 0.7, responsive: bool = True):
"""
Adds a Bokeh plot object to the notebook display.
:param model:
The plot object to be added to the notebook display.
:param scale:
How tall the plot should be in the notebook as a fraction of screen
height. A number between 0.1 and 1.0. The default value is 0.7.
:param responsive:
Whether or not the plot should responsively scale to fill the width
of the notebook. The default is True.
"""
r = _get_report()
if 'bokeh' not in r.library_includes:
r.library_includes.append('bokeh')
r.append_body(render_plots.bokeh_plot(
model=model,
scale=scale,
responsive=responsive
))
r.stdout_interceptor.write_source('[ADDED] Bokeh plot\n')
def listing(
source: list,
ordered: bool = False,
expand_full: bool = False
):
"""
An unordered or ordered list of the specified *source* iterable where
each element is converted to a string representation for display.
:param source:
The iterable to display as a list.
:param ordered:
Whether or not the list should be ordered. If False, which is the
default, an unordered bulleted list is created.
:param expand_full:
Whether or not the list should expand to fill the screen horizontally.
When defaulted to False, the list is constrained to the center view
area of the screen along with other text. This can be useful to keep
lists aligned with the text flow.
"""
r = _get_report()
r.append_body(render.listing(
source=source,
ordered=ordered,
expand_full=expand_full
))
r.stdout_interceptor.write_source('[ADDED] Listing\n')
def list_grid(
source: list,
expand_full: bool = False,
column_count: int = 2,
row_spacing: float = 1.0
):
"""
An multi-column list of the specified *source* iterable where
each element is converted to a string representation for display.
:param source:
The iterable to display as a list.
:param expand_full:
Whether or not the list should expand to fill the screen horizontally.
When defaulted to False, the list is constrained to the center view
area of the screen along with other text. This can be useful to keep
lists aligned with the text flow.
:param column_count:
The number of columns to display. The specified count is applicable to
high-definition screens. For lower-definition screens the actual count
displayed may be fewer as the layout responds to less available
horizontal screen space.
:param row_spacing:
The number of lines of whitespace to include between each row in the
grid. Set this to 0 for tightly displayed lists.
"""
r = _get_report()
r.append_body(render.list_grid(
source=source,
expand_full=expand_full,
column_count=column_count,
row_spacing=row_spacing
))
r.stdout_interceptor.write_source('[ADDED] List grid\n')
def latex(source: str):
"""
Add a mathematical equation in latex math-mode syntax to the display.
The @ character is used in place of the traditional backslash escape
character to prevent backslash conflicts with Python strings. For
example, \\delta would be written as @delta.
:param source:
The string representing the latex equation to be rendered.
"""
r = _get_report()
if 'katex' not in r.library_includes:
r.library_includes.append('katex')
r.append_body(render_texts.latex(source.replace('@', '\\')))
r.stdout_interceptor.write_source('[ADDED] Latex equation\n')
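# Example (sketch): the @ prefix is converted to a backslash before rendering,
# so the call below displays the usual LaTeX integral:
#
#   cd.display.latex('@int_0^1 x^2 dx = @frac{1}{3}')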
def head(source, count: int = 5):
"""
Displays a specified number of elements in a source object of many
different possible types.
:param source:
DataFrames will show *count* rows of that DataFrame. A list, tuple or
other iterable, will show the first *count* rows. Dictionaries will
show *count* keys from the dictionary, which will be randomly selected
unless you are using an OrderedDict. Strings will show the first
*count* characters.
:param count:
The number of elements to show from the source.
"""
r = _get_report()
r.append_body(render_texts.head(source, count=count))
r.stdout_interceptor.write_source('[ADDED] Head\n')
def tail(source, count: int = 5):
"""
The opposite of the head function. Displays the last *count* elements of
the *source* object.
:param source:
DataFrames will show the last *count* rows of that DataFrame. A list,
tuple or other iterable, will show the last *count* rows. Dictionaries
will show *count* keys from the dictionary, which will be randomly
selected unless you are using an OrderedDict. Strings will show the
last *count* characters.
:param count:
The number of elements to show from the source.
"""
r = _get_report()
r.append_body(render_texts.tail(source, count=count))
r.stdout_interceptor.write_source('[ADDED] Tail\n')
def status(
message: str = None,
progress: float = None,
section_message: str = None,
section_progress: float = None,
):
"""
Updates the status display, which is only visible while a step is running.
This is useful for providing feedback and information during long-running
steps.
A section progress is also available for cases where long running tasks
consist of multiple tasks and you want to display sub-progress messages
within the context of the larger status.
Note: this is only supported when running in the Cauldron desktop
application.
:param message:
The status message you want to display. If left blank the previously
set status message will be retained. Should you desire to remove an
existing message, specify a blank string for this argument.
:param progress:
A number between zero and one that indicates the overall progress for
the current status. If no value is specified, the previously assigned
progress will be retained.
:param section_message:
The status message you want to display for a particular task within a
long-running step. If left blank the previously set section message
will be retained. Should you desire to remove an existing message,
specify a blank string for this argument.
:param section_progress:
A number between zero and one that indicates the progress for the
current section status. If no value is specified, the previously
assigned section progress value will be retained.
"""
environ.abort_thread()
r = _get_report()
step = _cd.project.get_internal_project().current_step
changes = 0
has_changed = step.progress_message != message
if message is not None and has_changed:
changes += 1
step.progress_message = message
has_changed = step.progress != max(0, min(1, progress or 0))
if progress is not None and has_changed:
changes += 1
step.progress = max(0.0, min(1.0, progress))
has_changed = step.sub_progress_message != section_message
if section_message is not None and has_changed:
changes += 1
step.sub_progress_message = section_message
has_changed = step.sub_progress != max(0, min(1, section_progress or 0))
if section_progress is not None and has_changed:
changes += 1
step.sub_progress = section_progress
if changes > 0:
# update the timestamp to inform rendering that a status
# has changed and should be re-rendered into the step.
r.update_last_modified()
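# Example (sketch): updating the status from inside a long-running loop;
# ``chunks`` and ``process`` are placeholders invented for this example.
#
#   for i, chunk in enumerate(chunks):
#       process(chunk)
#       cd.display.status(
#           message='Processing chunks',
#           progress=(i + 1) / len(chunks)
#       )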
def code_block(
code: str = None,
path: str = None,
language_id: str = None,
title: str = None,
caption: str = None
):
"""
Adds a block of syntax highlighted code to the display from either
the supplied code argument, or from the code file specified
by the path argument.
:param code:
A string containing the code to be added to the display
:param path:
A path to a file containing code to be added to the display
:param language_id:
The language identifier that indicates what language should
be used by the syntax highlighter. Valid values are any of the
languages supported by the Pygments highlighter.
:param title:
If specified, the code block will include a title bar with the
value of this argument
:param caption:
If specified, the code block will include a caption box below the code
that contains the value of this argument
"""
environ.abort_thread()
r = _get_report()
r.append_body(render.code_block(
block=code,
path=path,
language=language_id,
title=title,
caption=caption
))
r.stdout_interceptor.write_source('{}\n'.format(code))
def elapsed():
"""
Displays the elapsed time since the step started running.
"""
environ.abort_thread()
step = _cd.project.get_internal_project().current_step
r = _get_report()
r.append_body(render.elapsed_time(step.elapsed_time))
result = '[ELAPSED]: {}\n'.format(timedelta(seconds=step.elapsed_time))
r.stdout_interceptor.write_source(result)
| mit |
rwgdrummer/maskgen | maskgen/analytics/dctAnalytic.py | 1 | 17525 | # =============================================================================
# Authors: PAR Government
# Organization: DARPA
#
# Copyright (c) 2016 PAR Government
# All rights reserved.
#
#
# adapted from https://github.com/enmasse/jpeg_read
#==============================================================================
import sys
from math import *
from Tkinter import *
import matplotlib.pyplot as plt
import numpy as np
import logging
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
def memoize (function):
# http://programmingzen.com/2009/05/18/memoization-in-ruby-and-python/
cache = {}
def decorated_function (*args):
try:
return cache[args]
except KeyError:
val = function (*args)
cache[args] = val
return val
return decorated_function
@memoize
def decodeBits (len, val):
""" Calculate the value from the "additional" bits in the huffman data. """
return val if (val & (1 << len - 1)) else val - ((1 << len) - 1)
def extractCoeffs (data):
dclum = []
dcchr1 = []
dcchr2 = []
aclum = []
acchr1 = []
acchr2 = []
for MCU in data:
lum = MCU[0]
chr1 = MCU[1]
chr2 = MCU[2]
for MCU_component in lum:
if len (MCU_component):
dclum.append (MCU_component[0])
aclum.extend (MCU_component[1:])
for MCU_component in chr1:
if len (MCU_component):
dcchr1.append (MCU_component[0])
acchr1.extend (MCU_component[1:])
for MCU_component in chr2:
if len (MCU_component):
dcchr2.append (MCU_component[0])
acchr2.extend (MCU_component[1:])
return (dclum, dcchr1, dcchr2, aclum, acchr1, acchr2)
def generateHuffmanCodes (huffsize):
""" Calculate the huffman code of each length. """
huffcode = []
k = 0
code = 0
# Canonical Huffman assignment: codes of a given length are consecutive
# integers; stepping to the next length appends a zero bit (left shift).
for i in range (len (huffsize)):
si = huffsize[i]
for k in range (si):
huffcode.append ((i + 1, code))
code += 1
code <<= 1
return huffcode
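# Worked example (huffsize truncated to three lengths for illustration):
# huffsize = [0, 2, 1] means no 1-bit codes, two 2-bit codes and one 3-bit
# code, which yields the canonical assignment
#
#   generateHuffmanCodes([0, 2, 1]) -> [(2, 0b00), (2, 0b01), (3, 0b100)]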
def getBits (num, gen):
""" Get "num" bits from gen. """
out = 0
for i in range (num):
out <<= 1
val = gen.next ()
if val != []:
out += val & 0x01
else:
return []
return out
def mapHuffmanCodes (codes, values):
""" Map the huffman code to the right value. """
out = {}
for i in range (len (codes)):
out[codes[i]] = values[i]
return out
def readAPP (type, file):
""" Read APP marker. """
Lp = readWord (file)
Lp -= 2
# If APP0 try to read the JFIF header
# Not really necessary
if type == 0:
identifier = file.read (5)
Lp -= 5
version = file.read (2)
Lp -= 2
units = ord (file.read (1))
Lp -= 1
Xdensity = ord (file.read (1)) << 8
Xdensity |= ord (file.read (1))
Lp -= 2
Ydensity = ord (file.read (1)) << 8
Ydensity |= ord (file.read (1))
Lp -= 2
file.seek (Lp, 1)
def readByte (file):
""" Read a byte from file. """
return ord (file.read (1))
def readWord (file):
""" Read a 16 bit word from file. """
return ord (file.read (1)) << 8 | ord (file.read (1))
def restoreDC (data):
""" Restore the DC values. They are coded as the difference from the
previous DC value of the same component.
"""
out = []
dc_prev = [0 for x in range (len (data[0]))]
# For each MCU
for mcu in data:
# For each component
for comp_num in range (len (mcu)):
# For each DU
for du in range (len (mcu[comp_num])):
if mcu[comp_num][du]:
mcu[comp_num][du][0] += dc_prev[comp_num]
dc_prev[comp_num] = mcu[comp_num][du][0]
out.append (mcu)
return out
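# Worked example (single component, one DU per MCU): DC values are stored as
# differences from the previous DC of the same component, so
#
#   restoreDC([[[[5]]], [[[3]]], [[[-2]]]]) -> [[[[5]]], [[[8]]], [[[6]]]]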
class JPEG_Reader:
""" Class for reading DCT coefficients from JPEG files. """
def __init__ (self):
self.huffman_ac_tables = [{}, {}, {}, {}]
self.huffman_dc_tables = [{}, {}, {}, {}]
self.q_table = [[], [], [], []]
self.XYP = 0, 0, 0
self.component = {}
self.num_components = 0
self.mcus_read = 0
self.dc = []
self.inline_dc = 0
self.bit_stream = []
self.EOI = False
def readDCT_Coeffs (self, filename):
""" Reads and returns DCT coefficients from the supplied JPEG file. """
self.__init__ ()
data = []
with open (filename, "rb") as inputFile:
in_char = inputFile.read (1)
while in_char:
if in_char == chr (0xff):
in_char = inputFile.read (1)
in_num = ord (in_char)
if 0xe0 <= in_num <= 0xef:
readAPP (in_num - 0xe0, inputFile)
elif in_num == 0xdb:
self.__readDQT (inputFile)
elif in_num == 0xdc:
self.__readDNL (inputFile)
elif in_num == 0xc4:
self.__readDHT (inputFile)
elif in_num == 0xc8:
print "JPG"
elif 0xc0 <= in_num <= 0xcf:
self.__readSOF (in_num - 0xc0, inputFile)
elif in_num == 0xda:
self.__readSOS (inputFile)
self.bit_stream = self.__readBit (inputFile)
while not self.EOI:
data.append (self.__readMCU ())
in_char = inputFile.read (1)
return extractCoeffs (data if self.inline_dc else restoreDC (data))
def __readBit (self, file):
""" A generator that reads one bit from file and handles markers and
byte stuffing.
"""
input = file.read (1)
while input and not self.EOI:
if input == chr (0xFF):
cmd = file.read (1)
if cmd:
# Byte stuffing
if cmd == chr (0x00):
input = chr (0xFF)
# End of image marker
elif cmd == chr (0xD9):
self.EOI = True
# Restart markers
elif 0xD0 <= ord (cmd) <= 0xD7 and self.inline_dc:
# Reset dc value
self.dc = [0 for i in range (self.num_components + 1)]
input = file.read (1)
else:
input = file.read (1)
#print "CMD: %x" % ord(cmd)
if not self.EOI:
for i in range (7, -1, -1):
# Output next bit
yield (ord (input) >> i) & 0x01
input = file.read (1)
while True:
yield []
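    # Note on the entropy-coded data handled above: an 0xFF byte inside the
    # scan is always followed by a stuffed 0x00, so 0xFF 0x00 decodes to a
    # literal 0xFF data byte, 0xFF 0xD9 (EOI) ends decoding, and restart
    # markers 0xFF 0xD0-0xD7 reset the DC predictors when inline_dc is set.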
def __readDHT (self, file):
""" Read and compute the huffman tables. """
# Read the marker length
Lh = readWord (file)
Lh -= 2
while Lh > 0:
huffsize = []
huffval = []
T = readByte (file)
Th = T & 0x0F
Tc = (T >> 4) & 0x0F
#print "Lh: %d Th: %d Tc: %d" % (Lh, Th, Tc)
Lh -= 1
# Read how many symbols of each length
# up to 16 bits
for i in range (16):
huffsize.append (readByte (file))
Lh -= 1
# Generate the huffman codes
huffcode = generateHuffmanCodes (huffsize)
#print "Huffcode", huffcode
# Read the values that should be mapped to huffman codes
for i in huffcode:
#print i
try:
huffval.append (readByte (file))
Lh -= 1
except TypeError:
continue
# Generate lookup tables
if Tc == 0:
self.huffman_dc_tables[Th] = mapHuffmanCodes (huffcode, huffval)
else:
self.huffman_ac_tables[Th] = mapHuffmanCodes (huffcode, huffval)
def __readDNL (self, file):
""" Read the DNL marker. Changes the number of lines. """
Ld = readWord (file)
Ld -= 2
NL = readWord (file)
Ld -= 2
X, Y, P = self.XYP
if Y == 0:
self.XYP = X, NL, P
def __readDQT (self, file):
""" Read the quantization table. The table is in zigzag order. """
Lq = readWord (file)
Lq -= 2
while Lq > 0:
table = []
Tq = readByte (file)
Pq = Tq >> 4
Tq &= 0xF
Lq -= 1
if Pq == 0:
for i in range (64):
table.append (readByte (file))
Lq -= 1
else:
for i in range (64):
val = readWord (file)
table.append (val)
Lq -= 2
self.q_table[Tq] = table
def __readDU (self, comp_num):
""" Read one data unit with component index comp_num. """
data = []
comp = self.component[comp_num]
huff_tbl = self.huffman_dc_tables[comp['Td']]
# Fill data with 64 coefficients
while len (data) < 64:
key = 0
for bits in range (1, 17):
key_len = []
key <<= 1
# Get one bit from bit_stream
val = getBits (1, self.bit_stream)
if val == []:
break
key |= val
# If huffman code exists
if huff_tbl.has_key ((bits, key)):
key_len = huff_tbl[(bits, key)]
break
# After getting the DC value switch to the AC table
huff_tbl = self.huffman_ac_tables[comp['Ta']]
if key_len == []:
#print (bits, key, bin(key)), "key not found"
break
# If ZRL fill with 16 zero coefficients
elif key_len == 0xF0:
for i in range (16):
data.append (0)
continue
# If not DC coefficient
if len (data) != 0:
# If End of block
if key_len == 0x00:
# Fill the rest of the DU with zeros
while len (data) < 64:
data.append (0)
break
# The first part of the AC key_len is the number of leading
# zeros
for i in range (key_len >> 4):
if len (data) < 64:
data.append (0)
key_len &= 0x0F
if len (data) >= 64:
break
if key_len != 0:
# The rest of key_len is the number of "additional" bits
val = getBits (key_len, self.bit_stream)
if val == []:
break
# Decode the additional bits
num = decodeBits (key_len, val)
# Experimental, doesn't work right
if len (data) == 0 and self.inline_dc:
# The DC coefficient value is added to the DC value from
# the corresponding DU in the previous MCU
num += self.dc[comp_num]
self.dc[comp_num] = num
data.append (num)
else:
data.append (0)
#if len(data) != 64:
#print "Wrong size", len(data)
return data
def __readMCU (self):
""" Read an MCU. """
        comp_num = range (self.num_components)
        mcu = [[] for _ in comp_num]
        # For each component
        for i in comp_num:
            comp = self.component[i + 1]
# For each DU
for j in range (comp['H'] * comp['V']):
if not self.EOI:
mcu[i].append (self.__readDU (i + 1))
self.mcus_read += 1
return mcu
def __readSOF (self, type, file):
""" Read the start of frame marker. """
Lf = readWord (file) # Read the marker length
Lf -= 2
P = readByte (file) # Read the sample precision
Lf -= 1
Y = readWord (file) # Read number of lines
Lf -= 2
X = readWord (file) # Read the number of samples per line
Lf -= 2
Nf = readByte (file) # Read number of components
Lf -= 1
self.XYP = X, Y, P
#print self.XYP
while Lf > 0:
C = readByte (file) # Read component identifier
V = readByte (file) # Read sampling factors
Tq = readByte (file)
Lf -= 3
H = V >> 4
V &= 0xF
# Assign horizontal & vertical sampling factors and qtable
self.component[C] = { 'H' : H, 'V' : V, 'Tq' : Tq }
def __readSOS (self, file):
""" Read the start of scan marker. """
Ls = readWord (file)
Ls -= 2
Ns = readByte (file) # Read number of components in scan
Ls -= 1
for i in range (Ns):
Cs = readByte (file) # Read the scan component selector
Ls -= 1
Ta = readByte (file) # Read the huffman table selectors
Ls -= 1
Td = Ta >> 4
Ta &= 0xF
# Assign the DC huffman table
self.component[Cs]['Td'] = Td
# Assign the AC huffman table
self.component[Cs]['Ta'] = Ta
Ss = readByte (file) # Should be zero if baseline DCT
Ls -= 1
Se = readByte (file) # Should be 63 if baseline DCT
Ls -= 1
A = readByte (file) # Should be zero if baseline DCT
Ls -= 1
#print "Ns:%d Ss:%d Se:%d A:%02X" % (Ns, Ss, Se, A)
self.num_components = Ns
self.dc = [0 for i in range (self.num_components + 1)]
def dequantize (self, mcu):
""" Dequantize an MCU. """
out = mcu
# For each coefficient in each DU in each component, multiply by the
# corresponding value in the quantization table.
for c in range (len (out)):
for du in range (len (out[c])):
for i in range (len (out[c][du])):
out[c][du][i] *= self.q_table[self.component[c + 1]['Tq']][i]
return out
def getHist(filename):
try:
import JPEG_MetaInfoPy
hist, lowValue = JPEG_MetaInfoPy.generateHistogram(filename)
return np.asarray(hist),np.asarray(range(lowValue,lowValue+len(hist)+1))
except Exception as ex:
logging.getLogger('maskgen').warn('External JPEG_MetaInfoPy failed: {}'.format(str(ex)))
DC = JPEG_Reader().readDCT_Coeffs(filename)[0]
minDC = min(DC)
maxDC = max(DC)
binCount = maxDC - minDC + 1
return np.histogram (DC, bins=binCount,
range=(minDC, maxDC + 1))
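# A minimal usage sketch (hypothetical file name). Both code paths return a
# (counts, bin_edges) pair describing the luminance DC histogram:
#
#   counts, bin_edges = getHist('example.jpg')
#   print counts.std()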
class JPEG_View:
def appliesTo (self, filename):
return filename.lower ().endswith (('jpg', 'jpeg'))
def draw (self, frame, filename):
fig = plt.figure ();
self._plotHistogram (fig, getHist(filename))
canvas = FigureCanvasTkAgg (fig, frame)
canvas.show ()
canvas.get_tk_widget ().pack (side=BOTTOM, fill=BOTH, expand=True)
def _labelSigma (self, figure, sigma):
""" Add a label of the value of sigma to the histogram plot. """
props = dict (boxstyle='round', facecolor='wheat', alpha=0.5)
        figure.text (0.25, 0.85, r'$\sigma=%.2f$' % (sigma),
                     fontsize=14, verticalalignment='top', bbox=props)
class DCTView (JPEG_View):
def screenName (self):
return 'JPG DCT Histogram'
def _plotHistogram (self, figure, histogram):
ordinates, abscissae = histogram
plt.bar (abscissae[:-1], ordinates, 1);
self._labelSigma (figure, ordinates.std ())
class FFT_DCTView (JPEG_View):
def screenName (self):
return 'FFT(JPG DCT Histogram)'
def _plotHistogram (self, figure, histogram):
# Calculate the DFT of the zero-meaned histogram values. The n/2+1
# positive frequencies are returned by rfft. Mirror the result back
# into ordinates.
#
mean = histogram[0].mean ()
posFreqs = abs (np.fft.rfft ([i - mean for i in histogram[0]]))
ordinates = list (reversed (posFreqs))
ordinates.extend (posFreqs[1:])
n = len (posFreqs)
abscissae = range (1 - n, n)
plt.plot (abscissae, ordinates, 'k')
plt.plot (abscissae, self.__hat (ordinates), 'r')
self._labelSigma (figure, np.std (ordinates))
def __hat (self, data):
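        """ Build a top-hat reference curve: a floor at 15% of the peak value
        with a central plateau (roughly the middle 15% of the abscissa range)
        raised to the peak, used as a visual reference for the FFT plot. """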
length = len (data)
intercept1 = int (length * 0.425)
intercept2 = int (length * 0.575)
amp = max (data)
threshold = amp * 0.15
arr = np.full (length, threshold)
arr[intercept1:intercept2] = amp
return arr
if __name__ == "__main__":
DCTView ().draw (None, sys.argv[1])
FFT_DCTView ().draw (None, sys.argv[1]) | bsd-3-clause |
DataCanvasIO/example-modules | modules/modeling/basic/linear_svc_estimator/main.py | 2 | 1630 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
from specparser import get_settings_from_file
from pprint import pprint
import csv
from sklearn.svm import LinearSVC
import numpy as np
from sklearn.externals import joblib
import matplotlib
matplotlib.use('Agg')
import datetime
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
def drawPrecisionRecall(X,Y,output_file):
pdf = PdfPages(output_file)
    # use a fixed figure size; sizing the canvas by the number of samples can
    # produce an unusably large figure
    plt.figure(figsize=(8, 6))
plt.plot(Y, X, 'r-o')
plt.title('Precision/Recall')
pdf.savefig() # saves the current figure into a pdf page
plt.close()
pdf.close()
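# Example call for the helper above (hypothetical values): recall is drawn on
# the x-axis and precision on the y-axis, e.g.
#   drawPrecisionRecall([1.0, 0.5, 0.66], [0.33, 0.33, 0.66], 'report.pdf')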
def readcolumn(filename):
column = []
with open(filename,"r") as fconcl:
for line in fconcl:
column.append(line.rstrip('\n'))
return column
def main():
settings = get_settings_from_file("spec.json")
print(settings)
X = np.genfromtxt(settings.Input.X, delimiter=',', skip_header=1)
svc = joblib.load(settings.Input.MODEL)
Y_out = svc.predict(X)
Y_list = [Y_out]
np.savetxt("./conclusion.csv", Y_out, fmt="%d", delimiter=",")
conclusion = readcolumn("./conclusion.csv")
label = readcolumn(settings.Input.Y)
precision_list = []
recall_list = []
hits = 0
for i in range(len(label)):
if conclusion[i] == label[i]:
hits+=1
precision_list.append(1.0*hits/(i+1))
recall_list.append(1.0*hits/(len(label)))
drawPrecisionRecall(precision_list,recall_list,settings.Output.report)
print("Done")
if __name__ == "__main__":
main()
| bsd-3-clause |
bjlittle/iris | docs/gallery_code/oceanography/plot_atlantic_profiles.py | 2 | 3317 | """
Oceanographic Profiles and T-S Diagrams
=======================================
This example demonstrates how to plot vertical profiles of different
variables in the same axes, and how to make a scatter plot of two
variables. There is an oceanographic theme but the same techniques are
equally applicable to atmospheric or other kinds of data.
The data used are profiles of potential temperature and salinity in the
Equatorial and South Atlantic, output from an ocean model.
The y-axis of the first plot produced will be automatically inverted due to the
presence of the attribute positive=down on the depth coordinate. This means
depth values intuitively increase downward on the y-axis.
"""
import matplotlib.pyplot as plt
import iris
import iris.iterate
import iris.plot as iplt
def main():
# Load the gridded temperature and salinity data.
fname = iris.sample_data_path("atlantic_profiles.nc")
cubes = iris.load(fname)
(theta,) = cubes.extract("sea_water_potential_temperature")
(salinity,) = cubes.extract("sea_water_practical_salinity")
# Extract profiles of temperature and salinity from a particular point in
# the southern portion of the domain, and limit the depth of the profile
# to 1000m.
lon_cons = iris.Constraint(longitude=330.5)
lat_cons = iris.Constraint(latitude=lambda l: -10 < l < -9)
depth_cons = iris.Constraint(depth=lambda d: d <= 1000)
theta_1000m = theta.extract(depth_cons & lon_cons & lat_cons)
salinity_1000m = salinity.extract(depth_cons & lon_cons & lat_cons)
# Plot these profiles on the same set of axes. Depth is automatically
# recognised as a vertical coordinate and placed on the y-axis.
# The first plot is in the default axes. We'll use the same color for the
# curve and its axes/tick labels.
plt.figure(figsize=(5, 6))
temperature_color = (0.3, 0.4, 0.5)
ax1 = plt.gca()
iplt.plot(
theta_1000m,
linewidth=2,
color=temperature_color,
alpha=0.75,
)
ax1.set_xlabel("Potential Temperature / K", color=temperature_color)
ax1.set_ylabel("Depth / m")
for ticklabel in ax1.get_xticklabels():
ticklabel.set_color(temperature_color)
# To plot salinity in the same axes we use twiny(). We'll use a different
# color to identify salinity.
salinity_color = (0.6, 0.1, 0.15)
ax2 = plt.gca().twiny()
iplt.plot(
salinity_1000m,
linewidth=2,
color=salinity_color,
alpha=0.75,
)
ax2.set_xlabel("Salinity / PSU", color=salinity_color)
for ticklabel in ax2.get_xticklabels():
ticklabel.set_color(salinity_color)
plt.tight_layout()
iplt.show()
# Now plot a T-S diagram using scatter. We'll use all the profiles here,
# and each point will be coloured according to its depth.
plt.figure(figsize=(6, 6))
depth_values = theta.coord("depth").points
for s, t in iris.iterate.izip(salinity, theta, coords="depth"):
iplt.scatter(s, t, c=depth_values, marker="+", cmap="RdYlBu_r")
ax = plt.gca()
ax.set_xlabel("Salinity / PSU")
ax.set_ylabel("Potential Temperature / K")
cb = plt.colorbar(orientation="horizontal")
cb.set_label("Depth / m")
plt.tight_layout()
iplt.show()
if __name__ == "__main__":
main()
| lgpl-3.0 |
dingocuster/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 254 | 7434 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
nguy/brawl4d | LMA/controller.py | 1 | 10240 | """ Support for LMA data display in brawl4d.
These are meant to be lightweight wrappers to coordinate data formats
understood by the lmatools package.
"""
import time
import numpy as np
from lmatools.flashsort.autosort.LMAarrayFile import LMAdataFile
from stormdrain.bounds import Bounds, BoundsFilter
from stormdrain.data import NamedArrayDataset, indexed
from stormdrain.pipeline import Branchpoint, coroutine, ItemModifier
from stormdrain.support.matplotlib.artistupdaters import PanelsScatterController
from stormdrain.support.matplotlib.poly_lasso import LassoPayloadController
class LMAAnimator(object):
def __init__(self, duration, variable='time'):
self.tstart = time.time()
self.duration = duration
def draw_frame(self, animator, time_fraction):
pass
def init_draw(self, animator):
pass
class LMAController(object):
""" Manages bounds object with LMA-specific criteria. Convenience functions for loading LMA data.
"""
z_alt_mapping = {'z':('alt', (lambda v: (v[0]*1.0e3 - 1.0e3, v[1]*1.0e3 + 1.0e3)) ) }
def __init__(self, *args, **kwargs):
super(LMAController, self).__init__(*args, **kwargs)
self.bounds = Bounds(chi2=(0.0, 1.0), stations=(6, 99))
self.default_color_bounds = Bounds(parent=self.bounds, charge=(-1,1))
self.datasets = set()
self.flash_datasets = set()
def pipeline_for_dataset(self, d, panels,
names4d=('lon', 'lat', 'alt', 'time'),
transform_mapping=None,
scatter_kwargs = {}
):
""" Set 4d_names to the spatial coordinate names in d that provide
longitude, latitude, altitude, and time. Default of
lon, lat, alt, and time which are assumed to be in deg, deg, meters, seconds
entries in the scatter_kwargs dictionary are passed as kwargs to the matplotlib
scatter call.
"""
# Set up dataset -> time-height bound filter -> brancher
branch = Branchpoint([])
brancher = branch.broadcast()
# strictly speaking, z in the map projection and MSL alt aren't the same - z is somewhat distorted by the projection.
# therefore, add some padding. filtered again later after projection.
quality_filter = BoundsFilter(target=brancher, bounds=self.bounds).filter()
if transform_mapping is None:
transform_mapping = self.z_alt_mapping
        # Use 'time', which is the name in panels.bounds, and not names4d[3],
        # which is linked to 'time' by transform_mapping if necessary
bound_filter = BoundsFilter(target=quality_filter, bounds=panels.bounds,
restrict_to=('time'), transform_mapping=transform_mapping)
filterer = bound_filter.filter()
d.target = filterer
# Set up brancher -> coordinate transform -> final_filter -> mutli-axis scatter updater
scatter_ctrl = PanelsScatterController(
panels=panels,
color_field=names4d[3],
default_color_bounds=self.default_color_bounds,
**scatter_kwargs)
scatter_outlet_broadcaster = scatter_ctrl.branchpoint
scatter_updater = scatter_outlet_broadcaster.broadcast()
final_bound_filter = BoundsFilter(target=scatter_updater, bounds=panels.bounds)
final_filterer = final_bound_filter.filter()
cs_transformer = panels.cs.project_points(
target=final_filterer,
x_coord='x', y_coord='y', z_coord='z',
lat_coord=names4d[1], lon_coord=names4d[0], alt_coord=names4d[2],
distance_scale_factor=1.0e-3)
branch.targets.add(cs_transformer)
# return each broadcaster so that other things can tap into results of transformation of this dataset
return branch, scatter_ctrl
@coroutine
def flash_stat_printer(self, min_points=10):
while True:
ev, fl = (yield)
template = "{0} of {1} flashes have > {3} points. Their average area = {2:5.1f} km^2"
N = len(fl)
good = (fl['n_points'] >= min_points)
N_good = len(fl[good])
area = np.mean(fl['area'][good])
print template.format(N_good, N, area, min_points)
def flash_stats_for_dataset(self, d, selection_broadcaster):
flash_stat_branchpoint = Branchpoint([self.flash_stat_printer()])
flash_stat_brancher = flash_stat_branchpoint.broadcast()
@coroutine
def flash_data_for_selection(target, flash_id_key = 'flash_id'):
""" Accepts an array of event data from the pipeline, and sends
event and flash data.
"""
while True:
ev = (yield) # array of event data
fl_dat = d.flash_data
flash_ids = set(ev[flash_id_key])
flashes = np.fromiter(
(fl for fl in fl_dat if fl[flash_id_key] in flash_ids),
dtype=fl_dat.dtype)
target.send((ev, flashes))
selection_broadcaster.targets.add(flash_data_for_selection(flash_stat_brancher))
return flash_stat_branchpoint
@indexed()
def read_dat(self, *args, **kwargs):
""" All args and kwargs are passed to the LMAdataFile object from lmatools"""
lma = LMAdataFile(*args, **kwargs)
stn = lma.stations # adds stations to lma.data as a side-effect
d = NamedArrayDataset(lma.data)
self.datasets.add(d)
return d
def load_dat_to_panels(self, panels, *args, **kwargs):
""" All args and kwargs are passed to the LMAdataFile object from lmatools"""
d = self.read_dat(*args, **kwargs)
post_filter_brancher, scatter_ctrl = self.pipeline_for_dataset(d, panels)
branch_to_scatter_artists = scatter_ctrl.branchpoint
# ask for a copy of the array from each selection operation, so that
# it's saved and ready for any lasso operations
charge_lasso = LassoChargeController(
target=ItemModifier(
target=d.update(field_names=['charge']),
item_name='charge').modify())
branch_to_scatter_artists.targets.add(charge_lasso.cache_segment.cache_segment())
return d, post_filter_brancher, scatter_ctrl, charge_lasso
@indexed(index_name='hdf_row_idx')
def read_hdf5(self, LMAfileHDF):
try:
import tables
except ImportError:
print "couldn't import pytables"
return None
from hdf5_lma import HDF5Dataset
# get the HDF5 table name
LMAh5 = tables.openFile(LMAfileHDF, 'r')
table_names = LMAh5.root.events._v_children.keys()
table_path = '/events/' + table_names[0]
LMAh5.close()
d = HDF5Dataset(LMAfileHDF, table_path=table_path, mode='a')
self.datasets.add(d)
if d.flash_table is not None:
print "found flash data"
return d
def load_hdf5_to_panels(self, panels, LMAfileHDF, scatter_kwargs={}):
d = self.read_hdf5(LMAfileHDF)
post_filter_brancher, scatter_ctrl = self.pipeline_for_dataset(d, panels, scatter_kwargs=scatter_kwargs)
branch_to_scatter_artists = scatter_ctrl.branchpoint
charge_lasso = LassoChargeController(
target=ItemModifier(
target=d.update(index_name='hdf_row_idx',
field_names=['charge']),
item_name='charge').modify())
branch_to_scatter_artists.targets.add(charge_lasso.cache_segment.cache_segment())
return d, post_filter_brancher, scatter_ctrl, charge_lasso
def load_hdf5_flashes_to_panels(self, panels, hdf5dataset, min_points=10):
""" Set up a flash dataset display. The sole argument is usually the HDF5
LMA dataset returned by a call to self.load_hdf5_to_panels """
from hdf5_lma import HDF5FlashDataset
if hdf5dataset.flash_table is not None:
point_count_dtype = hdf5dataset.flash_data['n_points'].dtype
            self.bounds.n_points = (min_points, np.iinfo(point_count_dtype).max)
flash_d = HDF5FlashDataset(hdf5dataset)
transform_mapping = {}
transform_mapping['time'] = ('start', (lambda v: (v[0], v[1])) )
transform_mapping['lat'] = ('init_lat', (lambda v: (v[0], v[1])) )
transform_mapping['lon'] = ('init_lon', (lambda v: (v[0], v[1])) )
transform_mapping['z'] = ('init_alt', (lambda v: (v[0]*1.0e3 - 1.0e3, v[1]*1.0e3 + 1.0e3)) )
flash_post_filter_brancher, flash_scatter_ctrl = self.pipeline_for_dataset(flash_d, panels,
transform_mapping=transform_mapping,
names4d=('init_lon', 'init_lat', 'init_alt', 'start') )
for art in flash_scatter_ctrl.artist_outlet_controllers:
# there is no time variable, but the artist updater is set to expect
# time. Patch that up.
if art.coords == ('time', 'z'):
art.coords = ('start', 'z')
# Draw flash markers in a different style
art.artist.set_edgecolor('k')
self.flash_datasets.add(flash_d)
return flash_d, flash_post_filter_brancher, flash_scatter_ctrl
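# A minimal usage sketch (hypothetical file and variable names; a configured
# brawl4d panels object is assumed to exist):
#
#   lma_ctrl = LMAController()
#   d, brancher, scatter_ctrl, charge_lasso = lma_ctrl.load_hdf5_to_panels(
#       panels, 'LYLOUT_example.h5')
#   stats_branch = lma_ctrl.flash_stats_for_dataset(d, scatter_ctrl.branchpoint)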
class LassoChargeController(LassoPayloadController):
""" The "charge" attribute is one of {-1, 0, 1} to set
negative, unclassified, or positive charge, or None
to do nothing.
"""
charge = LassoPayloadController.Payload() | bsd-2-clause |
ashokpant/clandmark | python_interface/bin/flandmark_demo.py | 6 | 2152 | import numpy as np
import os
from fnmatch import fnmatch
from py_flandmark import PyFlandmark
from PIL import Image
import ImageDraw
import matplotlib.pyplot as plt
def rgb2gray(rgb):
    """
    Converts an RGB array to its grayscale variant using the standard
    ITU-R BT.601 luma weights (a helper that PIL/numpy do not provide directly).
    """
    return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
def read_bbox_from_txt(file_name):
"""
returns 2x2 matrix coordinates of
left upper and right lower corners
of rectangle that contains face stored
in columns of matrix
"""
f = open(file_name)
str = f.read().replace(',', ' ')
f.close()
ret = np.array(map(int,str.split()) ,dtype=np.int32)
ret = ret.reshape((2,2), order='F')
return ret
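# Example of the .det format parsed above (hypothetical values): a file
# containing "72,90,185,210" yields array([[ 72, 185], [ 90, 210]]), i.e. the
# first column is the upper-left corner and the second the lower-right corner.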
DIR = '../../../data/Images/'
JPGS = [f for f in os.listdir(DIR) if fnmatch(f, '*.jpg')]
flmrk = PyFlandmark("../../../data/flandmark_model.xml", False)
for jpg_name in JPGS:
file_name = jpg_name[:-4]
img = Image.open(DIR + jpg_name)
arr = rgb2gray(np.asarray(img))
bbox = read_bbox_from_txt(DIR + jpg_name[:-4] + '.det')
d_landmarks = flmrk.detect(arr, bbox)
n = d_landmarks.shape[1]
print "test detect method"
im = Image.fromarray(arr)
img_dr = ImageDraw.Draw(im)
img_dr.rectangle([tuple(bbox[:,0]), tuple(bbox[:,1])], outline="#FF00FF")
r = 2.
for i in xrange(n):
x = d_landmarks[0,i]
y = d_landmarks[1,i]
img_dr.ellipse((x-r, y-r, x+r, y+r), fill=0.)
plt.imshow(np.asarray(im), cmap = plt.get_cmap('gray'))
plt.show()
print "test detect method"
frame = flmrk.get_normalized_frame(arr, bbox)[0]
frame = frame.astype(np.double)
im = Image.fromarray(frame)
plt.imshow(np.asarray(im), cmap = plt.get_cmap('gray'))
plt.show()
print "test detect_base method"
landmarks = flmrk.detect_base(frame)
im = Image.fromarray(frame)
img_dr = ImageDraw.Draw(im)
r = 2.
for i in xrange(n):
x = landmarks[0,i]
y = landmarks[1,i]
img_dr.ellipse((x-r, y-r, x+r, y+r), fill=0.)
plt.imshow(np.asarray(im), cmap = plt.get_cmap('gray'))
plt.show()
print "test psi method"
psi = flmrk.get_psi(frame, landmarks.astype(np.int32), bbox)
#flmrk.get_psi(d_landmarks, arr, bbox)
break | gpl-3.0 |
bradleyhd/netsim | nodes_vs_routing_speed.py | 1 | 2878 | import matplotlib.pyplot as plt
import numpy as np
import math
from scipy.optimize import curve_fit
def linear(x, a, b):
return a * x + b
def quadratic(x, a, b, c):
return a * x**2 + b * x + c
def exponential(x, a, b, c):
return a * x**b + c
fig = plt.figure(num=None, figsize=(12, 8), dpi=300, facecolor='k', edgecolor='k')
xs = [[1014, 4383, 11821, 37698, 108043, 286563, 672292], [1014, 4383, 11821, 37698, 108043, 286563, 672292], [1014, 4383, 11821, 37698, 108043, 286563, 672292], [1014, 4383, 11821, 37698, 108043, 286563, 672292]]
ys = [[0.00013309850001519408, 0.00059208550001699223, 0.002604027000003839, 0.004665461000030291, 0.014662985999962075, 0.023410306499954459, 0.041176939000251878], [0.00014861549998101964, 0.00055641999999522795, 0.002577900000005684, 0.0054275369999459144, 0.021226498000032734, 0.029786237500047719, 0.059782716000881919], [0.00012334000000180367, 0.00043368899999052246, 0.0020054734999632728, 0.005848614000001362, 0.014609930999995413, 0.019599954500336025, 0.028973604500606598], [0.00012613299999486571, 0.00044437049999146438, 0.0021501399999692694, 0.0055929929999933847, 0.019908546500118973, 0.039582631500252319, 0.054390303499531001]]
ys = np.array(ys) * 1000
def graph(i, label, color, marker, l_marker):
y = np.array(ys[i])
x = np.array(xs[i])
xl = np.linspace(np.min(x), np.max(x), 500)
popt, pcov = curve_fit(exponential, x, y)
plt.scatter(x, y, label=label, color=color, marker=marker)
plt.plot(xl, exponential(xl, *popt), color=color, linestyle=l_marker)
blue = '#5738FF'
purple = '#E747E7'
orange = '#E7A725'
green = '#A1FF47'
red = '#FF1E43'
gray = '#333333'
white = 'w'
graph(0, 'EDS5 - original graph', red, 'o', '--')
graph(1, 'N5 - original graph', purple, 's', '--')
graph(2, 'EDS5 - decision graph', blue, '^', '--')
graph(3, 'N5 - decision graph', white, 'D', '--')
ax = fig.gca()
plt.title('Effects of Node Ordering on Routing Speed', color=white)
plt.xlabel('Effective $\\vert V\/\\vert$')
plt.ylabel('Routing Time (ms)')
plt.axes().set_axis_bgcolor('black')
ax.xaxis.label.set_color(white)
ax.yaxis.label.set_color(white)
ax.tick_params(axis='x', colors=white)
ax.tick_params(axis='y', colors=white)
ax.spines['bottom'].set_color(white)
ax.spines['top'].set_color(white)
ax.spines['left'].set_color(white)
ax.spines['right'].set_color(white)
legend = plt.legend(loc=0, numpoints=1, framealpha=0.0)
legend.get_frame().set_facecolor('k')
max_x = np.max(np.array(xs))
max_y = np.max(np.array(ys))
min_x = np.min(np.array(xs))
min_y = 0 - (max_y * 0.01)
min_x = 0 - (max_x * 0.01)
max_x *= 1.01
max_y *= 1.01
plt.axes().set_xlim([min_x, max_x])
plt.axes().set_ylim([min_y, max_y])
for text in legend.get_texts():
text.set_color(white)
# plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.savefig('nodes_vs_routing_speed.png', transparent=True)
#plt.show() | gpl-3.0 |
q1ang/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/doc/mpl_toolkits/axes_grid/examples/demo_parasite_axes2.py | 16 | 1208 | from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import matplotlib.pyplot as plt
if 1:
host = host_subplot(111, axes_class=AA.Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
par2 = host.twinx()
offset = 60
new_fixed_axis = par2.get_grid_helper().new_fixed_axis
par2.axis["right"] = new_fixed_axis(loc="right",
axes=par2,
offset=(offset, 0))
par2.axis["right"].toggle(all=True)
host.set_xlim(0, 2)
host.set_ylim(0, 2)
host.set_xlabel("Distance")
host.set_ylabel("Density")
par1.set_ylabel("Temperature")
par2.set_ylabel("Velocity")
p1, = host.plot([0, 1, 2], [0, 1, 2], label="Density")
p2, = par1.plot([0, 1, 2], [0, 3, 2], label="Temperature")
p3, = par2.plot([0, 1, 2], [50, 30, 15], label="Velocity")
par1.set_ylim(0, 4)
par2.set_ylim(1, 65)
host.legend()
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
par2.axis["right"].label.set_color(p3.get_color())
plt.draw()
plt.show()
#plt.savefig("Test")
| mit |
fmacias64/spyre | setup.py | 3 | 1217 | from setuptools import setup, find_packages
setup(
name='DataSpyre',
version='0.2.0',
description='Spyre makes it easy to build interactive web applications, and requires no knowledge of HTML, CSS, or Javascript.',
url='https://github.com/adamhajari/spyre',
author='Adam Hajari',
author_email='adam@nextbigsound.com',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: CherryPy',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Environment :: Web Environment',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
],
keywords='web application template data visualization',
include_package_data = True, # include everything in source control
packages = ['spyre'], # include all packages under src
package_data = {
'': ['*.js','*.css','*.html'],
'public': ['js/*.js','css/*.css'],
},
install_requires=[
"numpy",
"pandas",
"cherrypy",
"jinja2",
"matplotlib",
]
)
| mit |
davidgardenier/frbpoppy | tests/lognlogs/local.py | 1 | 1611 | """Check the log N log F slope of a local population."""
import numpy as np
import matplotlib.pyplot as plt
from frbpoppy import CosmicPopulation, Survey, SurveyPopulation
from frbpoppy.population import unpickle
from tests.convenience import plot_aa_style, rel_path
MAKE = True
if MAKE:
population = CosmicPopulation.simple(1e5, generate=True)
survey = Survey('perfect')
surv_pop = SurveyPopulation(population, survey)
surv_pop.name = 'lognlogflocal'
surv_pop.save()
else:
surv_pop = unpickle('lognlogflocal')
# Get parameter
parms = surv_pop.frbs.fluence
min_p = min(parms)
max_p = max(parms)
# Bin up
min_f = np.log10(min(parms))
max_f = np.log10(max(parms))
log_bins = np.logspace(min_f, max_f, 50)
hist, edges = np.histogram(parms, bins=log_bins)
n_gt_s = np.cumsum(hist[::-1])[::-1]
# Calculate alpha
alpha, alpha_err, norm = surv_pop.frbs.calc_logn_logs(parameter='fluence',
min_p=min_p,
max_p=max_p)
print(alpha, alpha_err, norm)
xs = 10**((np.log10(edges[:-1]) + np.log10(edges[1:])) / 2)
xs = xs[xs >= min_p]
xs = xs[xs <= max_p]
ys = [norm*x**(alpha) for x in xs]
plot_aa_style()
fig = plt.figure()
ax = fig.add_subplot(111)
plt.step(edges[:-1], n_gt_s, where='post')
plt.plot(xs, ys, linestyle='--',
label=rf'$\alpha$ = {alpha:.3} $\pm$ {round(abs(alpha_err), 2)}')
plt.xlabel('Fluence (Jy ms)')
plt.ylabel(r'N(${>}Fluence$)')
plt.xscale('log')
plt.yscale('log')
plt.legend()
plt.tight_layout()
plt.savefig(rel_path('plots/logn_logf_local.pdf'))
| mit |
milankl/swm | calc/misc/c_diss_plot.py | 1 | 3966 | from __future__ import print_function
path = '/home/mkloewer/python/swm/'
import os; os.chdir(path) # change working directory
import numpy as np
from scipy import sparse
import time as tictoc
from netCDF4 import Dataset
import glob
import matplotlib.pyplot as plt
# OPTIONS
runfolder = [2,3]
## read data
for r,i in zip(runfolder,range(len(runfolder))):
runpath = path+'data/run%04i' % r
if i == 0:
u = np.load(runpath+'/u_sub.npy')
v = np.load(runpath+'/v_sub.npy')
h = np.load(runpath+'/h_sub.npy')
time = np.load(runpath+'/t_sub.npy')
print('run %i read.' % r)
else:
u = np.concatenate((u,np.load(runpath+'/u_sub.npy')))
v = np.concatenate((v,np.load(runpath+'/v_sub.npy')))
h = np.concatenate((h,np.load(runpath+'/h_sub.npy')))
time = np.hstack((time,np.load(runpath+'/t_sub.npy')))
print('run %i read.' % r)
t = time / 3600. / 24. # in days
## read param
global param
param = np.load(runpath+'/param.npy').all()
param['dat_type'] = np.float32
# import functions
exec(open(path+'swm_param.py').read())
exec(open(path+'swm_operators.py').read())
exec(open(path+'swm_output.py').read())
param['output'] = 0
set_grad_mat()
set_interp_mat()
set_lapl_mat()
set_coriolis()
tlen = len(time)
## create ouputfolder
try:
os.mkdir(runpath+'/analysis')
except:
pass
## reshape u,v
u = u.reshape((tlen,param['Nu'])).T
v = v.reshape((tlen,param['Nv'])).T
h = h.reshape((tlen,param['NT'])).T
print('Reshape done.')
##
dudx = Gux.dot(u)
dudy = Guy.dot(u)
dvdx = Gvx.dot(v)
dvdy = Gvy.dot(v)
n = 2
D = np.sqrt((dudx - dvdy)**2 + IqT.dot((dudy + dvdx)**2))
Ro = (D.T/f_T)
Rom = Ro.mean(axis=0)
c = (1/(1+Ro)**n).mean(axis=0)
# REYNOLDS, ROSSBY, EKMAN NUMBER MEAN
u_T = IuT.dot(u)
v_T = IvT.dot(v)
print('u,v interpolation done.')
#advective term
adv_u = u_T*Gux.dot(u) + v_T*IqT.dot(Guy.dot(u))
adv_v = u_T*IqT.dot(Gvx.dot(v)) + v_T*Gvy.dot(v)
del u_T,v_T
adv_term = np.sqrt(adv_u**2 + adv_v**2)
del adv_u, adv_v
print('Advection term done.')
#coriolis term
cor_term = (f_T*np.sqrt(IuT.dot(u**2) + IvT.dot(v**2)).T).T
print('Coriolis term done.')
Ro2 = adv_term / cor_term
c2 = (1/(1+Ro2)**n).mean(axis=1)
Ro2m = Ro2.mean(axis=1)
##
levs1 = np.linspace(0,.2,21)
levs2 = np.linspace(0.5,1,21)
fig,axs = plt.subplots(2,3,sharex=True,sharey=True,figsize=(9,5.5))
plt.tight_layout(rect=[-.02,-.03,1.12,.97],w_pad=0.1)
axs[0,0].contourf(param['x_T'],param['y_T'],h2mat(Ro2m),levs1)
axs[0,1].contourf(param['x_T'],param['y_T'],h2mat(Rom),levs1,extend='max')
m1 = axs[0,2].contourf(param['x_T'],param['y_T'],h2mat(Ro[-1,:]),levs1,extend='max')
plt.colorbar(m1,ax=(axs[0,0],axs[0,1],axs[0,2]),ticks=np.arange(0,.22,.04))
axs[1,0].contourf(param['x_T'],param['y_T'],h2mat(c2),levs2)
m21 = axs[1,0].contour(param['x_T'],param['y_T'],h2mat(c2),[0.8],linewidths=0.7)
axs[1,1].contourf(param['x_T'],param['y_T'],h2mat(c),levs2)
m2 = axs[1,2].contourf(param['x_T'],param['y_T'],h2mat(1/(1+Ro[-1,:])**n),levs2,extend='min')
axs[1,2].contour(param['x_T'],param['y_T'],h2mat(1/(1+Ro[-1,:])**n),[0.8],linewidths=0.7)
m22 = axs[1,1].contour(param['x_T'],param['y_T'],h2mat(c),[0.8],linewidths=0.7)
plt.colorbar(m2,ax=(axs[1,0],axs[1,1],axs[1,2]),ticks=np.arange(0.5,1.05,.05))
plt.clabel(m22, inline=1, fontsize=5,fmt='%.1f')
plt.clabel(m21, inline=1, fontsize=5,fmt='%.1f')
axs[0,0].set_xticks([])
axs[0,0].set_yticks([])
axs[0,0].set_title(r'$\overline{R_o} = \overline{\frac{|(\mathbf{u} \cdot \nabla)\mathbf{u}|}{|f\mathbf{u}|}}$')
axs[0,1].set_title(r'$\overline{R_o^*} = \overline{\frac{|D|}{f}}$')
axs[0,2].set_title(r'snapshot: $R_o^*$')
axs[1,0].set_title(r'$(1+\overline{R_o})^{-2}$')
axs[1,1].set_title(r'$(1+\overline{R_o}^*)^{-2}$')
axs[1,2].set_title(r'$(1+R_o^*)^{-2}$')
axs[0,0].set_ylabel('y')
axs[1,0].set_ylabel('y')
axs[1,0].set_xlabel('x')
axs[1,1].set_xlabel('x')
plt.savefig(path+'compare/Ro_scaling.png',dpi=150)
plt.close(fig)
#plt.show()
| gpl-3.0 |
schets/scikit-learn | examples/mixture/plot_gmm_sin.py | 248 | 2747 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 Gaussian components, finds too-small components and very
little structure. The fits by the Dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to finding relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from sklearn.externals.six.moves import xrange
# Number of samples per component
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples
for i in xrange(X.shape[0]):
x = i * step - 6
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([
(mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
"Expectation-maximization"),
(mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
n_iter=100),
"Dirichlet Process,alpha=0.01"),
(mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
n_iter=100),
"Dirichlet Process,alpha=100.")]):
clf.fit(X)
splot = plt.subplot(3, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6, 4 * np.pi - 6)
plt.ylim(-5, 5)
plt.title(title)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
waterponey/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 55 | 2433 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plt.scatter(X[labels == outer, 0], X[labels == outer, 1], color='navy',
marker='s', lw=0, label="outer labeled", s=10)
plt.scatter(X[labels == inner, 0], X[labels == inner, 1], color='c',
marker='s', lw=0, label='inner labeled', s=10)
plt.scatter(X[labels == -1, 0], X[labels == -1, 1], color='darkorange',
marker='.', label='unlabeled')
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Raw data (2 classes=outer and inner)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plt.scatter(X[outer_numbers, 0], X[outer_numbers, 1], color='navy',
marker='s', lw=0, s=10, label="outer learned")
plt.scatter(X[inner_numbers, 0], X[inner_numbers, 1], color='c',
marker='s', lw=0, s=10, label="inner learned")
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
shangwuhencc/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <mr.phil.roth@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropicly distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
analogdevicesinc/gnuradio | gr-analog/examples/fmtest.py | 40 | 7941 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
from gnuradio import analog
from gnuradio import channels
import sys, math, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class fmtx(gr.hier_block2):
def __init__(self, lo_freq, audio_rate, if_rate):
gr.hier_block2.__init__(self, "build_fm",
gr.io_signature(1, 1, gr.sizeof_float),
gr.io_signature(1, 1, gr.sizeof_gr_complex))
fmtx = analog.nbfm_tx(audio_rate, if_rate, max_dev=5e3, tau=75e-6)
# Local oscillator
lo = analog.sig_source_c(if_rate, # sample rate
analog.GR_SIN_WAVE, # waveform type
lo_freq, # frequency
1.0, # amplitude
0) # DC Offset
mixer = blocks.multiply_cc()
self.connect(self, fmtx, (mixer, 0))
self.connect(lo, (mixer, 1))
self.connect(mixer, self)
class fmtest(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._nsamples = 1000000
self._audio_rate = 8000
# Set up N channels with their own baseband and IF frequencies
self._N = 5
chspacing = 16000
freq = [10, 20, 30, 40, 50]
f_lo = [0, 1*chspacing, -1*chspacing, 2*chspacing, -2*chspacing]
self._if_rate = 4*self._N*self._audio_rate
# Create a signal source and frequency modulate it
self.sum = blocks.add_cc()
for n in xrange(self._N):
sig = analog.sig_source_f(self._audio_rate, analog.GR_SIN_WAVE, freq[n], 0.5)
fm = fmtx(f_lo[n], self._audio_rate, self._if_rate)
self.connect(sig, fm)
self.connect(fm, (self.sum, n))
self.head = blocks.head(gr.sizeof_gr_complex, self._nsamples)
self.snk_tx = blocks.vector_sink_c()
self.channel = channels.channel_model(0.1)
self.connect(self.sum, self.head, self.channel, self.snk_tx)
# Design the channlizer
self._M = 10
bw = chspacing/2.0
t_bw = chspacing/10.0
self._chan_rate = self._if_rate / self._M
self._taps = filter.firdes.low_pass_2(1, self._if_rate, bw, t_bw,
attenuation_dB=100,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
tpc = math.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps)
self.connect(self.channel, self.pfb)
# Create a file sink for each of M output channels of the filter and connect it
self.fmdet = list()
self.squelch = list()
self.snks = list()
for i in xrange(self._M):
self.fmdet.append(analog.nbfm_rx(self._audio_rate, self._chan_rate))
self.squelch.append(analog.standard_squelch(self._audio_rate*10))
self.snks.append(blocks.vector_sink_f())
self.connect((self.pfb, i), self.fmdet[i], self.squelch[i], self.snks[i])
def num_tx_channels(self):
return self._N
def num_rx_channels(self):
return self._M
def main():
fm = fmtest()
tstart = time.time()
fm.run()
tend = time.time()
if 1:
fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
Ns = 10000
Ne = 100000
fftlen = 8192
winfunc = scipy.blackman
# Plot transmitted signal
fs = fm._if_rate
d = fm.snk_tx.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = sp1_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
visible=False)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-120.0, 20.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
#p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
sp1_t.set_ylim([-5, 5])
# Set up the number of rows and columns for plotting the subfigures
Ncols = int(scipy.floor(scipy.sqrt(fm.num_rx_channels())))
Nrows = int(scipy.floor(fm.num_rx_channels() / Ncols))
if(fm.num_rx_channels() % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = fm._audio_rate
for i in xrange(len(fm.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = fm.snks[i].data()[Ns:Ne]
sp2_f = fig2.add_subplot(Nrows, Ncols, 1+i)
X,freq = sp2_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
visible=False)
#X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
X_o = 10.0*scipy.log10(abs(X))
#f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
f_o = scipy.arange(0, fs_o/2.0, fs_o/2.0/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+0.1])
sp2_f.set_ylim([-120.0, 20.0])
sp2_f.grid(True)
sp2_f.set_title(("Channel %d" % i), weight="bold")
sp2_f.set_xlabel("Frequency (kHz)")
sp2_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs_o
Tmax = len(d)*Ts
t_o = scipy.arange(0, Tmax, Ts)
x_t = scipy.array(d)
sp2_t = fig3.add_subplot(Nrows, Ncols, 1+i)
p2_t = sp2_t.plot(t_o, x_t.real, "b")
p2_t = sp2_t.plot(t_o, x_t.imag, "r")
sp2_t.set_xlim([min(t_o), max(t_o)+1])
sp2_t.set_ylim([-1, 1])
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
ghchinoy/tensorflow | tensorflow/contrib/timeseries/examples/known_anomaly.py | 24 | 7880 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example of using an exogenous feature to ignore a known anomaly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
from os import path
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/changepoints.csv")
def state_space_estimator(exogenous_feature_columns):
"""Constructs a StructuralEnsembleRegressor."""
def _exogenous_update_condition(times, features):
del times # unused
# Make exogenous updates sparse by setting an update condition. This in
# effect allows missing exogenous features: if the condition evaluates to
# False, no update is performed. Otherwise we sometimes end up with "leaky"
# updates which add unnecessary uncertainty to the model even when there is
# no changepoint.
return tf.equal(tf.squeeze(features["is_changepoint"], axis=-1), "yes")
return (
tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=12,
# Extract a smooth period by constraining the number of latent values
# being cycled between.
cycle_num_latent_values=3,
num_features=1,
exogenous_feature_columns=exogenous_feature_columns,
exogenous_update_condition=_exogenous_update_condition),
# Use truncated backpropagation with a window size of 64, batching
# together 4 of these windows (random offsets) per training step. Training
# with exogenous features often requires somewhat larger windows.
4, 64)
def autoregressive_estimator(exogenous_feature_columns):
input_window_size = 8
output_window_size = 2
return (
tf.contrib.timeseries.ARRegressor(
periodicities=12,
num_features=1,
input_window_size=input_window_size,
output_window_size=output_window_size,
exogenous_feature_columns=exogenous_feature_columns),
64, input_window_size + output_window_size)
def train_and_evaluate_exogenous(
estimator_fn, csv_file_name=_DATA_FILE, train_steps=300):
"""Training, evaluating, and predicting on a series with changepoints."""
# Indicate the format of our exogenous feature, in this case a string
# representing a boolean value.
string_feature = tf.feature_column.categorical_column_with_vocabulary_list(
key="is_changepoint", vocabulary_list=["no", "yes"])
# Specify the way this feature is presented to the model, here using a one-hot
# encoding.
one_hot_feature = tf.feature_column.indicator_column(
categorical_column=string_feature)
estimator, batch_size, window_size = estimator_fn(
exogenous_feature_columns=[one_hot_feature])
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
# Indicate the format of our CSV file. First we have two standard columns,
# one for times and one for values. The third column is a custom exogenous
# feature indicating whether each timestep is a changepoint. The
# changepoint feature name must match the string_feature column name
# above.
column_names=(tf.contrib.timeseries.TrainEvalFeatures.TIMES,
tf.contrib.timeseries.TrainEvalFeatures.VALUES,
"is_changepoint"),
# Indicate dtypes for our features.
column_dtypes=(tf.int64, tf.float32, tf.string),
# This CSV has a header line; here we just ignore it.
skip_header_lines=1)
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=batch_size, window_size=window_size)
estimator.train(input_fn=train_input_fn, steps=train_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Create an input_fn for prediction, with a simulated changepoint. Since all
# of the anomalies in the training data are explained by the exogenous
# feature, we should get relatively confident predictions before the indicated
# changepoint (since we are telling the model that no changepoint exists at
# those times) and relatively uncertain predictions after.
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=100,
exogenous_features={
"is_changepoint": [["no"] * 49 + ["yes"] + ["no"] * 50]})))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, 0]
mean = np.squeeze(np.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
variance = np.squeeze(np.concatenate(
[evaluation["covariance"][0], predictions["covariance"]], axis=0))
all_times = np.concatenate([times, predictions["times"]], axis=0)
upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)
# Indicate the locations of the changepoints for plotting vertical lines.
anomaly_locations = []
with open(csv_file_name, "r") as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
if row["is_changepoint"] == "yes":
anomaly_locations.append(int(row["time"]))
anomaly_locations.append(predictions["times"][49])
return (times, observed, all_times, mean, upper_limit, lower_limit,
anomaly_locations)
def make_plot(name, training_times, observed, all_times, mean,
upper_limit, lower_limit, anomaly_locations):
"""Plot the time series and anomalies in a new figure."""
pyplot.figure()
pyplot.plot(training_times, observed, "b", label="training series")
pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.axvline(anomaly_locations[0], linestyle="dotted", label="changepoints")
for anomaly_location in anomaly_locations[1:]:
pyplot.axvline(anomaly_location, linestyle="dotted")
pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
                      alpha=0.2)
pyplot.axvline(training_times[-1], color="k", linestyle="--")
pyplot.xlabel("time")
pyplot.ylabel("observations")
pyplot.legend(loc=0)
pyplot.title(name)
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
make_plot("Ignoring a known anomaly (state space)",
*train_and_evaluate_exogenous(
estimator_fn=state_space_estimator))
make_plot("Ignoring a known anomaly (autoregressive)",
*train_and_evaluate_exogenous(
estimator_fn=autoregressive_estimator, train_steps=3000))
pyplot.show()
if __name__ == "__main__":
tf.app.run(main=main)
| apache-2.0 |
leesavide/pythonista-docs | Documentation/matplotlib/mpl_examples/api/custom_scale_example.py | 9 | 6401 | from __future__ import unicode_literals
import numpy as np
from numpy import ma
from matplotlib import scale as mscale
from matplotlib import transforms as mtransforms
from matplotlib.ticker import Formatter, FixedLocator
class MercatorLatitudeScale(mscale.ScaleBase):
"""
Scales data in range -pi/2 to pi/2 (-90 to 90 degrees) using
the system used to scale latitudes in a Mercator projection.
The scale function:
ln(tan(y) + sec(y))
The inverse scale function:
atan(sinh(y))
Since the Mercator scale tends to infinity at +/- 90 degrees,
    there is a user-defined threshold, above and below which nothing
will be plotted. This defaults to +/- 85 degrees.
source:
http://en.wikipedia.org/wiki/Mercator_projection
"""
# The scale class must have a member ``name`` that defines the
# string used to select the scale. For example,
# ``gca().set_yscale("mercator")`` would be used to select this
# scale.
name = 'mercator'
def __init__(self, axis, **kwargs):
"""
Any keyword arguments passed to ``set_xscale`` and
``set_yscale`` will be passed along to the scale's
constructor.
        thresh: The latitude (in radians) above which to crop the data.
"""
mscale.ScaleBase.__init__(self)
thresh = kwargs.pop("thresh", (85 / 180.0) * np.pi)
if thresh >= np.pi / 2.0:
raise ValueError("thresh must be less than pi/2")
self.thresh = thresh
def get_transform(self):
"""
Override this method to return a new instance that does the
actual transformation of the data.
The MercatorLatitudeTransform class is defined below as a
nested class of this one.
"""
return self.MercatorLatitudeTransform(self.thresh)
def set_default_locators_and_formatters(self, axis):
"""
Override to set up the locators and formatters to use with the
scale. This is only required if the scale requires custom
locators and formatters. Writing custom locators and
formatters is rather outside the scope of this example, but
there are many helpful examples in ``ticker.py``.
In our case, the Mercator example uses a fixed locator from
        -90 to 90 degrees and a custom formatter class to convert
the radians to degrees and put a degree symbol after the
value::
"""
class DegreeFormatter(Formatter):
def __call__(self, x, pos=None):
# \u00b0 : degree symbol
return "%d\u00b0" % ((x / np.pi) * 180.0)
deg2rad = np.pi / 180.0
axis.set_major_locator(FixedLocator(
np.arange(-90, 90, 10) * deg2rad))
axis.set_major_formatter(DegreeFormatter())
axis.set_minor_formatter(DegreeFormatter())
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Override to limit the bounds of the axis to the domain of the
transform. In the case of Mercator, the bounds should be
limited to the threshold that was passed in. Unlike the
autoscaling provided by the tick locators, this range limiting
will always be adhered to, whether the axis range is set
manually, determined automatically or changed through panning
and zooming.
"""
return max(vmin, -self.thresh), min(vmax, self.thresh)
class MercatorLatitudeTransform(mtransforms.Transform):
# There are two value members that must be defined.
# ``input_dims`` and ``output_dims`` specify number of input
# dimensions and output dimensions to the transformation.
# These are used by the transformation framework to do some
# error checking and prevent incompatible transformations from
# being connected together. When defining transforms for a
# scale, which are, by definition, separable and have only one
# dimension, these members should always be set to 1.
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, thresh):
mtransforms.Transform.__init__(self)
self.thresh = thresh
def transform_non_affine(self, a):
"""
This transform takes an Nx1 ``numpy`` array and returns a
transformed copy. Since the range of the Mercator scale
is limited by the user-specified threshold, the input
array must be masked to contain only valid values.
``matplotlib`` will handle masked arrays and remove the
out-of-range data from the plot. Importantly, the
``transform`` method *must* return an array that is the
same shape as the input array, since these values need to
remain synchronized with values in the other dimension.
"""
masked = ma.masked_where((a < -self.thresh) | (a > self.thresh), a)
if masked.mask.any():
return ma.log(np.abs(ma.tan(masked) + 1.0 / ma.cos(masked)))
else:
return np.log(np.abs(np.tan(a) + 1.0 / np.cos(a)))
def inverted(self):
"""
Override this method so matplotlib knows how to get the
inverse transform for this transform.
"""
return MercatorLatitudeScale.InvertedMercatorLatitudeTransform(self.thresh)
class InvertedMercatorLatitudeTransform(mtransforms.Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, thresh):
mtransforms.Transform.__init__(self)
self.thresh = thresh
def transform_non_affine(self, a):
return np.arctan(np.sinh(a))
def inverted(self):
return MercatorLatitudeScale.MercatorLatitudeTransform(self.thresh)
# Now that the Scale class has been defined, it must be registered so
# that ``matplotlib`` can find it.
mscale.register_scale(MercatorLatitudeScale)
if __name__ == '__main__':
import matplotlib.pyplot as plt
t = np.arange(-180.0, 180.0, 0.1)
s = t / 360.0 * np.pi
plt.plot(t, s, '-', lw=2)
plt.gca().set_yscale('mercator')
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.title('Mercator: Projection of the Oppressor')
plt.grid(True)
plt.show()
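    # --- Added sketch (not part of the original example): a quick numeric
    # check that the forward and inverse Mercator transforms defined above
    # round-trip for latitudes inside the +/-85 degree threshold.  Only names
    # defined in this file plus numpy are used; the values are illustrative.
    thresh = (85 / 180.0) * np.pi
    forward = MercatorLatitudeScale.MercatorLatitudeTransform(thresh)
    inverse = forward.inverted()
    lats = np.linspace(-80, 80, 9) * np.pi / 180.0
    roundtrip = inverse.transform_non_affine(forward.transform_non_affine(lats))
    assert np.allclose(roundtrip, lats), "Mercator round-trip check failed"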
| apache-2.0 |
smsolivier/VEF | code/hlimit.py | 1 | 2247 | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import ld as LD
import dd as DD
from hidespines import *
import sys
''' compares difference between Sn and moment equations as cell width --> 0 '''
if (len(sys.argv) > 1):
outfile = sys.argv[1]
else:
outfile = None
def getDiff(sol, tol=1e-6):
diff = np.zeros(len(sol))
for i in range(len(sol)):
x, phi, it = sol[i].sourceIteration(tol)
diff[i] = np.linalg.norm(phi - sol[i].phi_SN, 2)/np.linalg.norm(sol[i].phi_SN, 2)
return diff
N = 100
n = 8
xb = 1
Sigmaa = lambda x: .1
Sigmat = lambda x: 1
q = lambda x, mu: 1
tol = 1e-10
N = np.logspace(1, 3, 5)
N = np.array([int(x) for x in N])
ed00 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=0, GAUSS=0) for x in N]
ed01 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=0, GAUSS=1) for x in N]
ed10 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=1, GAUSS=0) for x in N]
ed11 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=1, GAUSS=1) for x in N]
ed20 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=2, GAUSS=0) for x in N]
ed21 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=2, GAUSS=1) for x in N]
diff00 = getDiff(ed00, tol)
diff01 = getDiff(ed01, tol)
diff10 = getDiff(ed10, tol)
diff11 = getDiff(ed11, tol)
diff20 = getDiff(ed20, tol)
diff21 = getDiff(ed21, tol)
fontsize=16
plt.loglog(xb/N, diff00, '-o', clip_on=False, label='MHFEM Edges, No Gauss')
plt.loglog(xb/N, diff01, '-o', clip_on=False, label='Maintain Slopes, No Gauss')
plt.loglog(xb/N, diff10, '-o', clip_on=False, label='MHFEM Edges, Gauss')
plt.loglog(xb/N, diff11, '-o', clip_on=False, label='Maintain Slopes, Gauss')
plt.loglog(xb/N, diff20, '-o', clip_on=False, label='vanLeer, No Gauss')
plt.loglog(xb/N, diff21, '-o', clip_on=False, label='vanLeer, Gauss')
plt.xlabel(r'$h$', fontsize=fontsize)
plt.ylabel('SN/MHFEM Convergence', fontsize=fontsize)
plt.legend(loc='best', frameon=False)
hidespines(plt.gca())
if (outfile != None):
plt.savefig(outfile, transparent=True)
else:
plt.show()
| mit |
legacysurvey/rapala | ninetyprime/linearitycheck.py | 2 | 17953 | #!/usr/bin/env python
import os
import glob
import numpy as np
import fitsio
import matplotlib.pyplot as plt
from matplotlib import ticker
from matplotlib.backends.backend_pdf import PdfPages
from astropy.table import Table
from bokpipe import *
from bokpipe.bokoscan import _convertfitsreg
def init_data_map(datadir,outdir,expTimes=None,files=None):
dataMap = {}
if not os.path.exists(outdir):
os.mkdir(outdir)
dataMap['outdir'] = outdir
if files is None:
dataMap['files'] = sorted(glob.glob(datadir+'*.fits') +
glob.glob(datadir+'*.fits.gz') +
glob.glob(datadir+'*.fits.fz'))
else:
dataMap['files'] = files
dataMap['rawFiles'] = dataMap['files']
dataMap['oscan'] = bokio.FileNameMap(outdir)
dataMap['proc'] = bokio.FileNameMap(outdir,'_p')
dataMap['files'] = [ dataMap['oscan'](f) for f in dataMap['files'] ]
if expTimes is None:
dataMap['expTime'] = np.array([fitsio.read_header(f)['EXPTIME']
for f in dataMap['files']])
else:
dataMap['expTime'] = expTimes
try:
# assume they are all the same
dataMap['dataSec'] = \
_convertfitsreg(fitsio.read_header(
dataMap['files'][0],'IM4')['DATASEC'])
except IOError:
pass
return dataMap
def process_data(dataMap,redo=True,withvar=True,oscanims=False,bias2d=False):
oscanSubtract = BokOverscanSubtract(output_map=dataMap['oscan'],
overwrite=redo,
write_overscan_image=oscanims,
oscan_cols_file=dataMap['outdir']+'oscan_cols',
oscan_rows_file=dataMap['outdir']+'oscan_rows',
verbose=10)#method='median_value')
oscanSubtract.process_files(dataMap['rawFiles'])
if bias2d:
biasname = 'bias'
biasStack = bokproc.BokBiasStack(#reject=None,
overwrite=redo,
with_variance=withvar)
bias2dFile = os.path.join(dataMap['outdir'],biasname+'.fits')
biasStack.stack(dataMap['biasFiles'],bias2dFile)
#imProcess = bokproc.BokCCDProcess(bias2dFile,
# output_map=dataMap['proc'])
#imProcess.process_files(flatFrames)
def imstat(dataMap,outfn='stats'):
from astropy.stats import sigma_clip
from scipy.stats import mode,scoreatpercentile
array_stats = bokutil.array_stats
fnlen = len(os.path.basename(dataMap['files'][0]))
st = np.zeros(len(dataMap['flatSequence']),
dtype=[('file','S%d'%fnlen),
('expTime','f4'),
('median','16f4'),
('mean','16f4'),
('mode','16f4'),
('iqr25','16f4'),
('iqr75','16f4'),
('iqr10','16f4'),
('iqr90','16f4')])
for _i,i in enumerate(dataMap['flatSequence']):
expTime = dataMap['expTime'][i]
fn = os.path.basename(dataMap['files'][i])
fits = fitsio.FITS(dataMap['files'][i])
print '%s %4.1f ' % (fn,expTime),
st['file'][_i] = fn
st['expTime'][_i] = expTime
for j,extn in enumerate(['IM%d' % n for n in range(1,17)]):
modeVal,pix = array_stats(fits[extn].read()[dataMap['statsPix']],
method='mode',retArray=True)
st['mode'][_i,j] = modeVal
st['mean'][_i,j] = pix.mean()
st['median'][_i,j] = np.ma.median(pix)
st['iqr25'][_i,j] = scoreatpercentile(pix,25)
st['iqr75'][_i,j] = scoreatpercentile(pix,75)
st['iqr10'][_i,j] = scoreatpercentile(pix,10)
st['iqr90'][_i,j] = scoreatpercentile(pix,90)
print '%5d ' % (modeVal),
print
fitsio.write(outfn+'.fits',st,clobber=True)
def scaled_histograms(dataMap,nims=None,outfn='pixhist'):
pdf = PdfPages(outfn+'.pdf')
for _i,i in enumerate(dataMap['flatSequence']):
if nims is not None and _i==nims:
break
expTime = dataMap['expTime'][i]
expScale = dataMap['refExpTime'] / expTime
print dataMap['files'][i]
fn = os.path.basename(dataMap['files'][i])
fits = fitsio.FITS(dataMap['files'][i])
fig = plt.figure(figsize=(8.0,10))
plt.subplots_adjust(0.08,0.08,0.92,0.92,0.3,0.35)
for j,extn in enumerate(['IM%d' % n for n in range(1,17)]):
ax = plt.subplot(8,2,j+1)
pix = fits[extn].read()[dataMap['statsPix']]
ax.hist(expScale*pix.flatten(),100,(0,40000),edgecolor='none')
ax.text(0.05,0.9,extn,va='top',size=9,transform=ax.transAxes)
ax.set_xlim(0,40000)
ax.xaxis.set_major_locator(ticker.MultipleLocator(10000))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(2000))
ax.yaxis.set_major_locator(ticker.MultipleLocator(50000))
plt.figtext(0.5,0.99,fn+' exp=%.1f' % expTime,ha='center',va='top')
pdf.savefig(fig)
plt.close(fig)
pdf.close()
def plot_sequence(dataMap,st,imNum,which='median'):
expScale = dataMap['refExpTime']/st['expTime']
seqno = 1 + np.arange(len(st))
ref = np.isclose(expScale,1.0)
j = imNum - 1
plt.figure(figsize=(8,6))
plt.subplots_adjust(0.11,0.08,0.96,0.95)
plt.errorbar(seqno[ref],expScale[ref]*st[which][ref,j],
[expScale[ref]*(st[which]-st['iqr10'])[ref,j],
expScale[ref]*(st['iqr90']-st[which])[ref,j]],
fmt='bs-')
plt.errorbar(seqno[~ref],expScale[~ref]*st[which][~ref,j],
[expScale[~ref]*(st[which]-st['iqr10'])[~ref,j],
expScale[~ref]*(st['iqr90']-st[which])[~ref,j]],
fmt='cs-')
#plt.scatter(seqno,expScale*st['mode'][:,j],marker='+',c='r')
#plt.scatter(seqno,expScale*st['mean'][:,j],marker='x',c='g')
plt.xlabel('sequence number')
plt.ylabel('counts scaled by exp time')
plt.title('IM%d'%imNum)
plt.xlim(0.5,len(st)+0.5)
def fit_ref_exposures(dataMap,st,imNum,
which='median',method='spline',doplot=False):
from scipy.interpolate import UnivariateSpline
seqno = 1 + np.arange(len(st))
t = st['expTime']
ref = np.isclose(t,dataMap['refExpTime'])
j = imNum - 1
refCounts = st[which][ref,j][0]
if method=='linear':
_fit = np.polyfit(seqno[ref],refCounts/st[which][ref,j],1)
fit = lambda x: np.polyval(_fit,x)
elif method=='spline':
fit = UnivariateSpline(seqno[ref],refCounts/st[which][ref,j],
s=1e-5,k=3)
else:
raise ValueError
if doplot:
plt.figure()
plt.subplot(211)
plt.plot(seqno[ref],st[which][ref,j],'bs-')
plt.plot(seqno,refCounts/fit(seqno),c='r')
plt.subplot(212)
plt.plot(seqno[ref],(st[which][ref,j]-refCounts/fit(seqno[ref]))
/st[which][ref,j],'bs-')
plt.axhline(0,c='r')
return fit
def plot_linearity_curves(dataMap,st,which='median',correct=True,isPTC=False,
refCor=None,fitmethod='spline',outfn='linearity',
onlyim=None):
seqno = 1 + np.arange(len(st))
t = st['expTime']
print seqno,t
refExpTime = dataMap['refExpTime']
ref = np.isclose(t,refExpTime)
refCorFit = None
ii = np.arange(len(st))
# only use the increasing sequence, not the reference exposures
ii = ii[~ref]
if isPTC:
# for PTCs skip every other image since they are done in pairs
ii = ii[::2]
# only fit to unsaturated frames
try:
firstsat = np.where(np.any(st[which][ii,:] > 55000,axis=1))[0][0]
except IndexError:
firstsat = -1
if onlyim is None:
pdf = PdfPages(outfn+'.pdf')
for imNum in range(1,17):
if onlyim is not None and imNum != onlyim:
continue
j = imNum - 1
# correct lamp variation
if correct:
if refCor is None:
fscl_fit = fit_ref_exposures(dataMap,st,imNum,which,
method=fitmethod)
else:
if refCorFit is None:
refCorFit = fit_ref_exposures(dataMap,st,imNum,which)
fscl_fit = refCorFit
fscl = fscl_fit(seqno)
else:
fscl = np.ones_like(seqno)
fit = np.polyfit(t[ii[:firstsat]],
fscl[ii[:firstsat]]*st[which][ii[:firstsat],j],1)
fitv = np.polyval(fit,t)
slope = fit[0] / (st[which][ref,j][0]/refExpTime)
#
pltindex = imNum % 4
if onlyim is None:
if pltindex == 1:
fig = plt.figure(figsize=(8,10))
plt.subplots_adjust(0.11,0.08,0.96,0.95,0.25,0.2)
ax = plt.subplot(4,2,2*(j%4)+1)
else:
fig = plt.figure(figsize=(6,2.5))
plt.subplots_adjust(0.11,0.23,0.99,0.98,0.35,0.2)
ax = plt.subplot(1,2,1)
plt.plot(t[ii],fscl[ii]*st[which][ii,j],'bs-')
plt.xlim(0.9*t.min(),t.max()+0.5)
plt.xscale('log')
plt.ylim(1e2,9e4)
plt.yscale('log')
plt.ylabel('counts [%s]' % which)
tt = np.logspace(-1,np.log10(1.3*t.max()),100)
plt.plot(tt,np.polyval(fit,tt),c='r')
plt.text(0.05,0.9,'IM%d'%imNum,va='top',transform=ax.transAxes)
plt.text(0.95,0.18,r'y = %.1f $\times$ t + %.1f' % tuple(fit),
ha='right',va='top',size=9,transform=ax.transAxes)
plt.text(0.95,0.10,r'y = %.3f $\times$ counts + %.1f' % (slope,fit[1]),
ha='right',va='top',size=9,transform=ax.transAxes)
if pltindex==0 or onlyim is not None:
plt.xlabel('exptime (s)')
#
if onlyim is None:
ax = plt.subplot(4,2,2*(j%4)+2)
else:
ax = plt.subplot(1,2,2)
plt.plot(t[ii],100*(fscl[ii]*st[which][ii,j]-fitv[ii])/fitv[ii],'bs-')
plt.axhline(0,c='r')
#ax.xaxis.set_major_locator(ticker.MultipleLocator(10))
#ax.xaxis.set_minor_locator(ticker.MultipleLocator(2))
ax.yaxis.set_major_locator(ticker.MultipleLocator(2))
ax.yaxis.set_minor_locator(ticker.MultipleLocator(0.5))
plt.ylim(-5,5)
plt.xlim(0.9*t.min(),t.max()+0.5)
plt.xscale('log')
if pltindex==0 or onlyim is not None:
plt.xlabel('exptime (s)')
plt.ylabel('residual \%')
if onlyim is None:
if pltindex == 0:
pdf.savefig(fig)
plt.close(fig)
if onlyim is None:
pdf.close()
def rel_gain(dataMap,st,which='median',correct=True,fitmethod='spline',
nskip=0):
seqno = 1 + np.arange(len(st))
t = st['expTime']
refExpTime = dataMap['refExpTime']
ref = np.isclose(t,refExpTime)
refCorFit = None
ii = np.arange(len(st))
ii = ii[~ref]
ii = ii[nskip:]
sky4 = st[which][ii,3]
fit_ii = ii[np.where((sky4>5000)&(sky4<25000))[0]]
plt.figure()
for imNum in range(1,17):
j = imNum - 1
# correct lamp variation
if correct:
if True: #refCor is None:
fscl_fit = fit_ref_exposures(dataMap,st,imNum,which,
method=fitmethod)
else:
if refCorFit is None:
refCorFit = fit_ref_exposures(dataMap,st,imNum,which)
fscl_fit = refCorFit
fscl = fscl_fit(seqno)
else:
fscl = np.ones_like(seqno)
fit = np.polyfit(t[fit_ii],fscl[fit_ii]*st[which][fit_ii,j],1)
fitv = np.polyval(fit,t)
# slope = fit[0] / (st[which][ref,j][0]/refExpTime)
        xx = np.array([0, 1.1*t.max()])
plt.subplot(4,4,imNum)
if False:
plt.scatter(t[ii],fscl[ii]*st[which][ii,j])
plt.plot(xx,np.polyval(fit,xx),c='r')
else:
plt.scatter(t[ii],fscl[ii]*st[which][ii,j]/fitv[ii])
plt.axhline(1,c='r')
plt.ylim(0.7,1.3)
if True:
plt.xscale('log')
plt.xlim(0.9*t.min(),1.1*t.max())
def get_first_saturated_frame(seq):
try:
firstsat = np.where(seq > 55000)[0][0]
except IndexError:
firstsat = -1
return firstsat
def compare_oscan_levels(dataMap,st):
files = [ dataMap['files'][i] for i in dataMap['flatSequence'] ]
oscans = np.zeros((len(files),16))
for j in range(16):
oscans[:,j] = [ fitsio.read_header(f,'IM%d'%(j+1))['OSCANMED']
for f in files ]
seqno = 1 + np.arange(len(st))
plt.figure()
for j in range(8,16):
ax = plt.subplot(8,2,2*(j%8)+1)
i1 = get_first_saturated_frame(st['median'][:,j])
plt.scatter(st['median'][:i1,j],oscans[:i1,j],c='b')
plt.ylabel('IM%d'%(j+1))
ax = plt.subplot(8,2,2*(j%8)+2)
plt.scatter(seqno[:i1],oscans[:i1,j],c='b')
def init_sep09bss_data_map():
datadir = os.environ.get('BASSDATA')+'/20150909/bss/20150908/'
exptimes = np.loadtxt(datadir+'../bss.20150909.log',usecols=(3,))
exptimes = exptimes[50:]
print exptimes
rdxdir = os.environ.get('GSCRATCH','tmp_sep')+'/bss_sep09/'
if not os.path.exists(rdxdir):
os.makedirs(rdxdir)
dataMap = init_data_map(datadir,rdxdir,
expTimes=exptimes,files=None)
dataMap['rawFiles'] = dataMap['rawFiles'][50:]
dataMap['files'] = dataMap['files'][50:]
dataMap['biasFiles'] = dataMap['files'][-5:]
#dataMap['flatSequence'] = range(50,68)
dataMap['flatSequence'] = range(18)
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
dataMap['refExpTime'] = 40.0
return dataMap
def init_sep29ptc_data_map():
dataMap = init_data_map(
"/home/ian/dev/rapala/bokpipe/scratch/sep29ptcs/ptc/",'sep29ptcs/')
dataMap['biasFiles'] = [dataMap['files'][0],]
dataMap['flatSequence'] = range(1,len(dataMap['files']))
dataMap['statsPix'] = np.s_[20:-20,100:-100]
dataMap['refExpTime'] = 10.0
return dataMap
def init_oct02ptc_data_map():
dataMap = init_data_map(os.environ.get('GSCRATCH')+'/02oct15/ptc/',
os.environ.get('GSCRATCH')+'/02oct15/ptc_proc/')
dataMap['biasFiles'] = [dataMap['files'][0],]
dataMap['flatSequence'] = range(1,len(dataMap['files']))
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
dataMap['refExpTime'] = 10.0
return dataMap
def init_oct20_data_map():
datadir = os.environ.get('BASSDATA')+'/20151020/'
exptimes = np.loadtxt(datadir+'images.log',usecols=(6,))
nuse = 53
exptimes = exptimes[:nuse]
print exptimes
dataMap = init_data_map(datadir,'tmp_oct20',expTimes=exptimes)
dataMap['rawFiles'] = dataMap['rawFiles'][:nuse]
dataMap['files'] = [ dataMap['oscan'](f)
for f in dataMap['files'][:nuse] ]
dataMap['biasFiles'] = dataMap['files'][:20]
dataMap['flatSequence'] = range(20,nuse)
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
dataMap['refExpTime'] = 3.0
return dataMap
def init_nov11g_data_map():
datadir = os.environ.get('BASSDATA')+'/Nov2015/'
log = Table.read(datadir+'bassLog_Nov2015.fits')
exptimes = log['expTime'][111:150]
files = [ datadir+f['utDir']+'/'+f['fileName']+'.fits'
for f in log[111:150] ]
dataMap = init_data_map(datadir,'tmp_nov11g',
expTimes=exptimes,files=files)
dataMap['biasFiles'] = dataMap['files'][-10:]
dataMap['flatSequence'] = np.arange(len(dataMap['files'])-10)
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
dataMap['refExpTime'] = 3.0
return dataMap
def init_nov14_data_map(filt):
datadir = os.environ.get('BASSDATA')+'/Nov2015/'
log = Table.read(datadir+'bassLog_Nov2015.fits')
if filt=='g':
frames = np.r_[np.s_[297:345],np.s_[247:257]]
else:
frames = np.r_[np.s_[345:393],np.s_[247:257]]
exptimes = log['expTime'][frames]
files = [ datadir+f['utDir']+'/'+f['fileName']+'.fits'
for f in log[frames] ]
dataMap = init_data_map(datadir,'tmp_nov14'+filt,
expTimes=exptimes,files=files)
dataMap['biasFiles'] = dataMap['files'][-10:]
dataMap['flatSequence'] = np.arange(len(dataMap['files'])-10)
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
dataMap['refExpTime'] = {'Ha':10.0,'g':3.0}[filt]
return dataMap
def init_jan3_data_map(filt):
datadir = os.environ.get('BASSDATA')
log = Table.read('basslogs/log_ut20160103.fits')
if filt=='g':
frames = np.r_[np.s_[57:105],np.s_[160:170]]
else:
frames = np.r_[np.s_[105:160],np.s_[160:170]]
exptimes = log['expTime'][frames]
files = [ datadir+'/'+f['utDir'].strip()+'/'+f['fileName'].strip()+'.fits'
for f in log[frames] ]
dataMap = init_data_map(datadir,'tmp_jan3'+filt,
expTimes=exptimes,files=files)
dataMap['biasFiles'] = dataMap['files'][-10:]
dataMap['flatSequence'] = np.arange(len(dataMap['files'])-10)
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
dataMap['refExpTime'] = {'Ha':10.0,'g':3.0}[filt]
return dataMap
def init_data_map_fromfile(filename,outdir='tmp',nersc=True):
datadir = os.environ.get('BASSDATA')
if nersc:
datadir = os.path.join(datadir,'BOK_Raw')
log = np.loadtxt(filename,dtype=[('frameNum','i4'),('utDir','S8'),
('fileName','S35'),
('imType','S10'),('filter','S8'),
('expTime','f4')],skiprows=1)
exptimes = log['expTime']
files = [ datadir+'/'+f['utDir'].strip()+'/'+f['fileName'].strip()+'.fits'
for f in log ]
if nersc:
files = [ f+'.fz' for f in files ]
dataMap = init_data_map(datadir,outdir,
expTimes=exptimes,files=files)
dataMap['biasFiles'] = np.array(dataMap['files'])[log['imType']=='zero']
dataMap['flatSequence'] = np.where(log['imType']=='flat')[0]
dataMap['statsPix'] = bokutil.stats_region('amp_corner_ccdcenter_small')
# assume it starts with reference
dataMap['refExpTime'] = exptimes[dataMap['flatSequence'][0]]
return dataMap
if __name__=='__main__':
import sys
dataset = sys.argv[1]
if dataset == 'sep09bss':
dataMap = init_sep09bss_data_map()
elif dataset == 'oct02':
dataMap = init_oct02ptc_data_map()
elif dataset == 'oct20':
dataMap = init_oct20_data_map()
elif dataset == 'nov11g':
dataMap = init_nov11g_data_map()
elif dataset == 'nov14g':
dataMap = init_nov14_data_map('g')
elif dataset == 'nov14Ha':
dataMap = init_nov14_data_map('Ha')
elif dataset == 'jan3g':
dataMap = init_jan3_data_map('g')
elif dataset == 'jan3Ha':
dataMap = init_jan3_data_map('Ha')
else:
dataMap = init_data_map_fromfile(sys.argv[2],dataset)
print 'processing ',dataset
if not os.path.exists('stats_'+dataset+'.fits'):
process_data(dataMap,bias2d=True)
imstat(dataMap,outfn='stats_'+dataset)
st = fitsio.read('stats_'+dataset+'.fits')
plot_linearity_curves(dataMap,st,outfn='linearity_'+dataset)
if True:
plot_linearity_curves(dataMap,st,outfn='linearity_'+dataset,
onlyim=4)
plt.savefig('linearity_IM4_%s.png'%dataset)
plot_sequence(dataMap,st,4)
plt.savefig('linsequence_IM4_%s.png'%dataset)
| bsd-3-clause |
Jailander/COSMOS | kriging_exploration/scripts/explorator.py | 1 | 34183 | #!/usr/bin/env python
import cv2
import sys
import yaml
import signal
import numpy as np
#import utm
import matplotlib as mpl
import matplotlib.cm as cm
import rospy
import argparse
import actionlib
from cosmos_msgs.msg import KrigInfo
from cosmos_msgs.srv import CompareModels
import kriging_exploration.map_coords
import std_msgs.msg
import open_nav.msg
from kriging_exploration.data_grid import DataGrid
from kriging_exploration.map_coords import MapCoords
from kriging_exploration.visualiser import KrigingVisualiser
from kriging_exploration.canvas import ViewerCanvas
from kriging_exploration.topological_map import TopoMap
from kriging_exploration.exploration import ExplorationPlan
from sensor_msgs.msg import NavSatFix
def overlay_image_alpha(img, img_overlay):
"""Overlay img_overlay on top of img at the position specified by
pos and blend using alpha_mask.
"""
show_image = img.copy()
alpha = img_overlay[:, :, 3] / 255.0 # Alpha mask must contain values
# within the range [0, 1]
# and be the same size as img_overlay.
# Image ranges
y1, y2 = 0, img.shape[0]
x1, x2 = 0, img.shape[1]
channels = img.shape[2]
alpha_inv = 1.0 - alpha
for c in range(channels):
show_image[y1:y2, x1:x2, c] = (alpha * img_overlay[y1:y2, x1:x2, c] + alpha_inv * img[y1:y2, x1:x2, c])
return show_image
class Explorator(KrigingVisualiser):
#_w_shape=[(0, 16), (1, 17), (3, 17), (5, 16), (8, 15), (10, 15), (12, 14), (14, 13), (12, 12), (10, 11), (8, 11), (5, 10), (8, 9), (10, 9), (12, 8), (14, 7), (12, 6), (10, 5), (8, 5), (6, 4), (4, 3), (3, 2), (4, 1), (5, 0), (7, 0)]
#_w_shape=[(17, 0), (17, 1), (17, 3), (16, 5), (15, 8), (15, 10), (14, 12), (13, 14), (12, 12), (11, 10), (11, 8), (10, 5), (9, 8), (9, 10), (8, 12), (7, 14), (6, 12), (5, 10), (5, 8), (4, 6), (3, 4), (2, 3), (1, 4), (0, 5), (0, 7)]
#_w_shape=[(17, 0), (17,1), (17, 2), (17, 4), (16, 4), (16, 6), (16, 8), (15, 8), (15, 10), (14, 10), (14, 12), (13, 12), (13, 14), (12, 14), (12, 12), (11, 12), (11, 10), (10, 10), (10, 8), (10, 6), (10, 4), (9, 4), (9, 6), (9, 8), (9, 10), (8, 10), (8, 12), (7, 12), (7, 14), (6, 14), (6, 12), (5, 12), (5, 10), (4, 10), (4, 8), (4, 6), (4, 4), (3, 4), (3, 3), (2, 3), (2, 4), (1,4), (1, 6), (0,6), (1, 8), (0,8), (1, 10), (0, 10), (0, 12), (0, 14)]
_w_shape=[(17, 0), (16, 1), (14, 6), (12, 11), (10, 14), (8, 9), (5, 14), (3, 11), (2, 6), (0, 3)]
def __init__(self, lat_deg, lon_deg, zoom, size, args):
self.targets = []
self.results =[]
self.result_counter=0
self.explodist=0
self.running = True
self.last_coord=None
signal.signal(signal.SIGINT, self.signal_handler)
self.expid=args.experiment_name
print "Creating visualiser object"
super(Explorator, self).__init__(lat_deg, lon_deg, zoom, size)
cv2.namedWindow('explorator')
cv2.setMouseCallback('explorator', self.click_callback)
self.current_model=-1
self.draw_mode = 'none'
self.grid = DataGrid(args.limits_file, args.cell_size)
self.topo_map= TopoMap(self.grid)
self.visited_wp=[]
explo_type = args.area_coverage_type
self.define_exploration_type(explo_type)
self.navigating = False
self.pause_exp = False
self.exploring = 0
self.n_inputs = 0
print "NUMBER OF TARGETS:"
print len(self.explo_plan.targets)
self.limits_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.grid_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.exploration_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.gps_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.limits_canvas.draw_polygon(self.grid.limits, (0,0,255,128), thickness=1)
self.grid_canvas.draw_grid(self.grid.cells, args.cell_size, (128,128,128,2), thickness=1)
self.redraw()
self.redraw_kriged=True
self.redraw_var=True
self.redraw_devi=True
self.model_canvas=[]
self.model_legend=[]
self.kriging_canvas=[]
self.klegend_canvas=[]
self.klegend2_canvas=[]
self.klegend3_canvas=[]
self.sigma_canvas=[]
self.sigma2_canvas=[]
self.model_canvas_names=[]
self.mean_out_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.mean_out_legend_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.mean_var_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.mean_var_legend_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.mean_dev_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
self.mean_dev_legend_canvas = ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res)
rospy.loginfo("Subscribing to Krig Info")
rospy.Subscriber("/kriging_data", KrigInfo, self.data_callback)
rospy.Subscriber("/fix", NavSatFix, self.gps_callback)
rospy.Subscriber('/penetrometer_scan', std_msgs.msg.String, self.scan_callback)
self.req_data_pub = rospy.Publisher('/request_scan', std_msgs.msg.String, latch=False, queue_size=1)
rospy.loginfo(" ... Connecting to Open_nav")
self.open_nav_client = actionlib.SimpleActionClient('/open_nav', open_nav.msg.OpenNavAction)
self.open_nav_client.wait_for_server()
rospy.loginfo(" ... done")
tim1 = rospy.Timer(rospy.Duration(0.2), self.drawing_timer_callback)
tim2 = rospy.Timer(rospy.Duration(0.1), self.control_timer_callback)
self.refresh()
while(self.running):
cv2.imshow('explorator', self.show_image)
k = cv2.waitKey(20) & 0xFF
self._change_mode(k)
tim1.shutdown()
tim2.shutdown()
cv2.destroyAllWindows()
sys.exit(0)
# EXPLORATION PARAMS HERE!!!!
def define_exploration_type(self, explo_type):
self.exploration_strategy=explo_type
self.n_goals=10
if explo_type=='area_split':
self.grid._split_area(3,3)
sb=[]
for i in self.grid.area_splits_coords:
(y, x) = self.grid.get_cell_inds_from_coords(i)
sb.append((x,y))
self.explo_plan = ExplorationPlan(self.topo_map, args.initial_waypoint, args.initial_percent, ac_model=explo_type, ac_coords=sb)
elif explo_type=='random':
self.explo_plan = ExplorationPlan(self.topo_map, args.initial_waypoint, args.initial_percent)
elif explo_type=='w_shape':
self.explo_plan = ExplorationPlan(self.topo_map, args.initial_waypoint, args.initial_percent, ac_model=explo_type, ac_coords=self._w_shape)
else: #greedy
self.explo_plan = ExplorationPlan(self.topo_map, args.initial_waypoint, args.initial_percent, exploration_type='greedy', ac_model=explo_type)
def drawing_timer_callback(self, event):
self.refresh()
def control_timer_callback(self, event):
if self.navigating:
if self.open_nav_client.simple_state ==2:
print "DONE NAVIGATING"
self.navigating = False
if self.exploring==1:
self.exploring=2
elif self.exploring==2:
if not self.pause_exp:
self.explo_plan.explored_wp.append(self.explo_plan.route.pop(0))
info_str='Do_reading'
self.req_data_pub.publish(info_str)
self.exploring=3
elif self.exploring==4:
if not self.pause_exp:
if len(self.explo_plan.route) >0:
gg=self.explo_plan.route[0]
self.open_nav_client.cancel_goal()
targ = open_nav.msg.OpenNavActionGoal()
targ.goal.coords.header.stamp=rospy.Time.now()
targ.goal.coords.latitude=gg.coord.lat
targ.goal.coords.longitude=gg.coord.lon
print "Going TO: ", gg
self.exploring=1
self.navigating=True
self.open_nav_client.send_goal(targ.goal)
else:
print "Done Exploring"
self.exploring = 0
# else:
# if self.exploring:
# print "waiting for new goal"
def gps_callback(self, data):
if not np.isnan(data.latitude):
self.gps_canvas.clear_image()
gps_coord = MapCoords(data.latitude,data.longitude)
self.gps_canvas.draw_coordinate(gps_coord,'black',size=2, thickness=2, alpha=255)
if self.last_coord:
dist = gps_coord - self.last_coord
self.explodist+= dist[0]
self.last_coord=gps_coord
def data_callback(self, msg):
point_coord = kriging_exploration.map_coords.coord_from_satnav_fix(msg.coordinates)
for i in msg.data:
self.grid.add_data_point(i.model_name, point_coord, i.measurement)
self.vmin, self.vmax = self.grid.get_max_min_vals()
self.n_models=len(self.grid.models)
for i in self.grid.models:
if i.name not in self.model_canvas_names:
print i.name
self.model_canvas_names.append(i.name)
self.model_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.model_legend.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.kriging_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.klegend_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.klegend2_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.klegend3_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.sigma_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.sigma2_canvas.append(ViewerCanvas(self.base_image.shape, self.satellite.centre, self.satellite.res))
self.draw_inputs(self.model_canvas_names.index(i.name))
self.n_inputs+=1
if self.exploring==3:
if self.n_inputs>3:
self.krieg_all_mmodels()
rospy.sleep(0.1)
self.grid.calculate_mean_grid()
rospy.sleep(0.1)
self.draw_means()
self.draw_mode="means"
resp = self.get_errors()
self.result_counter+=1
d={}
d['step']=self.result_counter
d['id']=self.expid
d['ns']=len(self.explo_plan.targets)
d['coord']={}
d['coord']['lat']=self.last_coord.lat
d['coord']['lon']=self.last_coord.lon
d['dist']=float(self.explodist)
d['results']={}
d['results']['groundtruth']=resp
d['results']['var']={}
d['results']['var']['mean']={}
d['results']['var']['mean']['mean']= float(np.mean(self.grid.mean_variance))
d['results']['var']['mean']['max']= float(np.max(self.grid.mean_variance))
d['results']['var']['mean']['min']= float(np.min(self.grid.mean_variance))
# d['results']['var']['std']['mean']= np.mean(self.grid.mean_deviation)
# d['results']['var']['std']['max']= np.max(self.grid.mean_deviation)
# d['results']['var']['std']['min']= np.min(self.grid.mean_deviation)
means=[]
maxs=[]
mins=[]
for i in range(self.n_models):
means.append(float(np.mean(self.grid.models[i].variance)))
maxs.append(float(np.max(self.grid.models[i].variance)))
mins.append(float(np.min(self.grid.models[i].variance)))
d['results']['models']={}
d['results']['models']['means']=means
d['results']['models']['maxs']=maxs
d['results']['models']['mins']=mins
rospy.sleep(0.1)
self.results.append(d)
if self.exploration_strategy == 'greedy':
nwp = len(self.explo_plan.route) + len(self.explo_plan.explored_wp)
print nwp, " nodes in plan"
if nwp <= self.n_goals:
#THIS IS the ONE
#self.explo_plan.add_limited_greedy_goal(self.grid.mean_variance, self.last_coord)
self.explo_plan.add_greedy_goal(self.grid.mean_variance)
#self.explo_plan.add_montecarlo_goal(self.grid.mean_variance, self.last_coord)
#self.draw_mode="deviation"
# self.current_model=0
# if self.redraw_devi:
# self.draw_all_devs()
self.redraw()
rospy.sleep(0.1)
self.exploring=4
def scan_callback(self, msg):
if msg.data == 'Reading':
print "GOT READING!!!"
cx, cy = self.grid.get_cell_inds_from_coords(self.last_coord)
if cx <0 or cy<0:
print "Reading outside the grid"
else:
print 'Reading at: ', cx, cy
for i in self.topo_map.waypoints:
if (cy,cx) == i.ind:
print 'Setting: ', i.name, i.coord, "as Visited"
i.visited= True
self.visited_wp.append(i)
self.grid_canvas.draw_waypoints(self.topo_map.waypoints, (0,255,0,2), thickness=1)
self.grid_canvas.draw_waypoints(self.visited_wp, (0,0,255,2), thickness=1)
self.redraw()
def refresh(self):
#self.show_image = self.image.copy()
#self.show_image = cv2.addWeighted(self.gps_canvas.image, 0.7, self.image, 1.0, 0)
#self.show_image = transparentOverlay(self.image, self.gps_canvas.image)
self.show_image = overlay_image_alpha(self.image,self.gps_canvas.image)
def redraw(self):
self.image = cv2.addWeighted(self.grid_canvas.image, 0.5, self.base_image, 1.0, 0)
self.image = cv2.addWeighted(self.limits_canvas.image, 0.75, self.image, 1.0, 0)
self.image = cv2.addWeighted(self.exploration_canvas.image, 0.75, self.image, 1.0, 0)
if self.draw_mode == "inputs" and self.current_model>=0 :
self.image = cv2.addWeighted(self.model_canvas[self.current_model].image, 0.75, self.image, 1.0, 0)
self.image = overlay_image_alpha(self.image, self.model_legend[self.current_model].image)
if self.draw_mode == "kriging":# and self.current_model>=0 :
self.image = cv2.addWeighted(self.kriging_canvas[self.current_model].image, 0.75, self.image, 1.0, 0)
#self.image = cv2.addWeighted(self.klegend_canvas[self.current_model].image, 1.0, self.image, 1.0, 0)
self.image = overlay_image_alpha(self.image, self.klegend_canvas[self.current_model].image)
if self.draw_mode == "deviation":# and self.current_model>=0 :
self.image = cv2.addWeighted(self.sigma_canvas[self.current_model].image, 0.75, self.image, 1.0, 0)
#self.image = cv2.addWeighted(self.klegend3_canvas[self.current_model].image, 1.0, self.image, 1.0, 0)
self.image = overlay_image_alpha(self.image, self.klegend3_canvas[self.current_model].image)
if self.draw_mode == "variance":# and self.current_model>=0 :
self.image = cv2.addWeighted(self.sigma2_canvas[self.current_model].image, 0.75, self.image, 1.0, 0)
#self.image = cv2.addWeighted(self.klegend2_canvas[self.current_model].image, 1.0, self.image, 1.0, 0)
self.image = overlay_image_alpha(self.image, self.klegend2_canvas[self.current_model].image)
if self.draw_mode == "means":
self.image = cv2.addWeighted(self.mean_dev_canvas.image, 0.75, self.image, 1.0, 0)
#self.image = cv2.addWeighted(self.klegend2_canvas[self.current_model].image, 1.0, self.image, 1.0, 0)
self.image = overlay_image_alpha(self.image, self.mean_dev_legend_canvas.image)
self.show_image = self.image.copy()
def click_callback(self, event, x, y, flags, param):
if event == cv2.EVENT_RBUTTONDOWN:
click_coord = self.satellite._pix2coord(x,y)
cx, cy = self.grid.get_cell_inds_from_coords(click_coord)
if cx <0 or cy<0:
print "click outside the grid"
else:
print cx, cy
for i in self.topo_map.waypoints:
if (cy,cx) == i.ind:
print i.name, i.coord.easting, i.coord.northing
i.visited= True
self.visited_wp.append(i)
self.grid_canvas.draw_waypoints(self.topo_map.waypoints, (0,255,0,2), thickness=1)
self.grid_canvas.draw_waypoints(self.visited_wp, (0,0,255,2), thickness=1)
self.redraw()
if event == cv2.EVENT_LBUTTONDOWN:
click_coord = self.satellite._pix2coord(x,y)
cx, cy = self.grid.get_cell_inds_from_coords(click_coord)
if cx <0 or cy<0:
print "click outside the grid"
else:
print cx, cy
for i in self.topo_map.waypoints:
if (cy,cx) == i.ind:
self.open_nav_client.cancel_goal()
targ = open_nav.msg.OpenNavActionGoal()
#goal.goal.goal.header.
targ.goal.coords.header.stamp=rospy.Time.now()
targ.goal.coords.latitude=i.coord.lat
targ.goal.coords.longitude=i.coord.lon
print targ
self.navigating=True
self.open_nav_client.send_goal(targ.goal)
#self.client.wait_for_result()
# Prints out the result of executing the action
#ps = self.client.get_result()
#print ps
def draw_inputs(self, nm):
minv = self.grid.models[nm].lims[0]
maxv = self.grid.models[nm].lims[1]
if (maxv-minv) <=1:
maxv = maxv + 50
minv = minv - 50
norm = mpl.colors.Normalize(vmin=minv, vmax=maxv)
cmap = cm.jet
colmap = cm.ScalarMappable(norm=norm, cmap=cmap)
self.model_canvas[nm].clear_image()
self.model_legend[nm].clear_image()
for i in self.grid.models[nm].orig_data:
cell = self.grid.cells[i.y][i.x]
a= colmap.to_rgba(int(i.value))
b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50))
self.model_canvas[nm].draw_cell(cell, self.grid.cell_size, b, thickness=-1)
self.model_canvas[nm].put_text(self.grid.models[nm].name)
self.model_legend[nm].put_text(self.grid.models[nm].name)
self.model_legend[nm].draw_legend(minv, maxv, colmap, title="Kriging")
def draw_krigged(self, nm):
print "drawing kriging" + str(nm)
minv = self.grid.models[nm].min_val
maxv = self.grid.models[nm].max_val
if (maxv-minv) <=1:
maxv = maxv + 50
minv = minv - 50
norm = mpl.colors.Normalize(vmin=minv, vmax=maxv)
cmap = cm.jet
colmap = cm.ScalarMappable(norm=norm, cmap=cmap)
self.kriging_canvas[nm].clear_image()
self.klegend_canvas[nm].clear_image()
for i in range(self.grid.models[nm].shape[0]):
for j in range(self.grid.models[nm].shape[1]):
cell = self.grid.cells[i][j]
a= colmap.to_rgba(int(self.grid.models[nm].output[i][j]))
b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50))
self.kriging_canvas[nm].draw_cell(cell, self.grid.cell_size, b, thickness=-1)
self.klegend_canvas[nm].put_text(self.grid.models[nm].name)
self.klegend_canvas[nm].draw_legend(minv, maxv, colmap, title="Kriging")
self.redraw()
def draw_variance(self, nm):
print "drawing variance" + str(nm)
minv = self.grid.models[nm].min_var
maxv = self.grid.models[nm].max_var
if (maxv-minv) <=1:
maxv = maxv + 50
minv = minv - 50
norm = mpl.colors.Normalize(vmin=minv, vmax= maxv)
cmap = cm.jet
colmap = cm.ScalarMappable(norm=norm, cmap=cmap)
self.sigma_canvas[nm].clear_image()
self.klegend2_canvas[nm].clear_image()
for i in range(self.grid.models[nm].shape[0]):
for j in range(self.grid.models[nm].shape[1]):
cell = self.grid.cells[i][j]
a= colmap.to_rgba(int(self.grid.models[nm].variance[i][j]))
b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50))
self.sigma2_canvas[nm].draw_cell(cell, self.grid.cell_size, b, thickness=-1)
self.klegend2_canvas[nm].put_text(self.grid.models[nm].name)
self.klegend2_canvas[nm].draw_legend(minv, maxv, colmap, title="Variance")
self.redraw()
def draw_means(self):
print "drawing mean deviation ..."
minv = self.grid.min_mean_deviation
maxv = self.grid.max_mean_deviation
if (maxv-minv) <=1:
maxv = maxv + 50
minv = minv - 50
norm = mpl.colors.Normalize(vmin=minv, vmax=maxv)
cmap = cm.jet
colmap = cm.ScalarMappable(norm=norm, cmap=cmap)
self.mean_dev_canvas.clear_image()
self.mean_dev_legend_canvas.clear_image()
for i in range(self.grid.shape[0]):
for j in range(self.grid.shape[1]):
cell = self.grid.cells[i][j]
a= colmap.to_rgba(int(self.grid.mean_deviation[i][j]))
b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50))
self.mean_dev_canvas.draw_cell(cell, self.grid.cell_size, b, thickness=-1)
#self.mean_dev_legend_canvas.put_text(self.grid.models[nm].name)
self.mean_dev_legend_canvas.draw_legend(minv, maxv, colmap, title="Mean Deviation")
#self.draw_mode="means"
self.redraw()
def draw_deviation(self, nm):
print "drawing deviation" + str(nm)
minv = self.grid.models[nm].min_dev
maxv = self.grid.models[nm].max_dev
if (maxv-minv) <=1:
maxv = maxv + 50
minv = minv - 50
norm = mpl.colors.Normalize(vmin=minv, vmax=maxv)
cmap = cm.jet
colmap = cm.ScalarMappable(norm=norm, cmap=cmap)
self.sigma_canvas[nm].clear_image()
self.klegend3_canvas[nm].clear_image()
for i in range(self.grid.models[nm].shape[0]):
for j in range(self.grid.models[nm].shape[1]):
cell = self.grid.cells[i][j]
a= colmap.to_rgba(int(self.grid.models[nm].deviation[i][j]))
b= (int(a[2]*255), int(a[1]*255), int(a[0]*255), int(a[3]*50))
self.sigma_canvas[nm].draw_cell(cell, self.grid.cell_size, b, thickness=-1)
self.klegend3_canvas[nm].put_text(self.grid.models[nm].name)
self.klegend3_canvas[nm].draw_legend(minv, maxv, colmap, title="Deviation")
self.redraw()
def krieg_all_mmodels(self):
for i in self.grid.models:
i.do_krigging()
self.redraw_kriged=True
self.redraw_var=True
self.redraw_devi=True
def draw_all_outputs(self):
for i in self.grid.models:
self.draw_krigged(self.model_canvas_names.index(i.name))
self.redraw_kriged=False
def draw_all_vars(self):
for i in self.grid.models:
self.draw_variance(self.model_canvas_names.index(i.name))
self.redraw_var=False
def draw_all_devs(self):
for i in self.grid.models:
self.draw_deviation(self.model_canvas_names.index(i.name))
self.redraw_devi=False
def _change_mode(self, k):
if k == 27:
self.running = False
elif k == ord('q'):
self.running = False
elif k == ord('n'):
print len(self.grid.models)
elif k == ord('i'):
if self.n_models > 0:
self.draw_mode="inputs"
self.current_model=0
self.redraw()
elif k == ord('d'):
if self.n_models > 0:
self.draw_mode="deviation"
self.current_model=0
if self.redraw_devi:
self.draw_all_devs()
self.redraw()
elif k == ord('v'):
if self.n_models > 0:
self.draw_mode="variance"
self.current_model=0
if self.redraw_var:
self.draw_all_vars()
self.redraw()
elif k == ord('t'):
self.krieg_all_mmodels()
self.grid.calculate_mean_grid()
if self.n_models > 0:
self.draw_all_outputs()
self.draw_mode="kriging"
self.current_model=0
self.redraw()
elif k == ord('k'):
if self.n_models > 0:
self.draw_mode="kriging"
self.current_model=0
if self.redraw_kriged:
self.draw_all_outputs()
self.redraw()
elif k == ord('>'):
self.current_model+=1
if self.current_model >= self.n_models:
self.current_model=0
self.redraw()
elif k == ord('<'):
self.current_model-=1
if self.current_model < 0:
self.current_model=self.n_models-1
self.redraw()
elif k == ord('w'):
self.grid_canvas.draw_waypoints(self.topo_map.waypoints, (0,255,0,2), thickness=1)
self.grid_canvas.draw_waypoints(self.visited_wp, (0,0,255,2), thickness=1)
self.redraw()
elif k == ord('e'):
self.exploration_canvas.draw_waypoints(self.explo_plan.targets, (255,200,128,255), thickness=3)
self.exploration_canvas.draw_plan(self.explo_plan.route, 'cyan', thickness=1)
self.redraw()
#xnames = [x.name for x in self.explo_plan.route]
#print xnames
elif k == ord('g'):
if len(self.explo_plan.route) >0:
gg=self.explo_plan.route[0]
self.open_nav_client.cancel_goal()
targ = open_nav.msg.OpenNavActionGoal()
targ.goal.coords.header.stamp=rospy.Time.now()
targ.goal.coords.latitude=gg.coord.lat
targ.goal.coords.longitude=gg.coord.lon
print "Going TO: ", gg
self.exploring=1
self.navigating=True
self.open_nav_client.send_goal(targ.goal)
self.result_counter=0
self.explodist=0
else:
print "Done Exploring"
self.exploring = 0
elif k == ord('y'):
vwp = []
for i in self.visited_wp:
vwp.append(i.name)
yml = yaml.safe_dump(vwp, default_flow_style=False)
fh = open("visited.yaml", "w")
s_output = str(yml)
fh.write(s_output)
            fh.close()
elif k == ord('l'):
print "loading visited"
with open("visited.yaml", 'r') as f:
visited = yaml.load(f)
for i in visited:
for l in self.topo_map.waypoints:
if i == l.name:
self.visited_wp.append(l)
break
elif k == ord('a'):
self.grid.calculate_mean_grid()
self.draw_means()
self.draw_mode="means"
elif k == ord('p'):
self.pause_exp= not self.pause_exp
elif k == ord('c'):
print self.grid.limits
print "Area: ", self.grid.calculate_area(self.grid.limits)
print "Area of Area: ", self.grid.area.area_size
colours=['magenta','cyan', 'grey','white','red','yellow','green','blue']
nc=0
for j in self.grid.area_splits:
print j.area_size
#self.limits_canvas.draw_coordinate(j.centre, 'crimson', size=3, thickness=2)
for i in j.limit_lines:
#self.limits_canvas.draw_line(i, colours[nc], thickness=1)
self.limits_canvas.draw_line(i, 'white', thickness=1)
if nc < len(colours)-1:
nc+=1
else:
nc=0
self.redraw()
elif k== ord('r'):
#diff = (self.grid.models[1].output - self.grid.models[0].output)
#print np.mean(diff), np.std(diff), diff.dtype
print self.get_errors()
elif k== ord('o'):
print self.results
outfile = self.expid + '.yaml'
#print self.data_out
yml = yaml.safe_dump(self.results, default_flow_style=False)
fh = open(outfile, "w")
s_output = str(yml)
#print s_output
fh.write(s_output)
            fh.close()
def get_errors(self):
error_chain=[]
shapeo = self.grid.models[0].output.shape
#print vals
print "Waiting for Service"
rospy.wait_for_service('/compare_model')
compare_serv = rospy.ServiceProxy('/compare_model', CompareModels)
for i in range(self.n_models):
try:
d={}
print "going for it ", i
vals = np.reshape(self.grid.models[i].output, -1)
resp1 = compare_serv('kriging', i, shapeo[0], shapeo[1], vals.tolist())
d['name']= self.grid.models[i].name
d['type']= 'kriging'
d['errors']={}
d['errors']['error']=resp1.error
d['errors']['mse']=resp1.mse
d['errors']['std']=resp1.std
d['errors']['var']=resp1.var
#print resp1
error_chain.append(d)
except rospy.ServiceException, e:
print "Service call failed: %s"%e
try:
d={}
print "Mean "
vals = np.reshape(self.grid.mean_output, -1)
resp1 = compare_serv('mean', 0, shapeo[0], shapeo[1], vals.tolist())
#print self.grid.mean_output
d['name']= 'mean'
d['type']= 'mean'
d['errors']={}
d['errors']['error']=resp1.error
d['errors']['mse']=resp1.mse
d['errors']['std']=resp1.std
d['errors']['var']=resp1.var
#print resp1
error_chain.append(d)
except rospy.ServiceException, e:
print "Service call failed: %s"%e
return error_chain
def signal_handler(self, signal, frame):
self.running = False
print('You pressed Ctrl+C!')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--cell_size", type=int, default=10,
help="cell size in meters")
parser.add_argument("--initial_percent", type=float, default=0.05,
help="Percentage of cells to be explored on the initial plan")
parser.add_argument("--limits_file", type=str, default='limits.coords',
help="Percentage of cells to be explored on the initial plan")
parser.add_argument("--initial_waypoint", type=str, default='WayPoint498',
help="Percentage of cells to be explored on the initial plan")
parser.add_argument("--area_coverage_type", type=str, default='area_split',
help="Type of area coverage, random or area_split")
parser.add_argument("--experiment_name", type=str, default='exp1',
help="Experiment ID")
args = parser.parse_args()
rospy.init_node('kriging_exploration')
#Explorator(53.261685, -0.527158, 16, 640, args.cell_size)
#Explorator(53.267213, -0.533420, 17, 640, args) #Football Field
Explorator(53.261576, -0.526648, 17, 640, args) #Half cosmos field
#Explorator(53.261685, -0.525158, 17, 640, args) #COSMOS Field
| mit |
sharthee/ProgrammingAssignment2 | labs/lab2/cs109style.py | 38 | 1293 | from __future__ import print_function
from IPython.core.display import HTML
from matplotlib import rcParams
#colorbrewer2 Dark2 qualitative color table
dark2_colors = [(0.10588235294117647, 0.6196078431372549, 0.4666666666666667),
(0.8509803921568627, 0.37254901960784315, 0.00784313725490196),
(0.4588235294117647, 0.4392156862745098, 0.7019607843137254),
(0.9058823529411765, 0.1607843137254902, 0.5411764705882353),
(0.4, 0.6509803921568628, 0.11764705882352941),
(0.9019607843137255, 0.6705882352941176, 0.00784313725490196),
(0.6509803921568628, 0.4627450980392157, 0.11372549019607843),
(0.4, 0.4, 0.4)]
def customize_mpl():
"""Tweak matplotlib visual style"""
print("Setting custom matplotlib visual style")
rcParams['figure.figsize'] = (10, 6)
rcParams['figure.dpi'] = 150
rcParams['axes.color_cycle'] = dark2_colors
rcParams['lines.linewidth'] = 2
rcParams['axes.grid'] = True
rcParams['axes.facecolor'] = '#eeeeee'
rcParams['font.size'] = 14
rcParams['patch.edgecolor'] = 'none'
def customize_css():
print("Setting custom CSS for the IPython Notebook")
styles = open('custom.css', 'r').read()
return HTML(styles)
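# --- Added usage sketch (not in the original lab helper): how these helpers
# are typically invoked from a notebook cell.  customize_css() assumes a
# custom.css file sits next to the notebook, so it is guarded here.
if __name__ == "__main__":
    import os
    customize_mpl()
    if os.path.exists('custom.css'):
        customize_css()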
| mit |
CIFASIS/pylearn2 | pylearn2/packaged_dependencies/theano_linear/unshared_conv/localdot.py | 39 | 5044 | """
WRITEME
"""
import logging
from ..linear import LinearTransform
from .unshared_conv import FilterActs, ImgActs
from theano.compat.six.moves import xrange
from theano.sandbox import cuda
if cuda.cuda_available:
import gpu_unshared_conv # register optimizations
import numpy as np
import warnings
try:
import matplotlib.pyplot as plt
except (RuntimeError, ImportError, TypeError) as matplotlib_exception:
warnings.warn("Unable to import matplotlib. Some features unavailable. "
"Original exception: " + str(matplotlib_exception))
logger = logging.getLogger(__name__)
class LocalDot(LinearTransform):
"""
    LocalDot is a linear operation computationally similar to
    convolution in the spatial domain, except that whereas convolution
    applies a single filter or set of filters across an image,
    LocalDot has a different filterbank for each position in the image.
Mathematically, this is a general linear transform except for a
restriction that filters are 0 outside of a spatially localized patch
within the image.
Image shape is 5-tuple:
color_groups
colors_per_group
rows
cols
images
Filterbank shape is 7-tuple (!)
0 row_positions
1 col_positions
2 colors_per_group
3 height
4 width
5 color_groups
6 filters_per_group
    The result of left-multiplication is a 5-tuple with shape:
filter_groups
filters_per_group
row_positions
col_positions
images
Parameters
----------
filters : WRITEME
irows : WRITEME
Image rows
icols : WRITEME
Image columns
subsample : WRITEME
padding_start : WRITEME
filters_shape : WRITEME
message : WRITEME
"""
def __init__(self, filters, irows, icols=None,
subsample=(1, 1),
padding_start=None,
filters_shape=None,
message=""):
LinearTransform.__init__(self, [filters])
self._filters = filters
if filters_shape is None:
self._filters_shape = tuple(filters.get_value(borrow=True).shape)
else:
self._filters_shape = tuple(filters_shape)
self._irows = irows
if icols is None:
self._icols = irows
else:
self._icols = icols
if self._icols != self._irows:
raise NotImplementedError('GPU code at least needs square imgs')
self._subsample = tuple(subsample)
self._padding_start = padding_start
if len(self._filters_shape) != 7:
raise TypeError('need 7-tuple filter shape', self._filters_shape)
if self._subsample[0] != self._subsample[1]:
raise ValueError('subsampling must be same in rows and cols')
self._filter_acts = FilterActs(self._subsample[0])
self._img_acts = ImgActs(module_stride=self._subsample[0])
if message:
self._message = message
else:
self._message = filters.name
def rmul(self, x):
"""
.. todo::
WRITEME
"""
assert x.ndim == 5
return self._filter_acts(x, self._filters)
def rmul_T(self, x):
"""
.. todo::
WRITEME
"""
return self._img_acts(self._filters, x, self._irows, self._icols)
def col_shape(self):
"""
.. todo::
WRITEME
"""
ishape = self.row_shape() + (-99,)
fshape = self._filters_shape
hshape, = self._filter_acts.infer_shape(None, (ishape, fshape))
assert hshape[-1] == -99
return hshape[:-1]
def row_shape(self):
"""
.. todo::
WRITEME
"""
fshape = self._filters_shape
fmodulesR, fmodulesC, fcolors, frows, fcols = fshape[:-2]
fgroups, filters_per_group = fshape[-2:]
return fgroups, fcolors, self._irows, self._icols
def print_status(self):
"""
.. todo::
WRITEME
"""
raise NotImplementedError("TODO: fix dependence on non-existent "
"ndarray_status function")
"""print ndarray_status(
self._filters.get_value(borrow=True),
msg='%s{%s}'% (self.__class__.__name__,
self._message))
"""
def imshow_gray(self):
"""
.. todo::
WRITEME
"""
filters = self._filters.get_value()
modR, modC, colors, rows, cols, grps, fs_per_grp = filters.shape
logger.info(filters.shape)
rval = np.zeros((
modR * (rows + 1) - 1,
modC * (cols + 1) - 1,
))
for rr, modr in enumerate(xrange(0, rval.shape[0], rows + 1)):
for cc, modc in enumerate(xrange(0, rval.shape[1], cols + 1)):
rval[modr:modr + rows, modc:modc + cols] = filters[rr, cc, 0, :, :, 0, 0]
plt.imshow(rval, cmap='gray')
return rval
| bsd-3-clause |
dingocuster/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 247 | 2432 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner circle
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
nhuntwalker/astroML | book_figures/chapter5/fig_likelihood_cauchy.py | 3 | 3219 | """
Log-likelihood for Cauchy Distribution
--------------------------------------
Figure 5.10
An illustration of the logarithm of posterior probability distribution for
:math:`\mu` and :math:`\gamma`, :math:`L(\mu,\gamma)` (see eq. 5.75) for
N = 10 (the sample is generated using the Cauchy distribution with
:math:`\mu = 0` and :math:`\gamma = 2`). The maximum of L is renormalized
to 0, and color coded as shown in the legend. The contours enclose the regions
that contain 0.683, 0.955 and 0.997 of the cumulative (integrated) posterior
probability.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from __future__ import print_function, division
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import cauchy
from astroML.plotting.mcmc import convert_to_stdev
from astroML.stats import median_sigmaG
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
def cauchy_logL(xi, gamma, mu):
"""Equation 5.74: cauchy likelihood"""
xi = np.asarray(xi)
n = xi.size
shape = np.broadcast(gamma, mu).shape
xi = xi.reshape(xi.shape + tuple([1 for s in shape]))
return ((n - 1) * np.log(gamma)
- np.sum(np.log(gamma ** 2 + (xi - mu) ** 2), 0))
#------------------------------------------------------------
# Define the grid and compute logL
gamma = np.linspace(0.1, 5, 70)
mu = np.linspace(-5, 5, 70)
np.random.seed(44)
mu0 = 0
gamma0 = 2
xi = cauchy(mu0, gamma0).rvs(10)
logL = cauchy_logL(xi, gamma[:, np.newaxis], mu)
logL -= logL.max()
#------------------------------------------------------------
# Find the max and print some information
i, j = np.where(logL >= np.max(logL))
print("mu from likelihood:", mu[j])
print("gamma from likelihood:", gamma[i])
print()
med, sigG = median_sigmaG(xi)
print("mu from median", med)
print("gamma from quartiles:", sigG / 1.483) # Equation 3.54
print()
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 3.75))
plt.imshow(logL, origin='lower', cmap=plt.cm.binary,
extent=(mu[0], mu[-1], gamma[0], gamma[-1]),
aspect='auto')
plt.colorbar().set_label(r'$\log(L)$')
plt.clim(-5, 0)
plt.contour(mu, gamma, convert_to_stdev(logL),
levels=(0.683, 0.955, 0.997),
colors='k')
plt.text(0.5, 0.93,
r'$L(\mu,\gamma)\ \mathrm{for}\ \bar{x}=0,\ \gamma=2,\ n=10$',
bbox=dict(ec='k', fc='w', alpha=0.9),
ha='center', va='center', transform=plt.gca().transAxes)
plt.xlabel(r'$\mu$')
plt.ylabel(r'$\gamma$')
plt.show()
| bsd-2-clause |
AllenDowney/SoftwareSystems | hw04/wave3/thinkdsp.py | 23 | 31996 | """This file contains code used in "Think DSP",
by Allen B. Downey, available from greenteapress.com
Copyright 2013 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import array
import math
import numpy
import random
import scipy
import scipy.stats
import struct
import subprocess
import thinkplot
from fractions import gcd
from wave import open as open_wave
import matplotlib.pyplot as pyplot
PI2 = math.pi * 2
def random_seed(x):
"""Initialize the random and numpy.random generators.
x: int seed
"""
random.seed(x)
numpy.random.seed(x)
class UnimplementedMethodException(Exception):
"""Exception if someone calls a method that should be overridden."""
class WavFileWriter(object):
"""Writes wav files."""
def __init__(self, filename='sound.wav', framerate=11025):
"""Opens the file and sets parameters.
filename: string
framerate: samples per second
"""
self.filename = filename
self.framerate = framerate
self.nchannels = 1
self.sampwidth = 2
self.bits = self.sampwidth * 8
self.bound = 2**(self.bits-1) - 1
self.fmt = 'h'
self.dtype = numpy.int16
self.fp = open_wave(self.filename, 'w')
self.fp.setnchannels(self.nchannels)
self.fp.setsampwidth(self.sampwidth)
self.fp.setframerate(self.framerate)
def write(self, wave):
"""Writes a wave.
wave: Wave
"""
zs = wave.quantize(self.bound, self.dtype)
self.fp.writeframes(zs.tostring())
def close(self, duration=0):
"""Closes the file.
duration: how many seconds of silence to append
"""
if duration:
self.write(rest(duration))
self.fp.close()
def read_wave(filename='sound.wav'):
"""Reads a wave file.
filename: string
returns: Wave
"""
fp = open_wave(filename, 'r')
nchannels = fp.getnchannels()
nframes = fp.getnframes()
sampwidth = fp.getsampwidth()
framerate = fp.getframerate()
z_str = fp.readframes(nframes)
fp.close()
dtype_map = {1:numpy.int8, 2:numpy.int16}
assert sampwidth in dtype_map
ys = numpy.fromstring(z_str, dtype=dtype_map[sampwidth])
wave = Wave(ys, framerate)
return wave
def play_wave(filename='sound.wav', player='aplay'):
"""Plays a wave file.
filename: string
player: string name of executable that plays wav files
"""
cmd = '%s %s' % (player, filename)
popen = subprocess.Popen(cmd, shell=True)
popen.communicate()
class _SpectrumParent(object):
"""Contains code common to Spectrum and DCT.
"""
@property
def max_freq(self):
return self.framerate / 2.0
@property
def freq_res(self):
return self.max_freq / (len(self.fs) - 1)
def plot(self, low=0, high=None, **options):
"""Plots amplitude vs frequency.
low: int index to start at
high: int index to end at
"""
thinkplot.plot(self.fs[low:high], self.amps[low:high], **options)
def plot_power(self, low=0, high=None, **options):
"""Plots power vs frequency.
low: int index to start at
high: int index to end at
"""
thinkplot.plot(self.fs[low:high], self.power[low:high], **options)
def estimate_slope(self):
"""Runs linear regression on log power vs log frequency.
returns: slope, inter, r2, p, stderr
"""
x = numpy.log(self.fs[1:])
y = numpy.log(self.power[1:])
t = scipy.stats.linregress(x,y)
return t
def peaks(self):
"""Finds the highest peaks and their frequencies.
returns: sorted list of (amplitude, frequency) pairs
"""
t = zip(self.amps, self.fs)
t.sort(reverse=True)
return t
class Spectrum(_SpectrumParent):
"""Represents the spectrum of a signal."""
def __init__(self, hs, framerate):
self.hs = hs
self.framerate = framerate
n = len(hs)
self.fs = numpy.linspace(0, self.max_freq, n)
def __add__(self, other):
if other == 0:
return self
assert self.framerate == other.framerate
hs = self.hs + other.hs
return Spectrum(hs, self.framerate)
__radd__ = __add__
@property
def real(self):
"""Returns the real part of the hs (read-only property)."""
return numpy.real(self.hs)
@property
def imag(self):
"""Returns the imaginary part of the hs (read-only property)."""
return numpy.imag(self.hs)
@property
def amps(self):
"""Returns a sequence of amplitudes (read-only property)."""
return numpy.absolute(self.hs)
@property
def power(self):
"""Returns a sequence of powers (read-only property)."""
return self.amps ** 2
def low_pass(self, cutoff, factor=0):
"""Attenuate frequencies above the cutoff.
cutoff: frequency in Hz
factor: what to multiply the magnitude by
"""
for i in xrange(len(self.hs)):
if self.fs[i] > cutoff:
self.hs[i] *= factor
def high_pass(self, cutoff, factor=0):
"""Attenuate frequencies below the cutoff.
cutoff: frequency in Hz
factor: what to multiply the magnitude by
"""
for i in xrange(len(self.hs)):
if self.fs[i] < cutoff:
self.hs[i] *= factor
def band_stop(self, low_cutoff, high_cutoff, factor=0):
"""Attenuate frequencies between the cutoffs.
low_cutoff: frequency in Hz
high_cutoff: frequency in Hz
factor: what to multiply the magnitude by
"""
for i in xrange(len(self.hs)):
if low_cutoff < self.fs[i] < high_cutoff:
                self.hs[i] *= factor
def pink_filter(self, beta=1):
"""Apply a filter that would make white noise pink.
beta: exponent of the pink noise
"""
denom = self.fs ** (beta/2.0)
denom[0] = 1
self.hs /= denom
def angles(self, i):
"""Computes phase angles in radians.
returns: list of phase angles
"""
return numpy.angle(self.hs)
def make_integrated_spectrum(self):
"""Makes an integrated spectrum.
"""
cs = numpy.cumsum(self.power)
cs /= cs[-1]
return IntegratedSpectrum(cs, self.fs)
def make_wave(self):
"""Transforms to the time domain.
returns: Wave
"""
ys = numpy.fft.irfft(self.hs)
return Wave(ys, self.framerate)
class IntegratedSpectrum(object):
"""Represents the integral of a spectrum."""
def __init__(self, cs, fs):
"""Initializes an integrated spectrum:
cs: sequence of cumulative amplitudes
fs: sequence of frequences
"""
self.cs = cs
self.fs = fs
def plot_power(self, low=0, high=None, expo=False, **options):
"""Plots the integrated spectrum.
low: int index to start at
high: int index to end at
"""
cs = self.cs[low:high]
fs = self.fs[low:high]
if expo:
cs = numpy.exp(cs)
thinkplot.Plot(fs, cs, **options)
def estimate_slope(self, low=1, high=-12000):
"""Runs linear regression on log cumulative power vs log frequency.
returns: slope, inter, r2, p, stderr
"""
#print self.fs[low:high]
#print self.cs[low:high]
x = numpy.log(self.fs[low:high])
y = numpy.log(self.cs[low:high])
t = scipy.stats.linregress(x,y)
return t
class Dct(_SpectrumParent):
"""Represents the spectrum of a signal."""
def __init__(self, amps, framerate):
self.amps = amps
self.framerate = framerate
n = len(amps)
self.fs = numpy.arange(n) / float(n) * self.max_freq
def make_wave(self):
"""Transforms to the time domain.
returns: Wave
"""
ys = scipy.fftpack.dct(self.amps, type=3) / 2
return Wave(ys, self.framerate)
class Spectrogram(object):
"""Represents the spectrum of a signal."""
def __init__(self, spec_map, seg_length, window_func=None):
"""Initialize the spectrogram.
spec_map: map from float time to Spectrum
seg_length: number of samples in each segment
window_func: function that computes the window
"""
self.spec_map = spec_map
self.seg_length = seg_length
self.window_func = window_func
def any_spectrum(self):
"""Returns an arbitrary spectrum from the spectrogram."""
return self.spec_map.itervalues().next()
@property
def time_res(self):
"""Time resolution in seconds."""
spectrum = self.any_spectrum()
return float(self.seg_length) / spectrum.framerate
@property
def freq_res(self):
"""Frequency resolution in Hz."""
return self.any_spectrum().freq_res
def times(self):
"""Sorted sequence of times.
returns: sequence of float times in seconds
"""
ts = sorted(self.spec_map.iterkeys())
return ts
def frequencies(self):
"""Sequence of frequencies.
returns: sequence of float freqencies in Hz.
"""
fs = self.any_spectrum().fs
return fs
def plot(self, low=0, high=None, **options):
"""Make a pseudocolor plot.
low: index of the lowest frequency component to plot
high: index of the highest frequency component to plot
"""
ts = self.times()
fs = self.frequencies()[low:high]
# make the array
size = len(fs), len(ts)
array = numpy.zeros(size, dtype=numpy.float)
# copy amplitude from each spectrum into a column of the array
for i, t in enumerate(ts):
spectrum = self.spec_map[t]
array[:,i] = spectrum.amps[low:high]
thinkplot.pcolor(ts, fs, array, **options)
def make_wave(self):
"""Inverts the spectrogram and returns a Wave.
returns: Wave
"""
res = []
for t, spectrum in sorted(self.spec_map.iteritems()):
wave = spectrum.make_wave()
n = len(wave)
if self.window_func:
window = 1 / self.window_func(n)
wave.window(window)
i = int(round(t * wave.framerate))
start = i - n / 2
end = start + n
res.append((start, end, wave))
starts, ends, waves = zip(*res)
low = min(starts)
high = max(ends)
ys = numpy.zeros(high-low, numpy.float)
for start, end, wave in res:
ys[start:end] = wave.ys
return Wave(ys, wave.framerate)
class Wave(object):
"""Represents a discrete-time waveform.
Note: the ys attribute is a "wave array" which is a numpy
array of floats.
"""
def __init__(self, ys, framerate, start=0):
"""Initializes the wave.
ys: wave array
        framerate: samples per second
        start: float start time in seconds
        """
self.ys = ys
self.framerate = framerate
self.start = start
def __len__(self):
return len(self.ys)
@property
def duration(self):
"""Duration (property).
returns: float duration in seconds
"""
return len(self.ys) / float(self.framerate)
def __or__(self, other):
"""Concatenates two waves.
other: Wave
returns: Wave
"""
if self.framerate != other.framerate:
raise ValueError('Wave.__or__: framerates do not agree')
ys = numpy.concatenate((self.ys, other.ys))
return Wave(ys, self.framerate)
def quantize(self, bound, dtype):
"""Maps the waveform to quanta.
bound: maximum amplitude
dtype: numpy data type or string
returns: quantized signal
"""
return quantize(self.ys, bound, dtype)
def apodize(self, denom=20, duration=0.1):
"""Tapers the amplitude at the beginning and end of the signal.
Tapers either the given duration of time or the given
fraction of the total duration, whichever is less.
denom: float fraction of the segment to taper
duration: float duration of the taper in seconds
"""
self.ys = apodize(self.ys, self.framerate, denom, duration)
def hamming(self):
"""Apply a Hamming window to the wave.
"""
self.ys *= numpy.hamming(len(self.ys))
def window(self, window):
"""Apply a window to the wave.
window: sequence of multipliers, same length as self.ys
"""
self.ys *= window
def normalize(self, amp=1.0):
"""Normalizes the signal to the given amplitude.
amp: float amplitude
"""
self.ys = normalize(self.ys, amp=amp)
def unbias(self):
"""Unbiases the signal.
"""
self.ys = unbias(self.ys)
def segment(self, start=0, duration=None):
"""Extracts a segment.
start: float start time in seconds
duration: float duration in seconds
returns: Wave
"""
i = start * self.framerate
if duration is None:
j = None
else:
j = i + duration * self.framerate
ys = self.ys[i:j]
return Wave(ys, self.framerate)
def make_spectrum(self):
"""Computes the spectrum using FFT.
returns: Spectrum
"""
hs = numpy.fft.rfft(self.ys)
return Spectrum(hs, self.framerate)
def make_dct(self):
amps = scipy.fftpack.dct(self.ys, type=2)
return Dct(amps, self.framerate)
def make_spectrogram(self, seg_length, window_func=numpy.hamming):
"""Computes the spectrogram of the wave.
seg_length: number of samples in each segment
window_func: function used to compute the window
returns: Spectrogram
"""
n = len(self.ys)
window = window_func(seg_length)
start, end, step = 0, seg_length, seg_length / 2
spec_map = {}
while end < n:
ys = self.ys[start:end] * window
hs = numpy.fft.rfft(ys)
t = (start + end) / 2.0 / self.framerate
spec_map[t] = Spectrum(hs, self.framerate)
start += step
end += step
return Spectrogram(spec_map, seg_length, window_func)
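    # Usage sketch (illustrative, not part of the original docstrings):
    #   wave = read_wave('sound.wav')
    #   spectrogram = wave.make_spectrogram(seg_length=512)
    #   spectrogram.plot(high=100)
    # Successive segments overlap by half a segment (step = seg_length / 2).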
def plot(self, **options):
"""Plots the wave.
"""
n = len(self.ys)
ts = numpy.linspace(0, self.duration, n)
thinkplot.plot(ts, self.ys, **options)
def corr(self, other):
"""Correlation coefficient two waves.
other: Wave
returns: 2x2 covariance matrix
"""
mat = self.cov_mat(other)
corr = mat[0][1] / math.sqrt(mat[0][0] * mat[1][1])
return corr
def cov_mat(self, other):
"""Covariance matrix of two waves.
other: Wave
returns: 2x2 covariance matrix
"""
return numpy.cov(self.ys, other.ys)
def cov(self, other):
"""Covariance of two unbiased waves.
other: Wave
returns: float
"""
total = sum(self.ys * other.ys) / len(self.ys)
return total
def cos_cov(self, k):
"""Covariance with a cosine signal.
        k: index of the cosine basis function
returns: float covariance
"""
n = len(self.ys)
factor = math.pi * k / n
ys = [math.cos(factor * (i+0.5)) for i in range(n)]
total = 2 * sum(self.ys * ys)
return total
def cos_transform(self):
"""Discrete cosine transform.
returns: list of frequency, cov pairs
"""
n = len(self.ys)
res = []
for k in range(n):
cov = self.cos_cov(k)
res.append((k, cov))
return res
def write(self, filename='sound.wav'):
"""Write a wave file.
filename: string
"""
print 'Writing', filename
wfile = WavFileWriter(filename, self.framerate)
wfile.write(self)
wfile.close()
def play(self, filename='sound.wav'):
"""Plays a wave file.
filename: string
"""
self.write(filename)
play_wave(filename)
def unbias(ys):
"""Shifts a wave array so it has mean 0.
ys: wave array
returns: wave array
"""
return ys - ys.mean()
def normalize(ys, amp=1.0):
"""Normalizes a wave array so the maximum amplitude is +amp or -amp.
ys: wave array
amp: max amplitude (pos or neg) in result
returns: wave array
"""
high, low = abs(max(ys)), abs(min(ys))
return amp * ys / max(high, low)
def quantize(ys, bound, dtype):
"""Maps the waveform to quanta.
ys: wave array
bound: maximum amplitude
dtype: numpy data type of the result
returns: quantized signal
"""
if max(ys) > 1 or min(ys) < -1:
print 'Warning: normalizing before quantizing.'
ys = normalize(ys)
zs = (ys * bound).astype(dtype)
return zs
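# Worked example: with the WavFileWriter defaults (16-bit samples), bound is
# 2**15 - 1 = 32767 and dtype is numpy.int16, so an amplitude of 1.0 quantizes
# to 32767 and -1.0 to -32767.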
def apodize(ys, framerate, denom=20, duration=0.1):
"""Tapers the amplitude at the beginning and end of the signal.
Tapers either the given duration of time or the given
fraction of the total duration, whichever is less.
ys: wave array
framerate: int frames per second
denom: float fraction of the segment to taper
duration: float duration of the taper in seconds
returns: wave array
"""
# a fixed fraction of the segment
n = len(ys)
k1 = n / denom
# a fixed duration of time
k2 = int(duration * framerate)
k = min(k1, k2)
w1 = numpy.linspace(0, 1, k)
w2 = numpy.ones(n - 2*k)
w3 = numpy.linspace(1, 0, k)
window = numpy.concatenate((w1, w2, w3))
return ys * window
class Signal(object):
"""Represents a time-varying signal."""
def __add__(self, other):
"""Adds two signals.
other: Signal
returns: Signal
"""
if other == 0:
return self
return SumSignal(self, other)
__radd__ = __add__
@property
def period(self):
"""Period of the signal in seconds (property).
For non-periodic signals, use the default, 0.1 seconds
returns: float seconds
"""
return 0.1
def plot(self, framerate=11025):
"""Plots the signal.
framerate: samples per second
"""
duration = self.period * 3
wave = self.make_wave(duration, start=0, framerate=framerate)
wave.plot()
def make_wave(self, duration=1, start=0, framerate=11025):
"""Makes a Wave object.
duration: float seconds
start: float seconds
framerate: int frames per second
returns: Wave
"""
dt = 1.0 / framerate
ts = numpy.arange(start, duration, dt)
ys = self.evaluate(ts)
return Wave(ys, framerate=framerate, start=start)
def infer_framerate(ts):
"""Given ts, find the framerate.
Assumes that the ts are equally spaced.
ts: sequence of times in seconds
returns: frames per second
"""
dt = ts[1] - ts[0]
framerate = 1.0 / dt
return framerate
class SumSignal(Signal):
"""Represents the sum of signals."""
def __init__(self, *args):
"""Initializes the sum.
args: tuple of signals
"""
self.signals = args
@property
def period(self):
"""Period of the signal in seconds.
        Note: this is not correct in general; it's mostly a placeholder.
But it is correct for a harmonic sequence where all
component frequencies are multiples of the fundamental.
returns: float seconds
"""
return max(sig.period for sig in self.signals)
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
return sum(sig.evaluate(ts) for sig in self.signals)
class Sinusoid(Signal):
"""Represents a sinusoidal signal."""
def __init__(self, freq=440, amp=1.0, offset=0, func=numpy.sin):
"""Initializes a sinusoidal signal.
freq: float frequency in Hz
amp: float amplitude, 1.0 is nominal max
offset: float phase offset in radians
func: function that maps phase to amplitude
"""
self.freq = freq
self.amp = amp
self.offset = offset
self.func = func
@property
def period(self):
"""Period of the signal in seconds.
returns: float seconds
"""
return 1.0 / self.freq
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
phases = PI2 * self.freq * ts + self.offset
ys = self.amp * self.func(phases)
return ys
def CosSignal(freq=440, amp=1.0, offset=0):
"""Makes a consine Sinusoid.
freq: float frequency in Hz
amp: float amplitude, 1.0 is nominal max
offset: float phase offset in radians
returns: Sinusoid object
"""
return Sinusoid(freq, amp, offset, func=numpy.cos)
def SinSignal(freq=440, amp=1.0, offset=0):
"""Makes a sine Sinusoid.
freq: float frequency in Hz
amp: float amplitude, 1.0 is nominal max
offset: float phase offset in radians
returns: Sinusoid object
"""
return Sinusoid(freq, amp, offset, func=numpy.sin)
class SquareSignal(Sinusoid):
"""Represents a square signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = self.amp * numpy.sign(unbias(frac))
return ys
class SawtoothSignal(Sinusoid):
"""Represents a sawtooth signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = normalize(unbias(frac), self.amp)
return ys
class ParabolicSignal(Sinusoid):
"""Represents a parabolic signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = frac**2
ys = normalize(unbias(ys), self.amp)
return ys
class GlottalSignal(Sinusoid):
"""Represents a periodic signal that resembles a glottal signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = frac**4 * (1-frac)
ys = normalize(unbias(ys), self.amp)
return ys
class TriangleSignal(Sinusoid):
"""Represents a triangle signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = numpy.abs(frac - 0.5)
ys = normalize(unbias(ys), self.amp)
return ys
class Chirp(Signal):
"""Represents a signal with variable frequency."""
def __init__(self, start=440, end=880, amp=1.0):
"""Initializes a linear chirp.
start: float frequency in Hz
end: float frequency in Hz
amp: float amplitude, 1.0 is nominal max
"""
self.start = start
self.end = end
self.amp = amp
@property
def period(self):
"""Period of the signal in seconds.
returns: float seconds
"""
        raise ValueError('Non-periodic signal.')
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
freqs = numpy.linspace(self.start, self.end, len(ts)-1)
return self._evaluate(ts, freqs)
def _evaluate(self, ts, freqs):
"""Helper function that evaluates the signal.
ts: float array of times
freqs: float array of frequencies during each interval
"""
#n = len(freqs)
#print freqs[::n/2]
dts = numpy.diff(ts)
dps = PI2 * freqs * dts
phases = numpy.cumsum(dps)
phases = numpy.insert(phases, 0, 0)
ys = self.amp * numpy.cos(phases)
return ys
class ExpoChirp(Chirp):
"""Represents a signal with varying frequency."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
start, end = math.log10(self.start), math.log10(self.end)
freqs = numpy.logspace(start, end, len(ts)-1)
return self._evaluate(ts, freqs)
class SilentSignal(Signal):
"""Represents silence."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
return numpy.zeros(len(ts))
class _Noise(Signal):
"""Represents a noise signal (abstract parent class)."""
def __init__(self, amp=1.0):
"""Initializes a white noise signal.
amp: float amplitude, 1.0 is nominal max
"""
self.amp = amp
@property
def period(self):
"""Period of the signal in seconds.
returns: float seconds
"""
        raise ValueError('Non-periodic signal.')
class UncorrelatedUniformNoise(_Noise):
"""Represents uncorrelated uniform noise."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
ys = numpy.random.uniform(-self.amp, self.amp, len(ts))
return ys
class UncorrelatedGaussianNoise(_Noise):
"""Represents uncorrelated gaussian noise."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
ys = numpy.random.normal(0, 1, len(ts))
ys = normalize(ys, self.amp)
return ys
class BrownianNoise(_Noise):
"""Represents Brownian noise, aka red noise."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
Computes Brownian noise by taking the cumulative sum of
a uniform random series.
ts: float array of times
returns: float wave array
"""
#dys = numpy.random.normal(0, 1, len(ts))
dys = numpy.random.uniform(-1, 1, len(ts))
#ys = numpy.cumsum(dys)
ys = scipy.integrate.cumtrapz(dys, ts)
ys = normalize(unbias(ys), self.amp)
return ys
class PinkNoise(_Noise):
"""Represents Brownian noise, aka red noise."""
def __init__(self, amp=1.0, beta=1.0):
"""Initializes a pink noise signal.
amp: float amplitude, 1.0 is nominal max
"""
self.amp = amp
self.beta = beta
def make_wave(self, duration=1, start=0, framerate=11025):
"""Makes a Wave object.
duration: float seconds
start: float seconds
framerate: int frames per second
returns: Wave
"""
signal = UncorrelatedUniformNoise()
wave = signal.make_wave(duration, start, framerate)
spectrum = wave.make_spectrum()
spectrum.pink_filter(beta=self.beta)
wave2 = spectrum.make_wave()
wave2.unbias()
wave2.normalize(self.amp)
return wave2
def rest(duration):
"""Makes a rest of the given duration.
duration: float seconds
returns: Wave
"""
signal = SilentSignal()
wave = signal.make_wave(duration)
return wave
def make_note(midi_num, duration, sig_cons=CosSignal, framerate=11025):
"""Make a MIDI note with the given duration.
midi_num: int MIDI note number
duration: float seconds
sig_cons: Signal constructor function
framerate: int frames per second
returns: Wave
"""
freq = midi_to_freq(midi_num)
signal = sig_cons(freq)
wave = signal.make_wave(duration, framerate=framerate)
wave.apodize()
return wave
def make_chord(midi_nums, duration, sig_cons=CosSignal, framerate=11025):
"""Make a chord with the given duration.
midi_nums: sequence of int MIDI note numbers
duration: float seconds
sig_cons: Signal constructor function
framerate: int frames per second
returns: Wave
"""
freqs = [midi_to_freq(num) for num in midi_nums]
signal = sum(sig_cons(freq) for freq in freqs)
wave = signal.make_wave(duration, framerate=framerate)
wave.apodize()
return wave
def midi_to_freq(midi_num):
"""Converts MIDI note number to frequency.
midi_num: int MIDI note number
returns: float frequency in Hz
"""
x = (midi_num - 69) / 12.0
freq = 440.0 * 2**x
return freq
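# Worked example: MIDI note 69 (concert A) maps to 440 Hz, note 81 (an octave
# higher) to 880 Hz, and note 60 (middle C) to roughly 261.6 Hz.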
def sin_wave(freq, duration=1, offset=0):
"""Makes a sine wave with the given parameters.
freq: float cycles per second
duration: float seconds
offset: float radians
returns: Wave
"""
signal = SinSignal(freq, offset=offset)
wave = signal.make_wave(duration)
return wave
def cos_wave(freq, duration=1, offset=0):
"""Makes a cosine wave with the given parameters.
freq: float cycles per second
duration: float seconds
offset: float radians
returns: Wave
"""
signal = CosSignal(freq, offset=offset)
wave = signal.make_wave(duration)
return wave
def mag(a):
"""Computes the magnitude of a numpy array.
a: numpy array
returns: float
"""
return numpy.sqrt(numpy.dot(a, a))
def main():
cos_basis = cos_wave(440)
sin_basis = sin_wave(440)
wave = cos_wave(440, offset=math.pi/2)
cos_cov = cos_basis.cov(wave)
sin_cov = sin_basis.cov(wave)
print cos_cov, sin_cov, mag((cos_cov, sin_cov))
return
wfile = WavFileWriter()
for sig_cons in [SinSignal, TriangleSignal, SawtoothSignal,
GlottalSignal, ParabolicSignal, SquareSignal]:
print sig_cons
sig = sig_cons(440)
wave = sig.make_wave(1)
wave.apodize()
wfile.write(wave)
wfile.close()
return
signal = GlottalSignal(440)
signal.plot()
pyplot.show()
return
wfile = WavFileWriter()
for m in range(60, 0, -1):
wfile.write(make_note(m, 0.25))
wfile.close()
return
wave1 = make_note(69, 1)
wave2 = make_chord([69, 72, 76], 1)
wave = wave1 | wave2
wfile = WavFileWriter()
wfile.write(wave)
wfile.close()
return
sig1 = CosSignal(freq=440)
sig2 = CosSignal(freq=523.25)
sig3 = CosSignal(freq=660)
sig4 = CosSignal(freq=880)
sig5 = CosSignal(freq=987)
sig = sig1 + sig2 + sig3 + sig4
#wave = Wave(sig, duration=0.02)
#wave.plot()
wave = sig.make_wave(duration=1)
#wave.normalize()
wfile = WavFileWriter(wave)
wfile.write()
wfile.close()
if __name__ == '__main__':
main()
| gpl-3.0 |
jjx02230808/project0223 | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
idlead/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 112 | 1819 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
fboers/jumeg | examples/do_MLICA.py | 1 | 5891 | """
Compute ICA object based on filtered and downsampled data.
Identify ECG and EOG artifacts using MLICA and compare
results to correlation & ctps analysis.
Apply ICA object to filtered and unfiltered data.
Ahmad Hasasneh, Nikolas Kampel, Praveen Sripad, N. Jon Shah, and Juergen Dammers
"Deep Learning Approach for Automatic Classification of Ocular and Cardiac
Artifacts in MEG Data"
Journal of Engineering, vol. 2018, Article ID 1350692,10 pages, 2018.
https://doi.org/10.1155/2018/1350692
"""
import os.path as op
import matplotlib.pylab as plt
plt.ion()
import numpy as np
import mne
from jumeg.decompose.ica_replace_mean_std import ICA, ica_update_mean_std
from keras.models import load_model
from jumeg.jumeg_noise_reducer import noise_reducer
from jumeg.jumeg_preprocessing import get_ics_cardiac, get_ics_ocular
from jumeg.jumeg_plot import plot_performance_artifact_rejection
from jumeg.jumeg_utils import get_jumeg_path
# config
MLICA_threshold = 0.8
n_components = 60
njobs = 4 # for downsampling
tmin = 0
tmax = tmin + 15000
flow_ecg, fhigh_ecg = 8, 20
flow_eog, fhigh_eog = 1, 20
ecg_thresh, eog_thresh = 0.3, 0.3
ecg_ch = 'ECG 001'
eog1_ch = 'EOG 001'
eog2_ch = 'EOG 002'
reject = {'mag': 5e-12}
refnotch = [50., 100., 150., 200., 250., 300., 350., 400.]
data_path = op.join(get_jumeg_path(), 'data')
print(data_path)
# example filname
raw_fname = "/Volumes/megraid21/sripad/cau_fif_data/jumeg_test_data/" \
"109925_CAU01A_100715_0842_2_c,rfDC-raw.fif"
# load the model for artifact rejection
# the details of the model are provided in the x_validation_shuffle_v4_split_23.txt
model_name = op.join(data_path, "dcnn_model.hdf5")
model = load_model(model_name)
# noise reducer
raw_nr = noise_reducer(raw_fname, reflp=5., return_raw=True)
raw_nr = noise_reducer(raw_fname, raw=raw_nr, refhp=0.1, noiseref=['RFG ...'],
return_raw=True)
# 50HZ and 60HZ notch filter to remove noise
raw = noise_reducer(raw_fname, raw=raw_nr, refnotch=refnotch, return_raw=True)
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=False,
stim=False, exclude='bads')
raw_filtered = raw.copy().filter(0., 45., picks=picks, filter_length='auto',
l_trans_bandwidth='auto', h_trans_bandwidth='auto',
n_jobs=njobs, method='fir', phase='zero',
fir_window='hamming')
# downsample the data to 250 Hz, necessary for the model
raw_ds = raw_filtered.copy().resample(250, npad='auto', window='boxcar', stim_picks=None,
n_jobs=njobs, events=None)
raw_ds_chop = raw_ds.copy().crop(tmin=tmin*4./1000, tmax=tmax*4./1000) # downsampled raw
raw_filtered_chop = raw_filtered.copy().crop(tmin=tmin*4./1000, tmax=tmax*4./1000)
raw_chop = raw.copy().crop(tmin=tmin*4./1000, tmax=tmax*4./1000)
ica = ICA(method='fastica', n_components=n_components, random_state=42,
max_pca_components=None, max_iter=5000, verbose=None)
# do the ICA decomposition on downsampled raw
ica.fit(raw_ds_chop, picks=picks, reject=reject, verbose=None)
sources = ica.get_sources(raw_ds_chop)._data
# extract temporal and spatial components
mm = np.float32(np.dot(ica.mixing_matrix_[:, :].T,
ica.pca_components_[:ica.n_components_]))
# use [:, :15000] to make sure it's 15000 data points
chop = sources[:, :15000]
chop_reshaped = np.reshape(chop, (len(chop), len(chop[0]), 1))
model_scores = model.predict([mm, chop_reshaped], verbose=1)
bads_MLICA = []
# print model_scores
for idx in range(0, len(model_scores)):
if model_scores[idx][0] > MLICA_threshold:
bads_MLICA.append(idx)
# visualisation
# ica.exclude = bads_MLICA
# ica.plot_sources(raw_ds_chop, block=True)
# compare MLICA to results from correlation and ctps analysis
ica.exclude = []
print('Identifying components..')
# get ECG/EOG related components using JuMEG
ic_ecg = get_ics_cardiac(raw_filtered_chop, ica, flow=flow_ecg, fhigh=fhigh_ecg,
thresh=ecg_thresh, tmin=-0.5, tmax=0.5,
name_ecg=ecg_ch, use_CTPS=True)[0] # returns both ICs and scores (take only ICs)
ic_eog = get_ics_ocular(raw_filtered_chop, ica, flow=flow_eog, fhigh=fhigh_eog,
thresh=eog_thresh, name_eog_hor=eog1_ch,
name_eog_ver=eog2_ch, score_func='pearsonr')
bads_corr_ctps = list(ic_ecg) + list(ic_eog)
bads_corr_ctps = list(set(bads_corr_ctps)) # remove potential duplicates
bads_corr_ctps.sort()
# visualisation
# ica.exclude = bads_corr_ctps
# ica.plot_sources(raw_chop, block=True)
print('Bad components from MLICA:', bads_MLICA)
print('Bad components from correlation & ctps:', bads_corr_ctps)
# apply MLICA result to filtered and unfiltered data
# exclude bad components identified by MLICA
ica.exclude = bads_MLICA
fnout_fig = '109925_CAU01A_100715_0842_2_c,rfDC,0-45hz,ar-perf'
ica_filtered_chop = ica_update_mean_std(raw_filtered_chop, ica, picks=picks, reject=reject)
raw_filtered_chop_clean = ica_filtered_chop.apply(raw_filtered_chop, exclude=ica.exclude,
n_pca_components=None)
ica_unfiltered_chop = ica_update_mean_std(raw_chop, ica, picks=picks, reject=reject)
raw_unfiltered_chop_clean = ica_unfiltered_chop.apply(raw_chop, exclude=ica.exclude, n_pca_components=None)
# create copy of original data since apply_ica_replace_mean_std changes the input data in place (raw and ica)
raw_copy = raw.copy().crop(tmin=tmin*4./1000, tmax=tmax*4./1000)
plot_performance_artifact_rejection(raw_copy, ica_unfiltered_chop, fnout_fig,
meg_clean=raw_unfiltered_chop_clean,
show=False, verbose=False,
name_ecg=ecg_ch,
name_eog=eog2_ch)
| bsd-3-clause |
irblsensitivity/irblsensitivity | scripts/analysis/MWU_Project_EMSE.py | 1 | 9231 | #-*- coding: utf-8 -*-
'''
Created on 2017. 02. 12
Updated on 2017. 02. 12
'''
from __future__ import print_function
import os
import re
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
from scipy.stats import mannwhitneyu, pearsonr
from ExpBase import ExpBase
import numpy as np
from commons import Subjects
class MWUTest(ExpBase):
techniques = ['BugLocator', 'BRTracer', 'BLUiR', 'AmaLgam', 'BLIA', 'Locus']
validDigits = {
'AvgLOC': 2, 'InvNSrc': 4, 'AvgCC': 4, 'SrcAvgDistTk': 2, 'SrcAvgNTk': 2, 'SrcRatioDict': 4, 'NSrc': 2, 'SrcNumCmt': 4, 'SrcNDistTk': 0, 'SrcLocalDistTk': 3, 'SrcRatioCmt': 4, 'SrcNumMhd': 4, 'RatioEnum': 4,
'RepAvgTk': 2, 'NReport': 0, 'RepNDistTk': 0, 'RepAvgDistTk': 3, 'RepAvgLocalTk':4, 'RepAvgCE': 4, 'RatioCode': 4, 'RatioSTrace': 4, '|STinterRT|': 0,
'AvgMinIRf': 4, 'AvgMaxIRf': 4, 'AvgMeanIRf': 4, 'KSDist': 4, 'AvgUIRf': 4, 'AvgProdIRf': 4, 'hasCE': 4,
'hasSTrace': 4, 'hasCR': 4, 'hasEnum': 4,
'NTk':2, 'NDistTk':3, 'NLocalTk':4, 'NDistCE':3
}
featureorders = {
'01': ['AvgLOC', 'AvgCC', 'SrcAvgNTk', 'SrcAvgDistTk', 'SrcLocalDistTk', 'SrcNDistTk', 'NSrc', 'InvNSrc',
'SrcNumMhd',
'SrcNumCmt', 'SrcRatioCmt', 'SrcRatioDict'],
'02': ['RatioEnum', 'RatioSTrace', 'RatioCode', 'RepNDistTk', 'RepAvgTk', 'RepAvgDistTk', 'RepAvgLocalTk', 'RepAvgCE',
'NReport'],
'03': ['|STinterRT|', 'KSDist', 'AvgProdIRf', 'AvgMinIRf', 'AvgMaxIRf', 'AvgMeanIRf', 'AvgUIRf'],
'04': ['hasEnum', 'hasSTrace', 'hasCR', 'hasCE'],
'05': ['NTk', 'NDistTk', 'NLocalTk', 'NDistCE']
}
def MWUtest(self, _dataA, _dataB, _bugsA=None, _bugsB=None):
'''
Mann-Whitney U Test between IRBL technique results
        :param _dataA: the results of type A
        :param _dataB: the results of type B
        :param _bugsA: the count of bugs for each technique
        :param _bugsB: the count of bugs for each technique
        :return: {technique : pvalue, technique: pvalue, ...}
'''
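        # Expected input sketch (inferred from the loop below, not stated in
        # the original docs): _dataA and _dataB map a bug id to a list with
        # one value per technique, e.g.
        # {'BUG-1': [v_BugLocator, v_BRTracer, v_BLUiR, v_AmaLgam, v_BLIA, v_Locus], ...}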
results = {}
for idx in range(len(self.techniques)):
filteredDataA = [items[idx] for items in _dataA.values()]
filteredDataB = [items[idx] for items in _dataB.values()]
#filteredDataA, labels = self.get_array_items(_dataA, idx)
#filteredDataB, labels = self.get_array_items(_dataB, idx)
if _bugsA is not None:
if isinstance(_bugsA, dict) is True:
filteredDataA += ([0] * (_bugsA[self.techniques[idx]] - len(filteredDataA)))
else:
filteredDataA += ([0] * (_bugsA - len(filteredDataA)))
if _bugsB is not None:
if isinstance(_bugsB, dict) is True:
filteredDataB += ([0] * (_bugsB[self.techniques[idx]] - len(filteredDataB)))
else:
filteredDataB += ([0] * (_bugsB - len(filteredDataB)))
#slope, intercept, r_value, p_value, stderr = stats.linregress(dataMAP, dataFeature)
t_statistic, t_pvalue = mannwhitneyu(filteredDataA, filteredDataB, use_continuity=True, alternative='two-sided')
l_statistic, l_pvalue = mannwhitneyu(filteredDataA, filteredDataB, use_continuity=True, alternative='less')
g_statistic, g_pvalue = mannwhitneyu(filteredDataA, filteredDataB, use_continuity=True, alternative='greater')
pvalue = min(t_pvalue , l_pvalue, g_pvalue)
#statistic, pvalue = mannwhitneyu(filteredDataA, filteredDataB, use_continuity=True, alternative='two-sided') # 'less', 'two-sided', 'greater'
results[self.techniques[idx]] = pvalue
return results
def get_technique_averages(self, _source, _counts):
'''
:param _source: project's bug results dict
        :param _counts: original bug counts for each technique
:return:
'''
results = {}
for idx in range(len(self.techniques)):
sumValue = 0
for itemID, item in _source.iteritems():
sumValue += item[idx]
results[self.techniques[idx]] = sumValue / float(_counts[self.techniques[idx]])
return results
def compare_single_results(self, _basepath):
'''
for Table 7 : single results
:param _basepath:
:return:
'''
techinques, CNTdata = self.load_results(os.path.join(_basepath, u'BugCNT.txt'), ['str'] * 2 + ['int'] * 6)
def get_averages(_itemType):
results = {}
for tData in ['Old', 'New_Single']:
filepath = os.path.join(_basepath, u'%s_%s.txt' % (tData, _itemType))
titles, data = self.load_results_items(filepath, ['str'] * 3 + ['float'] * 6)
for group in data:
if group not in results: results[group] = {}
for project in data[group]:
CNTs = dict(zip(titles, CNTdata[group][project]))
results[group][project] = self.get_technique_averages(data[group][project], CNTs)
return results
APresults = get_averages('AP')
TPresults = get_averages('TP')
features = self.extract_features(_basepath)
print(u'Technique Mann-Whitney U Test p-values')
print(u'\t' + u'\t\t'.join(self.techniques))
print(u'Subject\tMAP\tMRR\tMAP\tMRR\tMAP\tMRR\tMAP\tMRR\tMAP\tMRR\tMAP\tMRR')
S = Subjects()
S.groups.append(u'Previous')
S.projects[u'Previous'] = [u'AspectJ', u'ZXing', u'PDE', u'JDT', u'SWT']
for group in S.groups:
for project in S.projects[group]:
text = u'%s' % project
APmax = self.techniques[0]
TPmax = self.techniques[0]
for tech in self.techniques:
if APresults[group][project][APmax] < APresults[group][project][tech]:
APmax = tech
if TPresults[group][project][TPmax] < TPresults[group][project][tech]:
TPmax = tech
for tech in self.techniques:
if APmax != tech: text += u' & %.4f' % APresults[group][project][tech]
else: text += u' & \\cellcolor{blue!25}\\textbf{%.4f}' % APresults[group][project][tech]
if TPmax != tech: text += u' & %.4f' % TPresults[group][project][tech]
else: text += u' & \\cellcolor{green!25}\\textbf{%.4f}' % TPresults[group][project][tech]
# if group in features:
# for fid in [u'RatioEnum', u'RatioSTrace', u'RatioCode', u'RepAvgTk']:
# text += u' & %.4f' % features[group][project][fid]
# text += u' \\\\'
# else:
# text += u' & & & & \\\\'
text += u' \\\\'
print(text)
pass
def compare_multi_results(self, _basepath):
'''
        for Table 7 : multiple results
:param _basepath:
:return:
'''
techinques, CNTdata = self.load_results(os.path.join(_basepath, u'BugCNT.txt'), ['str'] * 2 + ['int'] * 6)
def get_average_mwu(_itemType):
results = {}
multi = os.path.join(_basepath, u'New_Multiple_%s.txt' % _itemType)
titles, dataM = self.load_results_items(multi, ['str'] * 3 + ['float'] * 6)
# MWUresults = {}
# single = os.path.join(_basepath, u'New_Single_%s.txt' % _itemType)
# titles, dataS = self.load_results_items(single, ['str'] * 3 + ['float'] * 6)
for group in dataM:
if group not in results: results[group] = {}
#if group not in MWUresults: MWUresults[group] = {}
for project in dataM[group]:
CNTs = dict(zip(titles, CNTdata[group][project]))
results[group][project] = self.get_technique_averages(dataM[group][project], CNTs)
#MWUresults[group][project] = self.MWUtest(dataS[group][project], dataM[group][project], CNTs, CNTs)
return results #, MWUresults
APresults = get_average_mwu('AP')
TPresults = get_average_mwu('TP')
print(u'')
print(u'\t' + u'\t\t'.join(self.techniques))
print(u'Subject\tMAP\tMRR\tMAP\tMRR\tMAP\tMRR\tMAP\tMRR\tMAP\tMRR\tMAP\tMRR')
S = Subjects()
for group in S.groups:
for project in S.projects[group]:
text = u'%s' % project
APmax = self.techniques[0]
TPmax = self.techniques[0]
for tech in self.techniques:
if APresults[group][project][APmax] < APresults[group][project][tech]:
APmax = tech
if TPresults[group][project][TPmax] < TPresults[group][project][tech]:
TPmax = tech
for tech in self.techniques:
if APmax != tech: text += u' & %.4f' % APresults[group][project][tech]
else: text += u' & \\cellcolor{blue!25}\\textbf{%.4f}' % APresults[group][project][tech]
if TPmax != tech: text += u' & %.4f ' % TPresults[group][project][tech]
else: text += u' & \\cellcolor{green!25}\\textbf{%.4f} ' % TPresults[group][project][tech]
print(text, end=u'')
print(u' \\\\')
pass
def extract_features(self, _basepath):
titles, data = self.load_results(os.path.join(_basepath, u'02_PW_Bug_Features.txt'), ['str'] * 2 + ['int'] + ['float'] * 3 + ['int', 'float'] )
for group in data:
for project in data[group]:
item = data[group][project]
data[group][project] = dict(zip([u'RatioEnum', u'RatioSTrace', u'RatioCode', u'RepAvgTk'], [item[1], item[2], item[3], item[5]]))
return data
###############################################################################################################
###############################################################################################################
if __name__ == "__main__":
basepath = u'/mnt/exp/Bug/analysis/'
obj = MWUTest()
obj.compare_multi_results(basepath)
obj.compare_single_results(basepath)
# obj.compare_test(basepath)
#obj.calc_pearson(basepath)
#obj.compare_dup_results(basepath)
| apache-2.0 |
fmfn/UnbalancedDataset | examples/under-sampling/plot_illustration_tomek_links.py | 2 | 3180 | """
==============================================
Illustration of the definition of a Tomek link
==============================================
This example illustrates what is a Tomek link.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
# %%
print(__doc__)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context("poster")
# %% [markdown]
# This helper function makes the plots nicer
# %%
def make_plot_despine(ax):
sns.despine(ax=ax, offset=10)
ax.set_xlim([0, 3])
ax.set_ylim([0, 3])
ax.set_xlabel(r"$X_1$")
ax.set_ylabel(r"$X_2$")
ax.legend(loc="lower right")
# %% [markdown]
# We will generate some toy data that illustrates how
# :class:`~imblearn.under_sampling.TomekLinks` is used to clean a dataset.
# %%
import numpy as np
rng = np.random.RandomState(18)
X_minority = np.transpose(
[[1.1, 1.3, 1.15, 0.8, 0.55, 2.1], [1.0, 1.5, 1.7, 2.5, 0.55, 1.9]]
)
X_majority = np.transpose(
[
[2.1, 2.12, 2.13, 2.14, 2.2, 2.3, 2.5, 2.45],
[1.5, 2.1, 2.7, 0.9, 1.0, 1.4, 2.4, 2.9],
]
)
# %% [markdown]
# In the figure below, the samples highlighted in green form a Tomek link since
# they are of different classes and are nearest neighbors of each other.
fig, ax = plt.subplots(figsize=(8, 8))
ax.scatter(
X_minority[:, 0],
X_minority[:, 1],
label="Minority class",
s=200,
marker="_",
)
ax.scatter(
X_majority[:, 0],
X_majority[:, 1],
label="Majority class",
s=200,
marker="+",
)
# highlight the samples of interest
ax.scatter(
[X_minority[-1, 0], X_majority[1, 0]],
[X_minority[-1, 1], X_majority[1, 1]],
label="Tomek link",
s=200,
alpha=0.3,
)
make_plot_despine(ax)
fig.suptitle("Illustration of a Tomek link")
fig.tight_layout()
# %% [markdown]
# We can run the :class:`~imblearn.under_sampling.TomekLinks` sampling to
# remove the corresponding samples. If `sampling_strategy='auto'` only the
# sample from the majority class will be removed. If `sampling_strategy='all'`
# both samples will be removed.
# %%
from imblearn.under_sampling import TomekLinks
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(16, 8))
samplers = {
"Removing only majority samples": TomekLinks(sampling_strategy="auto"),
"Removing all samples": TomekLinks(sampling_strategy="all"),
}
for ax, (title, sampler) in zip(axs, samplers.items()):
X_res, y_res = sampler.fit_resample(
np.vstack((X_minority, X_majority)),
np.array([0] * X_minority.shape[0] + [1] * X_majority.shape[0]),
)
ax.scatter(
X_res[y_res == 0][:, 0],
X_res[y_res == 0][:, 1],
label="Minority class",
s=200,
marker="_",
)
ax.scatter(
X_res[y_res == 1][:, 0],
X_res[y_res == 1][:, 1],
label="Majority class",
s=200,
marker="+",
)
# highlight the samples of interest
ax.scatter(
[X_minority[-1, 0], X_majority[1, 0]],
[X_minority[-1, 1], X_majority[1, 1]],
label="Tomek link",
s=200,
alpha=0.3,
)
ax.set_title(title)
make_plot_despine(ax)
fig.tight_layout()
plt.show()
| mit |
HiSPARC/sapphire | scripts/simulations/analyze_shower_front.py | 1 | 5153 | import numpy as np
import tables
from scipy.optimize import curve_fit
from scipy.stats import scoreatpercentile
from artist import GraphArtist
from pylab import *
import matplotlib.pyplot as plt
import utils
USE_TEX = False
# For matplotlib plots
if USE_TEX:
rcParams['font.serif'] = 'Computer Modern'
rcParams['font.sans-serif'] = 'Computer Modern'
rcParams['font.family'] = 'sans-serif'
rcParams['figure.figsize'] = [4 * x for x in (1, 2. / 3)]
rcParams['figure.subplot.left'] = 0.175
rcParams['figure.subplot.bottom'] = 0.175
rcParams['font.size'] = 10
rcParams['legend.fontsize'] = 'small'
rcParams['text.usetex'] = True
def main():
global data
data = tables.open_file('master-ch4v2.h5', 'r')
#utils.set_suffix('E_1PeV')
#scatterplot_core_distance_vs_time()
#median_core_distance_vs_time()
boxplot_core_distance_vs_time()
#hists_core_distance_vs_time()
plot_front_passage()
def scatterplot_core_distance_vs_time():
plt.figure()
sim = data.root.showers.E_1PeV.zenith_0
electrons = sim.electrons
plt.loglog(electrons[:]['core_distance'], electrons[:]['arrival_time'], ',')
plt.xlim(1e0, 1e2)
plt.ylim(1e-3, 1e3)
plt.xlabel("Core distance [m]")
plt.ylabel("Arrival time [ns]")
utils.title("Shower front timing structure")
utils.saveplot()
def median_core_distance_vs_time():
plt.figure()
plot_and_fit_statistic(lambda a: scoreatpercentile(a, 25))
plot_and_fit_statistic(lambda a: scoreatpercentile(a, 75))
utils.title("Shower front timing structure (25, 75 %)")
utils.saveplot()
plt.xlabel("Core distance [m]")
plt.ylabel("Median arrival time [ns]")
legend(loc='lower right')
def plot_and_fit_statistic(func):
sim = data.root.showers.E_1PeV.zenith_0
electrons = sim.electrons
bins = np.logspace(0, 2, 25)
x, y = [], []
for low, high in zip(bins[:-1], bins[1:]):
sel = electrons.read_where('(low < core_distance) & (core_distance <= high)')
statistic = func(sel[:]['arrival_time'])
x.append(np.mean([low, high]))
y.append(statistic)
plt.loglog(x, y)
logx = log10(x)
logy = log10(y)
logf = lambda x, a, b: a * x + b
g = lambda x, a, b: 10 ** logf(log10(x), a, b)
popt, pcov = curve_fit(logf, logx, logy)
plot(x, g(x, *popt), label="f(x) = %.2e * x ^ %.2e" % (10 ** popt[1],
popt[0]))
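# The fit above is a straight line in log-log space, i.e. a power law t = 10**b * r**a.
# A minimal standalone sketch of the same idea on hypothetical data (using
# np.polyfit instead of curve_fit, purely for illustration):
#
#   r = np.array([1., 3., 10., 30.])
#   t = 0.5 * r ** 1.5
#   a, b = np.polyfit(np.log10(r), np.log10(t), 1)   # expect a ~ 1.5, 10**b ~ 0.5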
def boxplot_core_distance_vs_time():
plt.figure()
sim = data.root.showers.E_1PeV.zenith_0.shower_0
leptons = sim.leptons
#bins = np.logspace(0, 2, 25)
bins = np.linspace(0, 100, 15)
x, arrival_time, widths = [], [], []
t25, t50, t75 = [], [], []
for low, high in zip(bins[:-1], bins[1:]):
sel = leptons.read_where('(low < core_distance) & (core_distance <= high)')
x.append(np.mean([low, high]))
arrival_time.append(sel[:]['arrival_time'])
widths.append((high - low) / 2)
ts = sel[:]['arrival_time']
t25.append(scoreatpercentile(ts, 25))
t50.append(scoreatpercentile(ts, 50))
t75.append(scoreatpercentile(ts, 75))
fill_between(x, t25, t75, color='0.75')
plot(x, t50, 'o-', color='black')
plt.xlabel("Core distance [m]")
plt.ylabel("Arrival time [ns]")
#utils.title("Shower front timing structure")
utils.saveplot()
graph = GraphArtist()
graph.plot(x, t50, linestyle=None)
graph.shade_region(x, t25, t75)
graph.set_xlabel(r"Core distance [\si{\meter}]")
graph.set_ylabel(r"Arrival time [\si{\nano\second}]")
graph.set_ylimits(0, 30)
graph.set_xlimits(0, 100)
graph.save('plots/front-passage-vs-R')
def hists_core_distance_vs_time():
plt.figure()
sim = data.root.showers.E_1PeV.zenith_0
electrons = sim.electrons
bins = np.logspace(0, 2, 5)
for low, high in zip(bins[:-1], bins[1:]):
sel = electrons.read_where('(low < core_distance) & (core_distance <= high)')
arrival_time = sel[:]['arrival_time']
plt.hist(arrival_time, bins=np.logspace(-2, 3, 50), histtype='step',
label="%.2f <= log10(R) < %.2f" % (np.log10(low),
np.log10(high)))
plt.xscale('log')
plt.xlabel("Arrival Time [ns]")
plt.ylabel("Count")
plt.legend(loc='upper left')
utils.title("Shower front timing structure")
utils.saveplot()
def plot_front_passage():
sim = data.root.showers.E_1PeV.zenith_0.shower_0
leptons = sim.leptons
R = 40
dR = 2
low = R - dR
high = R + dR
global t
t = leptons.read_where('(low < core_distance) & (core_distance <= high)',
field='arrival_time')
n, bins, patches = hist(t, bins=linspace(0, 30, 31), histtype='step')
graph = GraphArtist()
graph.histogram(n, bins)
graph.set_xlabel(r"Arrival time [\si{\nano\second}]")
graph.set_ylabel("Number of leptons")
graph.set_ylimits(min=0)
graph.set_xlimits(0, 30)
graph.save('plots/front-passage')
if __name__ == '__main__':
main()
| gpl-3.0 |
jmbeuken/abinit | scripts/post_processing/abinit_eignc_to_bandstructure.py | 3 | 47417 | #!/usr/bin/python
#=================================================================#
# Script to plot the bandstructure from an abinit bandstructure #
# _EIG.nc netcdf file or from a wannier bandstructure, or from #
# an _EIG.nc file+GW file+ bandstructure _EIG.nc file #
#=================================================================#
#########
#IMPORTS#
#########
import numpy as N
import matplotlib.pyplot as P
import netCDF4 as nc
import sys
import os
import argparse
import time
#############
##VARIABLES##
#############
class VariableContainer:pass
#Constants
csts = VariableContainer()
csts.hartree2ev = N.float(27.211396132)
csts.ev2hartree = N.float(1/csts.hartree2ev)
csts.sqrtpi = N.float(N.sqrt(N.pi))
csts.invsqrtpi = N.float(1/csts.sqrtpi)
csts.TOLKPTS = N.float(0.00001)
###########
##CLASSES##
###########
class PolynomialFit(object):
def __init__(self):
self.degree = 2
class EigenvalueContainer(object):
nsppol = None
nkpt = None
mband = None
eigenvalues = None
units = None
wtk = None
filename = None
filefullpath = None
bd_indices = None
eigenvalue_type = None
kpoints = None
#kpoint_sampling_type: can be Monkhorst-Pack or Bandstructure
KPT_W90_TOL = N.float(1.0e-6)
KPT_DFT_TOL = N.float(1.0e-8)
kpoint_sampling_type = 'Monkhorst-Pack'
inputgvectors = None
gvectors = None
special_kpoints = None
special_kpoints_names = None
special_kpoints_indices = None
kpoint_path_values = None
kpoint_reduced_path_values = None
kpoint_path_length = None
#reduced_norm = None
norm_paths = None
norm_reduced_paths = None
def __init__(self,directory=None,filename=None):
if filename == None:return
if directory == None:directory='.'
self.filename = filename
self.filefullpath = '%s/%s' %(directory,filename)
self.file_open(self.filefullpath)
def set_kpoint_sampling_type(self,kpoint_sampling_type):
if kpoint_sampling_type != 'Monkhorst-Pack' and kpoint_sampling_type != 'Bandstructure':
            print 'ERROR: kpoint_sampling_type "%s" does not exist' %kpoint_sampling_type
print ' it should be "Monkhorst-Pack" or "Bandstructure" ... exit'
sys.exit()
self.kpoint_sampling_type = kpoint_sampling_type
def correct_kpt(self,kpoint,tolerance=N.float(1.0e-6)):
kpt_correct = N.array(kpoint,N.float)
changed = False
for ii in range(3):
if N.allclose(kpoint[ii],N.float(1.0/3.0),atol=tolerance):
kpt_correct[ii] = N.float(1.0/3.0)
changed = True
elif N.allclose(kpoint[ii],N.float(1.0/6.0),atol=tolerance):
kpt_correct[ii] = N.float(1.0/6.0)
changed = True
elif N.allclose(kpoint[ii],N.float(-1.0/6.0),atol=tolerance):
kpt_correct[ii] = N.float(-1.0/6.0)
changed = True
elif N.allclose(kpoint[ii],N.float(-1.0/3.0),atol=tolerance):
kpt_correct[ii] = N.float(-1.0/3.0)
changed = True
if changed:
print 'COMMENT: kpoint %15.12f %15.12f %15.12f has been changed to %15.12f %15.12f %15.12f' %(kpoint[0],kpoint[1],kpoint[2],kpt_correct[0],kpt_correct[1],kpt_correct[2])
return kpt_correct
def find_special_kpoints(self,gvectors=None):
if self.kpoint_sampling_type != 'Bandstructure':
            print 'ERROR: special kpoints are useful only for bandstructures ... returning from find_special_kpoints'
return
if self.eigenvalue_type == 'W90':
correct_kpt_tolerance = N.float(1.0e-4)
KPT_TOL = self.KPT_W90_TOL
elif self.eigenvalue_type == 'DFT':
correct_kpt_tolerance = N.float(1.0e-6)
KPT_TOL = self.KPT_DFT_TOL
else:
            print 'ERROR: eigenvalue_type is "%s" while it should be "W90" or "DFT" ... returning from find_special_kpoints' %self.eigenvalue_type
return
if gvectors == None:
self.inputgvectors = False
self.gvectors = N.identity(3,N.float)
else:
if N.shape(gvectors) != (3, 3):
print 'ERROR: wrong gvectors ... exiting now'
sys.exit()
self.inputgvectors = True
self.gvectors = gvectors
full_kpoints = N.zeros((self.nkpt,3),N.float)
for ikpt in range(self.nkpt):
full_kpoints[ikpt,:] = self.kpoints[ikpt,0]*self.gvectors[0,:]+self.kpoints[ikpt,1]*self.gvectors[1,:]+self.kpoints[ikpt,2]*self.gvectors[2,:]
delta_kpt = full_kpoints[1,:]-full_kpoints[0,:]
self.special_kpoints_indices = list()
self.special_kpoints = list()
self.special_kpoints_indices.append(0)
self.special_kpoints.append(self.correct_kpt(self.kpoints[0,:],tolerance=correct_kpt_tolerance))
for ikpt in range(1,self.nkpt-1):
thisdelta = full_kpoints[ikpt+1,:]-full_kpoints[ikpt,:]
if not N.allclose(thisdelta,delta_kpt,atol=KPT_TOL):
delta_kpt = thisdelta
self.special_kpoints_indices.append(ikpt)
self.special_kpoints.append(self.correct_kpt(self.kpoints[ikpt,:],tolerance=correct_kpt_tolerance))
self.special_kpoints_indices.append(N.shape(self.kpoints)[0]-1)
self.special_kpoints.append(self.correct_kpt(self.kpoints[-1,:],tolerance=correct_kpt_tolerance))
print 'Special Kpoints : '
print ' {0:d} : {1[0]: 8.8f} {1[1]: 8.8f} {1[2]: 8.8f}'.format(1,self.kpoints[0,:])
self.norm_paths = N.zeros((N.shape(self.special_kpoints_indices)[0]-1),N.float)
self.norm_reduced_paths = N.zeros((N.shape(self.special_kpoints_indices)[0]-1),N.float)
for ispkpt in range(1,N.shape(self.special_kpoints_indices)[0]):
self.norm_paths[ispkpt-1] = N.linalg.norm(full_kpoints[self.special_kpoints_indices[ispkpt]]-full_kpoints[self.special_kpoints_indices[ispkpt-1]])
self.norm_reduced_paths[ispkpt-1] = N.linalg.norm(self.special_kpoints[ispkpt]-self.special_kpoints[ispkpt-1])
print ' {2:d}-{3:d} path length : {0: 8.8f} | reduced path length : {1: 8.8f}'.\
format(self.norm_paths[ispkpt-1],self.norm_reduced_paths[ispkpt-1],ispkpt,ispkpt+1)
print ' {0:d} : {1[0]: 8.8f} {1[1]: 8.8f} {1[2]: 8.8f}'.format(ispkpt+1,self.kpoints[self.special_kpoints_indices[ispkpt],:])
self.kpoint_path_length = N.sum(self.norm_paths)
self.kpoint_reduced_path_length = N.sum(self.norm_reduced_paths)
self.normalized_kpoint_path_norm = self.norm_paths/self.kpoint_path_length
self.normalized_kpoint_reduced_path_norm = self.norm_reduced_paths/self.kpoint_reduced_path_length
kptredpathval = list()
kptpathval = list()
kptredpathval.append(N.float(0.0))
kptpathval.append(N.float(0.0))
curlen = N.float(0.0)
redcurlen = N.float(0.0)
for ispkpt in range(1,N.shape(self.special_kpoints_indices)[0]):
kptredpathval.extend(N.linspace(redcurlen,redcurlen+self.norm_reduced_paths[ispkpt-1],self.special_kpoints_indices[ispkpt]-self.special_kpoints_indices[ispkpt-1]+1)[1:])
kptpathval.extend(N.linspace(curlen,curlen+self.norm_paths[ispkpt-1],self.special_kpoints_indices[ispkpt]-self.special_kpoints_indices[ispkpt-1]+1)[1:])
redcurlen = redcurlen + self.norm_reduced_paths[ispkpt-1]
curlen = curlen + self.norm_paths[ispkpt-1]
self.kpoint_path_values = N.array(kptpathval,N.float)
self.kpoint_reduced_path_values = N.array(kptredpathval,N.float)
self.normalized_kpoint_path_values = self.kpoint_path_values/self.kpoint_path_length
self.normalized_kpoint_reduced_path_values = self.kpoint_reduced_path_values/self.kpoint_reduced_path_length
self.special_kpoints = N.array(self.special_kpoints,N.float)
def file_open(self,filefullpath):
if filefullpath[-3:] == '_GW':
self.gw_file_open(filefullpath)
elif filefullpath[-7:] == '_EIG.nc':
self.nc_eig_open(filefullpath)
elif filefullpath[-4:] == '.dat':
self.wannier_bs_file_open(filefullpath)
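    # Example (hypothetical file names): the constructor dispatches on the file
    # suffix handled above, e.g.
    #   ec = EigenvalueContainer(directory='.', filename='run_EIG.nc')    # DFT eigenvalues
    #   ec = EigenvalueContainer(directory='.', filename='run_GW')        # GW eigenvalues
    #   ec = EigenvalueContainer(directory='.', filename='run_band.dat')  # Wannier90 bands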
def has_eigenvalue(self,nsppol,isppol,kpoint,iband):
if self.nsppol != nsppol:
return False
for ikpt in range(self.nkpt):
if N.absolute(self.kpoints[ikpt,0]-kpoint[0]) < csts.TOLKPTS and \
N.absolute(self.kpoints[ikpt,1]-kpoint[1]) < csts.TOLKPTS and \
N.absolute(self.kpoints[ikpt,2]-kpoint[2]) < csts.TOLKPTS:
if iband >= self.bd_indices[isppol,ikpt,0]-1 and iband < self.bd_indices[isppol,ikpt,1]:
return True
return False
return False
def get_eigenvalue(self,nsppol,isppol,kpoint,iband):
for ikpt in range(self.nkpt):
if N.absolute(self.kpoints[ikpt,0]-kpoint[0]) < csts.TOLKPTS and \
N.absolute(self.kpoints[ikpt,1]-kpoint[1]) < csts.TOLKPTS and \
N.absolute(self.kpoints[ikpt,2]-kpoint[2]) < csts.TOLKPTS:
return self.eigenvalues[isppol,ikpt,iband]
def wannier_bs_file_open(self,filefullpath):
if not (os.path.isfile(filefullpath)):
            print 'ERROR : file "%s" does not exist' %filefullpath
print '... exiting now ...'
sys.exit()
print 'WARNING: no spin polarization reading yet for Wannier90 bandstructure files!'
self.eigenvalue_type = 'W90'
self.nsppol = None
self.nkpt = None
self.mband = None
self.eigenvalues = None
self.units = None
self.filefullpath = filefullpath
reader = open(self.filefullpath,'r')
filedata = reader.readlines()
reader.close()
for iline in range(len(filedata)):
if filedata[iline].strip() == '':
self.nkpt = iline
break
self.mband = N.int(len(filedata)/self.nkpt)
self.nsppol = 1
self.eigenvalues = N.zeros([self.nsppol,self.nkpt,self.mband],N.float)
self.kpoints = N.zeros([self.nkpt,3],N.float)
iline = 0
kpt_file = '%s.kpt' %filefullpath[:-4]
if os.path.isfile(kpt_file):
reader = open(kpt_file,'r')
kptdata = reader.readlines()
reader.close()
if N.int(kptdata[0]) != self.nkpt:
print 'ERROR : the number of kpoints in file "%s" is not the same as in "%s" ... exit' %(self.filefullpath,kpt_file)
sys.exit()
for ikpt in range(self.nkpt):
linesplit = kptdata[ikpt+1].split()
self.kpoints[ikpt,0] = N.float(linesplit[0])
self.kpoints[ikpt,1] = N.float(linesplit[1])
self.kpoints[ikpt,2] = N.float(linesplit[2])
else:
for ikpt in range(self.nkpt):
self.kpoints[ikpt,0] = N.float(filedata[ikpt].split()[0])
for iband in range(self.mband):
for ikpt in range(self.nkpt):
self.eigenvalues[0,ikpt,iband] = N.float(filedata[iline].split()[1])
iline = iline+1
iline = iline+1
self.eigenvalues = self.eigenvalues*csts.ev2hartree
self.units = 'Hartree'
def gw_file_open(self,filefullpath):
if not (os.path.isfile(filefullpath)):
            print 'ERROR : file "%s" does not exist' %filefullpath
print '... exiting now ...'
sys.exit()
self.eigenvalue_type = 'GW'
self.nsppol = None
self.nkpt = None
self.mband = None
self.eigenvalues = None
self.units = None
self.filefullpath = filefullpath
reader = open(self.filefullpath,'r')
filedata = reader.readlines()
reader.close()
self.nkpt = N.int(filedata[0].split()[0])
self.kpoints = N.ones([self.nkpt,3],N.float)
self.nsppol = N.int(filedata[0].split()[1])
self.bd_indices = N.zeros((self.nsppol,self.nkpt,2),N.int)
icur = 1
nbd_kpt = N.zeros([self.nsppol,self.nkpt],N.int)
for isppol in range(self.nsppol):
for ikpt in range(self.nkpt):
self.kpoints[ikpt,:] = N.array(filedata[icur].split()[:],N.float)
icur = icur + 1
nbd_kpt[isppol,ikpt] = N.int(filedata[icur])
self.bd_indices[isppol,ikpt,0] = N.int(filedata[icur+1].split()[0])
self.bd_indices[isppol,ikpt,1] = N.int(filedata[icur+nbd_kpt[isppol,ikpt]].split()[0])
icur = icur + nbd_kpt[isppol,ikpt] + 1
self.mband = N.max(self.bd_indices[:,:,1])
self.eigenvalues = N.zeros([self.nsppol,self.nkpt,self.mband],N.float)
self.eigenvalues[:,:,:] = N.nan
ii = 3
for isppol in range(self.nsppol):
for ikpt in range(self.nkpt):
for iband in range(self.bd_indices[isppol,ikpt,0]-1,self.bd_indices[isppol,ikpt,1]):
self.eigenvalues[isppol,ikpt,iband] = N.float(filedata[ii].split()[1])
ii = ii + 1
ii = ii + 2
self.eigenvalues = csts.ev2hartree*self.eigenvalues
self.units = 'Hartree'
def pfit_gw_file_write(self,polyfitlist,directory=None,filename=None,bdgw=None,energy_pivots=None,gwec=None):
if filename == None:return
if directory == None:directory='.'
filefullpath = '%s/%s' %(directory,filename)
if (os.path.isfile(filefullpath)):
user_input = raw_input('WARNING : file "%s" exists, do you want to overwrite it ? (y/n)' %filefullpath)
if not (user_input == 'y' or user_input == 'Y'):
return
writer = open(filefullpath,'w')
writer.write('%12s%12s\n' %(self.nkpt,self.nsppol))
if gwec == None:
for ikpt in range(self.nkpt):
for isppol in range(self.nsppol):
writer.write('%10.6f%10.6f%10.6f\n' %(self.kpoints[ikpt,0],self.kpoints[ikpt,1],self.kpoints[ikpt,2]))
writer.write('%4i\n' %(bdgw[1]-bdgw[0]+1))
for iband in range(bdgw[0]-1,bdgw[1]):
delta = N.polyval(polyfitlist[-1],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
for ipivot in range(len(energy_pivots)):
if csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband] <= energy_pivots[ipivot]:
delta = N.polyval(polyfitlist[ipivot],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
break
writer.write('%6i%9.4f%9.4f%9.4f\n' %(iband+1,csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband]+delta,delta,0.0))
else:
for ikpt in range(self.nkpt):
for isppol in range(self.nsppol):
writer.write('%10.6f%10.6f%10.6f\n' %(self.kpoints[ikpt,0],self.kpoints[ikpt,1],self.kpoints[ikpt,2]))
writer.write('%4i\n' %(bdgw[1]-bdgw[0]+1))
for iband in range(bdgw[0]-1,bdgw[1]):
if gwec.has_eigenvalue(self.nsppol,isppol,self.kpoints[ikpt],iband):
gw_eig = gwec.get_eigenvalue(self.nsppol,isppol,self.kpoints[ikpt],iband)
writer.write('%6i%9.4f%9.4f%9.4f\n' %(iband+1,csts.hartree2ev*gw_eig,csts.hartree2ev*(gw_eig-self.eigenvalues[isppol,ikpt,iband]),0.0))
else:
delta = N.polyval(polyfitlist[-1],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
for ipivot in range(len(energy_pivots)):
if csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband] <= energy_pivots[ipivot]:
delta = N.polyval(polyfitlist[ipivot],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
break
writer.write('%6i%9.4f%9.4f%9.4f\n' %(iband+1,csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband]+delta,delta,0.0))
writer.close()
def pfit_dft_to_gw_bs_write(self,polyfitlist,directory=None,filename=None,bdgw=None,energy_pivots=None,gwec=None):
if filename == None:return
if directory == None:directory='.'
filefullpath = '%s/%s' %(directory,filename)
if (os.path.isfile(filefullpath)):
user_input = raw_input('WARNING : file "%s" exists, do you want to overwrite it ? (y/n)' %filefullpath)
if not (user_input == 'y' or user_input == 'Y'):
return
writer = open(filefullpath,'w')
if gwec == None:
for ikpt in range(self.nkpt):
writer.write('%s' %ikpt)
for isppol in range(self.nsppol):
for iband in range(bdgw[0]-1,bdgw[1]):
delta = N.polyval(polyfitlist[-1],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
for ipivot in range(len(energy_pivots)):
if csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband] <= energy_pivots[ipivot]:
delta = N.polyval(polyfitlist[ipivot],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
break
writer.write(' %s' %(csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband]+delta))
writer.write('\n')
else:
print 'NOT SUPPORTED YET'
sys.exit()
writer.close()
def nc_eig_open(self,filefullpath):
if not (os.path.isfile(filefullpath)):
            print 'ERROR : file "%s" does not exist' %filefullpath
print '... exiting now ...'
sys.exit()
ncdata = nc.Dataset(filefullpath)
self.eigenvalue_type = 'DFT'
self.nsppol = None
self.nkpt = None
self.mband = None
self.eigenvalues = None
self.units = None
self.filefullpath = filefullpath
for dimname,dimobj in ncdata.dimensions.iteritems():
if dimname == 'nsppol':self.nsppol = N.int(len(dimobj))
if dimname == 'nkpt':self.nkpt = N.int(len(dimobj))
if dimname == 'mband':self.mband = N.int(len(dimobj))
for varname in ncdata.variables:
if varname == 'Eigenvalues':
varobj = ncdata.variables[varname]
varshape = N.shape(varobj[:])
self.units = None
for attrname in varobj.ncattrs():
if attrname == 'units':
self.units = varobj.getncattr(attrname)
if self.units == None:
print 'WARNING : units are not specified'
print '... assuming "Hartree" units ...'
self.units = 'Hartree'
elif self.units != 'Hartree':
print 'ERROR : units are unknown : "%s"' %self.units
print '... exiting now ...'
sys.exit()
self.eigenvalues = N.reshape(N.array(varobj,N.float),varshape)
self.nsppol = varshape[0]
self.nkpt = varshape[1]
self.kpoints = -1*N.ones((self.nkpt,3),N.float)
self.mband = varshape[2]
self.bd_indices = N.zeros((self.nsppol,self.nkpt,2),N.int)
self.bd_indices[:,:,0] = 1
self.bd_indices[:,:,1] = self.mband
break
for varname in ncdata.variables:
if varname == 'Kptns':
varobj = ncdata.variables[varname]
varshape = N.shape(varobj[:])
self.kpoints = N.reshape(N.array(varobj,N.float),varshape)
def write_bandstructure_to_file(self,filename,option_kpts='bohrm1_units'):
#if option_kpts is set to 'normalized', the path of the bandstructure will be normalized to 1 (and special k-points correctly chosen)
if self.kpoint_sampling_type != 'Bandstructure':
print 'ERROR: kpoint_sampling_type is not "Bandstructure" ... returning from write_bandstructure_to_file'
return
if self.nsppol > 1:
print 'ERROR: number of spins is more than 1, this is not fully tested ... use with care !'
writer = open(filename,'w')
writer.write('# BANDSTRUCTURE FILE FROM DAVID\'S SCRIPT\n')
writer.write('# nsppol = %s\n' %self.nsppol)
writer.write('# nband = %s\n' %self.mband)
writer.write('# eigenvalue_type = %s\n' %self.eigenvalue_type)
if self.inputgvectors:
writer.write('# inputgvectors = 1 (%s)\n' %self.inputgvectors)
else:
writer.write('# inputgvectors = 0 (%s)\n' %self.inputgvectors)
writer.write('# gvectors(1) = %20.17f %20.17f %20.17f \n' %(self.gvectors[0,0],self.gvectors[0,1],self.gvectors[0,2]))
writer.write('# gvectors(2) = %20.17f %20.17f %20.17f \n' %(self.gvectors[1,0],self.gvectors[1,1],self.gvectors[1,2]))
writer.write('# gvectors(3) = %20.17f %20.17f %20.17f \n' %(self.gvectors[2,0],self.gvectors[2,1],self.gvectors[2,2]))
writer.write('# special_kpoints_number = %s\n' %(len(self.special_kpoints_indices)))
writer.write('# list of special kpoints : (given in reduced coordinates, value_path is in Bohr^-1, value_red_path has its total path normalized to 1)\n')
for ii in range(len(self.special_kpoints_indices)):
ispkpt = self.special_kpoints_indices[ii]
spkpt = self.special_kpoints[ii]
writer.write('# special_kpt_index %5s : %20.17f %20.17f %20.17f (value_path = %20.17f | value_red_path = %20.17f)\n' %(ispkpt,spkpt[0],spkpt[1],spkpt[2],self.kpoint_path_values[ispkpt],self.kpoint_reduced_path_values[ispkpt]))
writer.write('# special_kpoints_names :\n')
for ii in range(len(self.special_kpoints_indices)):
ispkpt = self.special_kpoints_indices[ii]
spkpt = self.special_kpoints[ii]
writer.write('# special_kpt_name %3s : "%s" : %20.17f %20.17f %20.17f\n' %(ii+1,self.special_kpoints_names[ii],spkpt[0],spkpt[1],spkpt[2]))
writer.write('# kpoint_path_length = %20.17f \n' %(self.kpoint_path_length))
writer.write('# kpoint_path_number = %s \n' %(self.nkpt))
if self.inputgvectors:
writer.write('# kpoint_path_units = %s\n' %(option_kpts))
else:
writer.write('# kpoint_path_units = %s (!!! CONSIDERING UNITARY GVECTORS MATRIX !!!)\n' %(option_kpts))
writer.write('#BEGIN\n')
if option_kpts == 'bohrm1_units':
values_path = self.kpoint_path_values
elif option_kpts == 'reduced':
values_path = self.kpoint_reduced_path_values
elif option_kpts == 'bohrm1_units_normalized':
values_path = self.normalized_kpoint_path_values
elif option_kpts == 'reduced_normalized':
values_path = self.normalized_kpoint_reduced_path_values
else:
print 'ERROR: wrong option_kpts ... exit'
writer.write('... CANCELLED (wrong option_kpts)')
writer.close()
sys.exit()
for isppol in range(self.nsppol):
writer.write('#isppol %s\n' %isppol)
for iband in range(self.mband):
writer.write('#iband %5s (band number %s)\n' %(iband,iband+1))
for ikpt in range(self.nkpt):
writer.write('%20.17f %20.17f\n' %(values_path[ikpt],self.eigenvalues[isppol,ikpt,iband]))
writer.write('\n')
writer.write('#END\n')
writer.write('\n#KPT_LIST\n')
for ikpt in range(self.nkpt):
writer.write('# %6d : %20.17f %20.17f %20.17f\n' %(ikpt,self.kpoints[ikpt,0],self.kpoints[ikpt,1],self.kpoints[ikpt,2]))
writer.close()
def read_bandstructure_from_file(self,filename):
reader = open(filename,'r')
bs_data = reader.readlines()
reader.close()
self.gvectors = N.identity(3,N.float)
self.kpoint_sampling_type = 'Bandstructure'
self.special_kpoints_indices = list()
self.special_kpoints = list()
for ii in range(len(bs_data)):
if bs_data[ii] == '#BEGIN\n':
ibegin = ii
break
elif bs_data[ii][:10] == '# nsppol =':
self.nsppol = N.int(bs_data[ii][10:])
elif bs_data[ii][:9] == '# nband =':
self.mband = N.int(bs_data[ii][9:])
elif bs_data[ii][:19] == '# eigenvalue_type =':
self.eigenvalue_type = bs_data[ii][19:].strip()
elif bs_data[ii][:17] == '# inputgvectors =':
tt = N.int(bs_data[ii][18])
if tt == 1:
self.inputgvectors = True
elif tt == 0:
self.inputgvectors = False
else:
print 'ERROR: reading inputgvectors ... exit'
sys.exit()
elif bs_data[ii][:15] == '# gvectors(1) =':
sp = bs_data[ii][15:].split()
self.gvectors[0,0] = N.float(sp[0])
self.gvectors[0,1] = N.float(sp[1])
self.gvectors[0,2] = N.float(sp[2])
elif bs_data[ii][:15] == '# gvectors(2) =':
sp = bs_data[ii][15:].split()
self.gvectors[1,0] = N.float(sp[0])
self.gvectors[1,1] = N.float(sp[1])
self.gvectors[1,2] = N.float(sp[2])
elif bs_data[ii][:15] == '# gvectors(3) =':
sp = bs_data[ii][15:].split()
self.gvectors[2,0] = N.float(sp[0])
self.gvectors[2,1] = N.float(sp[1])
self.gvectors[2,2] = N.float(sp[2])
elif bs_data[ii][:26] == '# special_kpoints_number =':
special_kpoints_number = N.int(bs_data[ii][26:])
self.special_kpoints_names = ['']*special_kpoints_number
elif bs_data[ii][:22] == '# special_kpt_index':
sp = bs_data[ii][22:].split()
self.special_kpoints_indices.append(N.int(sp[0]))
self.special_kpoints.append(N.array([sp[2],sp[3],sp[4]]))
elif bs_data[ii][:21] == '# special_kpt_name':
sp = bs_data[ii][21:].split()
ispkpt = N.int(sp[0])-1
self.special_kpoints_names[ispkpt] = sp[2][1:-1]
elif bs_data[ii][:22] == '# kpoint_path_length =':
self.kpoint_path_length = N.float(bs_data[ii][22:])
elif bs_data[ii][:22] == '# kpoint_path_number =':
self.nkpt = N.int(bs_data[ii][22:])
elif bs_data[ii][:21] == '# kpoint_path_units =':
kpoint_path_units = bs_data[ii][21:].strip()
self.special_kpoints_indices = N.array(self.special_kpoints_indices,N.int)
self.special_kpoints = N.array(self.special_kpoints,N.float)
if len(self.special_kpoints_indices) != special_kpoints_number or len(self.special_kpoints) != special_kpoints_number:
print 'ERROR: reading the special kpoints ... exit'
sys.exit()
self.kpoint_path_values = N.zeros([self.nkpt],N.float)
self.kpoint_reduced_path_values = N.zeros([self.nkpt],N.float)
if kpoint_path_units == 'bohrm1_units':
jj = 0
for ii in range(ibegin+1,len(bs_data)):
if bs_data[ii][:7] == '#isppol' or bs_data[ii][:6] == '#iband':continue
if bs_data[ii] == '\n':
break
self.kpoint_path_values[jj] = N.float(bs_data[ii].split()[0])
jj = jj + 1
if jj != self.nkpt:
print 'ERROR: reading bandstructure file ... exit'
sys.exit()
self.normalized_kpoint_path_values = self.kpoint_path_values/self.kpoint_path_length
if kpoint_path_units == 'bohrm1_units_normalized':
jj = 0
for ii in range(ibegin+1,len(bs_data)):
if bs_data[ii][:7] == '#isppol' or bs_data[ii][:6] == '#iband':continue
if bs_data[ii] == '\n':
break
self.normalized_kpoint_path_values[jj] = N.float(bs_data[ii].split()[0])
jj = jj + 1
if jj != self.nkpt:
print 'ERROR: reading bandstructure file ... exit'
sys.exit()
self.kpoint_path_values = self.normalized_kpoint_path_values*self.kpoint_path_length
elif kpoint_path_units == 'reduced_normalized':
jj = 0
for ii in range(ibegin+1,len(bs_data)):
if bs_data[ii][:7] == '#isppol' or bs_data[ii][:6] == '#iband':continue
if bs_data[ii] == '\n':
break
self.normalized_kpoint_reduced_path_values[jj] = N.float(bs_data[ii].split()[0])
jj = jj + 1
if jj != self.nkpt:
print 'ERROR: reading bandstructure file ... exit'
sys.exit()
self.kpoint_reduced_path_values = self.normalized_kpoint_reduced_path_values/self.kpoint_reduced_path_length
elif kpoint_path_units == 'reduced':
jj = 0
for ii in range(ibegin+1,len(bs_data)):
if bs_data[ii][:7] == '#isppol' or bs_data[ii][:6] == '#iband':continue
if bs_data[ii] == '\n':
break
self.kpoint_reduced_path_values[jj] = N.float(bs_data[ii].split()[0])
jj = jj + 1
if jj != self.nkpt:
print 'ERROR: reading bandstructure file ... exit'
sys.exit()
self.normalized_kpoint_reduced_path_values = self.kpoint_reduced_path_values/self.kpoint_reduced_path_length
self.eigenvalues = N.zeros([self.nsppol,self.nkpt,self.mband],N.float)
check_nband = 0
for ii in range(ibegin+1,len(bs_data)):
if bs_data[ii][:7] == '#isppol':
isppol = N.int(bs_data[ii][7:])
elif bs_data[ii][:6] == '#iband':
iband = N.int(bs_data[ii][6:].split()[0])
ikpt = 0
elif bs_data[ii][:4] == '#END':
break
elif bs_data[ii] == '\n':
check_nband = check_nband + 1
else:
self.eigenvalues[isppol,ikpt,iband] = N.float(bs_data[ii].split()[1])
ikpt = ikpt + 1
def check_gw_vs_dft_parameters(dftec,gwec):
if gwec.eigenvalue_type != 'GW' or dftec.eigenvalue_type != 'DFT':
print 'ERROR: eigenvalue files do not contain GW and DFT eigenvalues ... exiting now'
sys.exit()
if dftec.nsppol != gwec.nsppol or dftec.nkpt != gwec.nkpt:
print 'ERROR: the number of spins/kpoints is not the same in the GW and DFT files used to make the interpolation ... exiting now'
sys.exit()
for ikpt in range(dftec.nkpt):
if N.absolute(dftec.kpoints[ikpt,0]-gwec.kpoints[ikpt,0]) > csts.TOLKPTS or \
N.absolute(dftec.kpoints[ikpt,1]-gwec.kpoints[ikpt,1]) > csts.TOLKPTS or \
N.absolute(dftec.kpoints[ikpt,2]-gwec.kpoints[ikpt,2]) > csts.TOLKPTS:
print 'ERROR: the kpoints are not the same in the GW and DFT files used to make the interpolation ... exiting now'
sys.exit()
def plot_gw_vs_dft_eig(dftec,gwec,vbm_index,energy_pivots=None,polyfit_degrees=None):
if gwec.eigenvalue_type != 'GW' or dftec.eigenvalue_type != 'DFT':
print 'ERROR: eigenvalue containers do not contain GW and DFT eigenvalues ... exiting now'
sys.exit()
if dftec.nsppol != gwec.nsppol or dftec.nkpt != gwec.nkpt:
print 'ERROR: the number of spins/kpoints is not the same in the GW and DFT containers ... exiting now'
sys.exit()
valdftarray = N.array([],N.float)
conddftarray = N.array([],N.float)
valgwarray = N.array([],N.float)
condgwarray = N.array([],N.float)
for isppol in range(dftec.nsppol):
for ikpt in range(dftec.nkpt):
ibdmin = N.max([dftec.bd_indices[isppol,ikpt,0],gwec.bd_indices[isppol,ikpt,0]])-1
ibdmax = N.min([dftec.bd_indices[isppol,ikpt,1],gwec.bd_indices[isppol,ikpt,1]])-1
valdftarray = N.append(valdftarray,csts.hartree2ev*dftec.eigenvalues[isppol,ikpt,ibdmin:vbm_index])
valgwarray = N.append(valgwarray,csts.hartree2ev*gwec.eigenvalues[isppol,ikpt,ibdmin:vbm_index])
conddftarray = N.append(conddftarray,csts.hartree2ev*dftec.eigenvalues[isppol,ikpt,vbm_index:ibdmax+1])
condgwarray = N.append(condgwarray,csts.hartree2ev*gwec.eigenvalues[isppol,ikpt,vbm_index:ibdmax+1])
if energy_pivots == None:
if plot_figures == 1:
P.figure(1)
P.hold(True)
P.grid(True)
P.plot(valdftarray,valgwarray,'bx')
P.plot(conddftarray,condgwarray,'rx')
P.xlabel('DFT eigenvalues (in eV)')
P.ylabel('GW eigenvalues (in eV)')
P.figure(2)
P.hold(True)
P.grid(True)
P.plot(valdftarray,valgwarray-valdftarray,'bx')
P.plot(conddftarray,condgwarray-conddftarray,'rx')
P.xlabel('DFT eigenvalues (in eV)')
P.ylabel('GW correction to the DFT eigenvalues (in eV)')
P.show()
return
polyfitlist = list()
if len(polyfit_degrees) == 1:
print 'ERROR: making a fit with only one interval is not allowed ... exiting now'
sys.exit()
dftarray = N.append(valdftarray,conddftarray)
gwarray = N.append(valgwarray,condgwarray)
dftarray_list = list()
gwarray_list = list()
for iinterval in range(len(polyfit_degrees)):
tmpdftarray = N.array([],N.float)
tmpgwarray = N.array([],N.float)
if iinterval == 0:
emin = None
emax = energy_pivots[0]
for ii in range(len(dftarray)):
if dftarray[ii] <= emax:
tmpdftarray = N.append(tmpdftarray,[dftarray[ii]])
tmpgwarray = N.append(tmpgwarray,[gwarray[ii]])
elif iinterval == len(polyfit_degrees)-1:
emin = energy_pivots[-1]
emax = None
for ii in range(len(dftarray)):
if dftarray[ii] >= emin:
tmpdftarray = N.append(tmpdftarray,[dftarray[ii]])
tmpgwarray = N.append(tmpgwarray,[gwarray[ii]])
else:
emin = energy_pivots[iinterval-1]
emax = energy_pivots[iinterval]
for ii in range(len(dftarray)):
if dftarray[ii] >= emin and dftarray[ii] <= emax:
tmpdftarray = N.append(tmpdftarray,[dftarray[ii]])
tmpgwarray = N.append(tmpgwarray,[gwarray[ii]])
dftarray_list.append(tmpdftarray)
gwarray_list.append(tmpgwarray)
pfit = N.polyfit(tmpdftarray,tmpgwarray-tmpdftarray,polyfit_degrees[iinterval])
polyfitlist.append(pfit)
if plot_figures == 1:
linspace_npoints = 200
valpoly_x = N.linspace(N.min(valdftarray),N.max(valdftarray),linspace_npoints)
condpoly_x = N.linspace(N.min(conddftarray),N.max(conddftarray),linspace_npoints)
P.figure(3)
P.hold(True)
P.grid(True)
P.plot(valdftarray,valgwarray-valdftarray,'bx')
P.plot(conddftarray,condgwarray-conddftarray,'rx')
[x_min,x_max] = P.xlim()
for iinterval in range(len(polyfit_degrees)):
if iinterval == 0:
tmppoly_x = N.linspace(x_min,energy_pivots[iinterval],linspace_npoints)
elif iinterval == len(polyfit_degrees)-1:
tmppoly_x = N.linspace(energy_pivots[iinterval-1],x_max,linspace_npoints)
else:
tmppoly_x = N.linspace(energy_pivots[iinterval-1],energy_pivots[iinterval],linspace_npoints)
P.plot(tmppoly_x,N.polyval(polyfitlist[iinterval],tmppoly_x),'k')
for ipivot in range(len(energy_pivots)):
en = energy_pivots[ipivot]
P.plot([en,en],[N.polyval(polyfitlist[ipivot],en),N.polyval(polyfitlist[ipivot+1],en)],'k-.')
P.xlabel('DFT eigenvalues (in eV)')
P.ylabel('GW correction to the DFT eigenvalues (in eV)')
P.figure(4)
P.hold(True)
P.grid(True)
for iinterval in range(len(polyfit_degrees)):
P.plot(dftarray_list[iinterval],gwarray_list[iinterval]-dftarray_list[iinterval]-N.polyval(polyfitlist[iinterval],dftarray_list[iinterval]),'bx')
[x_min,x_max] = P.xlim()
P.plot([x_min,x_max],[0,0],'k-')
P.xlabel('DFT eigenvalues (in eV)')
P.ylabel('Error in the fit (in eV)')
P.show()
return polyfitlist
def compare_bandstructures(ec_ref,ec_test):
nspkpt_ref = len(ec_ref.special_kpoints)
nspkpt_test = len(ec_test.special_kpoints)
if nspkpt_ref != nspkpt_test:
print 'ERROR: The number of special kpoints is different in the two files ... exit'
sys.exit()
eig_type_ref = ec_ref.eigenvalue_type
eig_type_test = ec_test.eigenvalue_type
print eig_type_ref,eig_type_test
if eig_type_ref == 'DFT' and eig_type_test == 'W90':
TOL_KPTS = N.float(1.0e-4)
else:
TOL_KPTS = N.float(1.0e-6)
print TOL_KPTS
for ispkpt in range(nspkpt_ref):
print 'difference between the two :',ec_ref.special_kpoints[ispkpt,:]-ec_test.special_kpoints[ispkpt,:]
if not N.allclose(ec_ref.special_kpoints[ispkpt,:],ec_test.special_kpoints[ispkpt,:],atol=TOL_KPTS):
print 'ERROR: The kpoints are not the same :'
print ' Kpt #%s ' %ispkpt
print ' Reference => %20.17f %20.17f %20.17f' %(ec_ref.special_kpoints[ispkpt,0],ec_ref.special_kpoints[ispkpt,1],ec_ref.special_kpoints[ispkpt,2])
print ' Compared => %20.17f %20.17f %20.17f' %(ec_test.special_kpoints[ispkpt,0],ec_test.special_kpoints[ispkpt,1],ec_test.special_kpoints[ispkpt,2])
print ' ... exit'
sys.exit()
mband_comparison = N.min([ec_ref.mband,ec_test.mband])
if mband_comparison < ec_ref.mband:
print 'Number of bands in the test bandstructure is lower than the number of bands in the reference (%s)' %ec_ref.mband
print ' => Comparison will proceed with %s bands' %ec_test.mband
elif mband_comparison < ec_test.mband:
print 'Number of bands in the reference bandstructure is lower than the number of bands in the test bandstructure (%s)' %ec_test.mband
print ' => Comparison will only proceed with %s bands of the test bandstructure' %ec_ref.mband
else:
print 'Number of bands in the reference and test bandstructure is the same'
print ' => Comparison will proceed with %s bands' %mband_comparison
# eig_test_ref_path = ec_ref.eigenvalues[:,:,:mband_comparison]
rmsd_per_band = N.zeros([ec_ref.nsppol,mband_comparison],N.float)
nrmsd_per_band = N.zeros([ec_ref.nsppol,mband_comparison],N.float)
mae_per_band = N.zeros([ec_ref.nsppol,mband_comparison],N.float)
for isppol in range(ec_ref.nsppol):
for iband in range(mband_comparison):
interp = N.interp(ec_ref.normalized_kpoint_path_values,ec_test.normalized_kpoint_path_values,ec_test.eigenvalues[isppol,:,iband])
rmsd_per_band[isppol,iband] = N.sqrt(N.sum((csts.hartree2ev*interp-csts.hartree2ev*ec_ref.eigenvalues[isppol,:,iband])**2)/ec_ref.nkpt)
mae_per_band[isppol,iband] = N.sum(N.abs(csts.hartree2ev*interp-csts.hartree2ev*ec_ref.eigenvalues[isppol,:,iband]))/ec_ref.nkpt
P.figure(1)
P.plot(mae_per_band[0,:])
P.figure(2)
P.plot(rmsd_per_band[0,:])
P.show()
def get_gvectors():
if os.path.isfile('.gvectors.bsinfo'):
print 'File ".gvectors.bsinfo found with the following gvectors information :"'
try:
gvectors_reader = open('.gvectors.bsinfo','r')
gvectors_data = gvectors_reader.readlines()
gvectors_reader.close()
trial_gvectors = N.identity(3,N.float)
trial_gvectors[0,0] = N.float(gvectors_data[0].split()[0])
trial_gvectors[0,1] = N.float(gvectors_data[0].split()[1])
trial_gvectors[0,2] = N.float(gvectors_data[0].split()[2])
trial_gvectors[1,0] = N.float(gvectors_data[1].split()[0])
trial_gvectors[1,1] = N.float(gvectors_data[1].split()[1])
trial_gvectors[1,2] = N.float(gvectors_data[1].split()[2])
trial_gvectors[2,0] = N.float(gvectors_data[2].split()[0])
trial_gvectors[2,1] = N.float(gvectors_data[2].split()[1])
trial_gvectors[2,2] = N.float(gvectors_data[2].split()[2])
print ' gvectors(1) = [ %20.17f %20.17f %20.17f ]' %(trial_gvectors[0,0],trial_gvectors[0,1],trial_gvectors[0,2])
print ' gvectors(2) = [ %20.17f %20.17f %20.17f ]' %(trial_gvectors[1,0],trial_gvectors[1,1],trial_gvectors[1,2])
print ' gvectors(3) = [ %20.17f %20.17f %20.17f ]' %(trial_gvectors[2,0],trial_gvectors[2,1],trial_gvectors[2,2])
except:
print 'ERROR: file ".gvectors.bsinfo" might be corrupted (empty or not formatted correctly ...)'
print ' you should remove the file and start again or check the file ... exit'
sys.exit()
test = raw_input('Press <ENTER> to use these gvectors (any other character to enter manually other gvectors)\n')
if test == '':
gvectors = trial_gvectors
else:
gvectors = N.identity(3,N.float)
test = raw_input('Enter G1 (example : "0.153 0 0") : \n')
gvectors[0,0] = N.float(test.split()[0])
gvectors[0,1] = N.float(test.split()[1])
gvectors[0,2] = N.float(test.split()[2])
test = raw_input('Enter G2 (example : "0.042 1.023 0") : \n')
gvectors[1,0] = N.float(test.split()[0])
gvectors[1,1] = N.float(test.split()[1])
gvectors[1,2] = N.float(test.split()[2])
test = raw_input('Enter G3 (example : "0 0 1.432") : \n')
gvectors[2,0] = N.float(test.split()[0])
gvectors[2,1] = N.float(test.split()[1])
gvectors[2,2] = N.float(test.split()[2])
test = raw_input('Do you want to overwrite the gvectors contained in the file ".gvectors.bsinfo" ? (<ENTER> for yes, anything else for no)\n')
if test == '':
print 'Writing gvectors to file ".gvectors.bsinfo" ...'
gvectors_writer = open('.gvectors.bsinfo','w')
                gvectors_writer.write('%20.17f %20.17f %20.17f\n' %(gvectors[0,0],gvectors[0,1],gvectors[0,2]))
                gvectors_writer.write('%20.17f %20.17f %20.17f\n' %(gvectors[1,0],gvectors[1,1],gvectors[1,2]))
                gvectors_writer.write('%20.17f %20.17f %20.17f\n' %(gvectors[2,0],gvectors[2,1],gvectors[2,2]))
gvectors_writer.close()
print '... done'
else:
        test = raw_input('Do you want to enter the reciprocal space primitive vectors (y/n)\n')
if test == 'y':
gvectors = N.identity(3,N.float)
test = raw_input('Enter G1 (example : "0.153 0 0") : ')
gvectors[0,0] = N.float(test.split()[0])
gvectors[0,1] = N.float(test.split()[1])
gvectors[0,2] = N.float(test.split()[2])
test = raw_input('Enter G2 (example : "0.042 1.023 0") : ')
gvectors[1,0] = N.float(test.split()[0])
gvectors[1,1] = N.float(test.split()[1])
gvectors[1,2] = N.float(test.split()[2])
test = raw_input('Enter G3 (example : "0 0 1.432") : ')
gvectors[2,0] = N.float(test.split()[0])
gvectors[2,1] = N.float(test.split()[1])
gvectors[2,2] = N.float(test.split()[2])
test = raw_input('Do you want to write the gvectors to file ".gvectors.bsinfo" ? (<ENTER> for yes, anything else for no)\n')
if test == '':
print 'Writing gvectors to file ".gvectors.bsinfo" ...'
gvectors_writer = open('.gvectors.bsinfo','w')
gvectors_writer.write('%20.17f %20.17f %20.17f\n' %(gvectors[0,0],gvectors[0,1],gvectors[0,2]))
gvectors_writer.write('%20.17f %20.17f %20.17f\n' %(gvectors[1,0],gvectors[1,1],gvectors[1,2]))
gvectors_writer.write('%20.17f %20.17f %20.17f\n' %(gvectors[2,0],gvectors[2,1],gvectors[2,2]))
gvectors_writer.close()
print '... done'
else:
gvectors = None
return gvectors
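# The ".gvectors.bsinfo" file read/written above is a plain text file holding the
# three reciprocal-space primitive vectors, one per line, as whitespace-separated
# floats (presumably in Bohr^-1, consistent with the Bohr^-1 path units used
# elsewhere in this script). Illustrative content only:
#   0.15300000000000000   0.00000000000000000   0.00000000000000000
#   0.04200000000000000   1.02300000000000002   0.00000000000000000
#   0.00000000000000000   0.00000000000000000   1.43200000000000005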
# Parse the command line options
parser = argparse.ArgumentParser(description='Tool for plotting dft bandstructures')
parser.add_argument('files',help='files to be opened',nargs=1)
args = parser.parse_args()
args_dict = vars(args)
if args_dict['files']:
print 'will open the file'
else:
print 'ERROR: you should provide some bandstructure file ! exiting now ...'
sys.exit()
dft_file = args_dict['files'][0]
gvectors = get_gvectors()
ec_dft = EigenvalueContainer(directory='.',filename=dft_file)
ec_dft.set_kpoint_sampling_type('Bandstructure')
ec_dft.find_special_kpoints(gvectors)
print 'Number of bands in the file : %s' %(N.shape(ec_dft.eigenvalues)[2])
test = raw_input('Enter the number of bands to be plotted (<ENTER> : %s) : \n' %(N.shape(ec_dft.eigenvalues)[2]))
if test == '':
nbd_plot = N.shape(ec_dft.eigenvalues)[2]
else:
nbd_plot = N.int(test)
if nbd_plot > N.shape(ec_dft.eigenvalues)[2]:
print 'ERROR: the number of bands to be plotted is larger than the number available ... exit'
sys.exit()
ec_dft.special_kpoints_names = ['']*len(ec_dft.special_kpoints_indices)
for ii in range(len(ec_dft.special_kpoints_indices)):
ec_dft.special_kpoints_names[ii] = 'k%s' %(ii+1)
print 'List of special kpoints :'
for ii in range(len(ec_dft.special_kpoints_indices)):
spkpt = ec_dft.kpoints[ec_dft.special_kpoints_indices[ii]]
print ' Kpoint %s : %s %s %s' %(ii+1,spkpt[0],spkpt[1],spkpt[2])
print 'Enter the name of the %s special k-points :' %(len(ec_dft.special_kpoints_indices))
test = raw_input('')
if len(test.split()) == len(ec_dft.special_kpoints_indices):
for ii in range(len(ec_dft.special_kpoints_indices)):
ec_dft.special_kpoints_names[ii] = test.split()[ii]
test = raw_input('Enter base name for bandstructure file : \n')
ec_dft.write_bandstructure_to_file('%s.bandstructure' %test)
P.figure(1,figsize=(3.464,5))
P.hold('on')
P.grid('on')
P.xticks(N.take(ec_dft.kpoint_reduced_path_values,N.array(ec_dft.special_kpoints_indices,N.int)),ec_dft.special_kpoints_names)
if ec_dft.nsppol == 1:
for iband in range(nbd_plot):
P.plot(ec_dft.kpoint_reduced_path_values,ec_dft.eigenvalues[0,:,iband]*csts.hartree2ev,'k-',linewidth=2)
elif ec_dft.nsppol == 2:
for iband in range(nbd_plot):
P.plot(ec_dft.kpoint_reduced_path_values,ec_dft.eigenvalues[0,:,iband]*csts.hartree2ev,'k-',linewidth=2)
P.plot(ec_dft.kpoint_reduced_path_values,ec_dft.eigenvalues[1,:,iband]*csts.hartree2ev,'r-',linewidth=2)
P.show()
| gpl-3.0 |
robios/PyTES | pytes/Util.py | 1 | 32573 | import warnings
import numpy as np
import time
from struct import unpack
from scipy.stats import norm
from scipy.signal import tukey
from Filter import median_filter
import Analysis, Filter, Constants
def savefits(data, filename, vmax=1.0, sps=1e6, bits=14, noise=False, clobber=True):
"""
Save pulse/noise to FITS file
"""
import pyfits as pf
# Prepare data
data = (np.asarray(data)/vmax*2**(bits-1)).round()
# Column Name
if noise:
colname = 'NoiseRec'
else:
colname = 'PulseRec'
# Columns
col_t = pf.Column(name='TIME', format='1D', unit='s', array=np.zeros(data.shape[0], dtype=int))
col_data = pf.Column(name=colname, format='%dI' % data.shape[1], unit='V', array=data)
cols = pf.ColDefs([col_t, col_data])
tbhdu = pf.BinTableHDU.from_columns(cols)
# Name of extension
exthdr = tbhdu.header
exthdr['EXTNAME'] = ('Record', 'name of this binary table extension')
exthdr['EXTVER'] = (1, 'extension version number')
# Add more attributes
exthdr['TSCAL2'] = (vmax/2**(bits-1), '[V/ch]')
exthdr['TZERO2'] = (0., '[V]')
exthdr['THSCL2'] = (sps**-1, '[s/bin] horizontal resolution of record')
exthdr['THZER2'] = (0, '[s] horizontal offset of record')
exthdr['THSAM2'] = (data.shape[1], 'sample number of record')
exthdr['THUNI2'] = ('s', 'physical unit of sampling step of record')
exthdr['TRMIN2'] = (-2**(bits-1)+1, '[channel] minimum number of each sample')
exthdr['TRMAX2'] = (2**(bits-1)-1, '[channel] maximum number of each sample')
exthdr['TRBIN2'] = (1, '[channel] default bin number of each sample')
# More attributes
exthdr['TSTART'] = (0, 'start time of experiment in total second')
exthdr['TSTOP'] = (0, 'end time of experiment in total second')
exthdr['TEND'] = (0, 'end time of experiment (obsolete)')
exthdr['DATE'] = (time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime()), 'file creation date (UT)')
# We anyway need Primary HDU
hdu = pf.PrimaryHDU()
# Write to FITS
thdulist = pf.HDUList([hdu, tbhdu])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
thdulist.writeto(filename, clobber=clobber)
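# Example (hypothetical usage sketch): save an (N, nsamples) array of noise
# records, in volts and sampled at 1 MS/s, to "noise.fits":
#
#   records = np.random.normal(0.0, 1e-3, size=(100, 1024))
#   savefits(records, "noise.fits", vmax=1.0, sps=1e6, bits=14, noise=True)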
def fopen(filename):
"""
Read FITS file
Parameters
==========
filename: file number to read
Returns
=======
t: time array
wave: waveform array
"""
import pyfits as pf
# Open fits file and get pulse/noise data
header = pf.open(filename)
wave = header[1].data.field(1).copy()
dt = header[1].header['THSCL2']
t = np.arange(wave.shape[-1]) * dt
header.close()
return t, wave
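# Example (hypothetical file name): read back records saved with savefits
#
#   t, pulse = fopen("pulse.fits")   # t in seconds, pulse shaped (records, samples)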
def yopen(filenumber, summary=False, nf=None, tmin=None, tmax=None, raw=False):
"""
Read Yokogawa WVF file
Parameters
==========
filenumber: file number to read
    summary: to summarize waves (default: False)
    nf: sigmas for valid data using median noise filter, None to disable noise filter (default: None)
    tmin: lower boundary of time for partial extraction, scalar or list (Default: None)
    tmax: upper boundary of time for partial extraction, scalar or list (Default: None)
raw: returns raw data without scaling/offsetting if True (Default: False)
Returns
=======
    if summary is False:
        [ [t1, d1], [t2, d2], [t3, d3], ... ]
    if summary is True:
        [ [t1, d1, err1], [t2, d2, err2], ... ]
    if raw is True:
        t1 is a tuple of (hres1, hofs1, vres1, vofs1)
    where t1 is timing for the 1st trace, d1 is data for the 1st trace, err1 is the error (1 sigma) for the 1st trace, and so on.
"""
# Read header (HDR)
h = open(str(filenumber) + ".HDR")
lines = h.readlines()
h.close()
# Parse $PublicInfo
for line in lines:
token = line.split()
if len(token) > 0:
# Check endian
if token[0] == "Endian":
endian = '>' if token[1] == "Big" else '<'
# Check data format
if token[0] == "DataFormat":
format = token[1]
assert format == "Block"
# Check # of groups
if token[0] == "GroupNumber":
groups = int(token[1])
# Check # of total traces
if token[0] == "TraceTotalNumber":
ttraces = int(token[1])
# Check data offset
if token[0] == "DataOffset":
offset = int(token[1])
# Initialize containers
traces = [None] * groups # Number of traces for each group
blocks = [None] * ttraces # Number of blocks for each trace
bsizes = [None] * ttraces # Block size for each trace
vres = [None] * ttraces # VResolution for each trace
voffset = [None] * ttraces # VOffset for each trace
hres = [None] * ttraces # HResolution for each trace
hoffset = [None] * ttraces # HOffset for each trace
# Parse $Group
for line in lines:
token = line.split()
if len(token) > 0:
# Read current group number
if token[0][:6] == "$Group":
cgn = int(token[0][6:]) - 1 # Current group number (minus 1)
# Check # of traces in this group
if token[0] == "TraceNumber":
traces[cgn] = int(token[1])
traceofs = np.sum(traces[:cgn], dtype=int)
# Check # of Blocks
if token[0] == "BlockNumber":
blocks[traceofs:traceofs+traces[cgn]] = [ int(token[1]) ] * traces[cgn]
# Check Block Size
if token[0] == "BlockSize":
bsizes[traceofs:traceofs+traces[cgn]] = [ int(s) for s in token[1:] ]
# Check VResolusion
if token[0] == "VResolution":
vres[traceofs:traceofs+traces[cgn]] = [ float(res) for res in token[1:] ]
# Check VOffset
if token[0] == "VOffset":
voffset[traceofs:traceofs+traces[cgn]] = [ float(ofs) for ofs in token[1:] ]
# Check VDataType
if token[0] == "VDataType":
assert token[1] == "IS2"
# Check HResolution
if token[0] == "HResolution":
hres[traceofs:traceofs+traces[cgn]] = [ float(res) for res in token[1:] ]
# Check HOffset
if token[0] == "HOffset":
hoffset[traceofs:traceofs+traces[cgn]] = [ float(ofs) for ofs in token[1:] ]
# Data Initialization
time = [ np.array(range(bsizes[t])) * hres[t] + hoffset[t] for t in range(ttraces) ]
data = [ [None] * blocks[t] for t in range(ttraces) ]
# Open WVF
f = open(str(filenumber) + ".WVF", 'rb')
f.seek(offset)
# Read WVF
if format == "Block":
# Block format (assuming block size is the same for all the traces in Block format)
for b in range(blocks[0]):
for t in range(ttraces):
if raw:
data[t][b] = np.array(unpack(endian + 'h'*bsizes[t], f.read(bsizes[t]*2)), dtype='int64')
else:
data[t][b] = np.array(unpack(endian + 'h'*bsizes[t], f.read(bsizes[t]*2))) * vres[t] + voffset[t]
else:
# Trace format
for t in range(ttraces):
for b in range(blocks[t]):
if raw:
data[t][b] = np.array(unpack(endian + 'h'*bsizes[t], f.read(bsizes[t]*2)), dtype='int64')
else:
data[t][b] = np.array(unpack(endian + 'h'*bsizes[t], f.read(bsizes[t]*2))) * vres[t] + voffset[t]
# Array conversion
for t in range(ttraces):
if raw:
data[t] = np.array(data[t], dtype='int64')
else:
data[t] = np.array(data[t])
# Tmin/Tmax filtering
for t in range(ttraces):
if type(tmin) == list or type(tmax) == list:
if not (type(tmin) == list and type(tmax) == list and len(tmin) == len(tmax)):
raise ValueError("tmin and tmax both have to be list and have to have the same length.")
mask = np.add.reduce([ (time[t] >= _tmin) & (time[t] < _tmax) for (_tmax, _tmin) in zip(tmax, tmin)], dtype=bool)
else:
_tmin = np.min(time[t]) if tmin is None else tmin
_tmax = np.max(time[t]) + 1 if tmax is None else tmax
mask = (time[t] >= _tmin) & (time[t] < _tmax)
data[t] = data[t][:, mask]
time[t] = time[t][mask]
f.close()
if summary is False:
# Return wave data as is
if raw:
return [ [ (hres[t], hoffset[t], vres[t], voffset[t]), data[t] ] for t in range(ttraces) ]
else:
return [ [ time[t], data[t] ] for t in range(ttraces) ]
else:
if nf is None:
# Noise filter is off
if raw:
return [ [ (hres[t], hoffset[t], vres[t], voffset[t]), np.mean(data[t].astype(dtype='float64'), axis=0), np.std(data[t].astype(dtype='float64'), axis=0, ddof=1) ]
for t in range(ttraces) ]
else:
return [ [ time[t], np.mean(data[t], axis=0), np.std(data[t], axis=0, ddof=1) ]
for t in range(ttraces) ]
else:
# Noise filter is on
if raw:
return [ [ (hres[t], hoffset[t], vres[t], voffset[t]),
np.apply_along_axis(lambda a: np.mean(a[median_filter(a, nf)]), 0, data[t].astype(dtype='float64')),
np.apply_along_axis(lambda a: np.std(a[median_filter(a, nf)], ddof=1), 0, data[t].astype(dtype='float64')) ]
for t in range(ttraces) ]
else:
return [ [ time[t],
np.apply_along_axis(lambda a: np.mean(a[median_filter(a, nf)]), 0, data[t]),
np.apply_along_axis(lambda a: np.std(a[median_filter(a, nf)], ddof=1), 0, data[t]) ]
for t in range(ttraces) ]
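# Example (hypothetical usage sketch): read "123.HDR"/"123.WVF" and summarize the
# blocks with a 3-sigma median noise filter (the unpacking assumes two traces):
#
#   (t1, d1, err1), (t2, d2, err2) = yopen(123, summary=True, nf=3)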
def popen(filename, ch=None, raw=False):
"""
Read pls file
Parameters
==========
filename: file name to read
ch: returns data only for the given channel if given (Default: None)
raw: returns raw data without scaling/offsetting if True (Default: False)
Returns
=======
if raw is True:
[ header, vres, vofs, hres, hofs, tick, num, data, edata ]
else:
[ header, t, tick, num, data, edata ]
"""
# Initialize
header = {'COMMENT': []}
vres = {}
vofs = {}
hres = {}
hofs = {}
tick = {}
num = {}
data = {}
edata = {}
# Parser
def parser():
"""
PLS Data Parser (generator)
"""
# Initialization
samples = -1
extra = 0
chunk = ''
isHeader = True
while True:
while len(chunk) < 2:
chunk += yield
# Get the magic character
magic = chunk[0]
if isHeader and magic == 'C':
# Comment
while len(chunk) < 80:
chunk += yield
header['COMMENT'].append(chunk[2:80])
chunk = chunk[80:]
elif isHeader and magic == 'V':
# Version
while len(chunk) < 80:
chunk += yield
header['VERSION'] = chunk[2:80]
chunk = chunk[80:]
elif isHeader and magic == 'O':
# Date
while len(chunk) < 10:
chunk += yield
_m, _d, _y = map(int, chunk[2:10].split())
header['DATE'] = "%d/%d/%d" % (_y, _m, _d)
chunk = chunk[10:]
elif isHeader and magic == 'S':
# Number of Samples
while len(chunk) < 7:
chunk += yield
header['SAMPLES'] = samples = int(chunk[2:7])
chunk = chunk[7:]
elif isHeader and magic == 'E':
# Extra Bytes
while len(chunk) < 7:
chunk += yield
header['EXTRA'] = extra = int(chunk[2:7])
chunk = chunk[7:]
elif isHeader and magic == 'P':
# Discriminator
while len(chunk) < 78:
chunk += yield
_dis = chunk[2:78].split()
if _dis[0] == '01':
header['ULD'] = eval(_dis[1])
elif _dis[0] == '02':
header['LLD'] = eval(_dis[1])
chunk = chunk[78:]
elif isHeader and magic == 'N':
# Normalization
while len(chunk) < 47:
chunk += yield
_ch, _hofs, _hres, _vofs, _vres = chunk[2:47].split()
_ch = int(_ch)
vres[_ch] = eval(_vres)
vofs[_ch] = eval(_vofs)
hres[_ch] = eval(_hres)
hofs[_ch] = eval(_hofs)
chunk = chunk[47:]
elif magic == 'D':
# Data
isHeader = False
if samples < 0:
raise ValueError("Invalid number of samples.")
while len(chunk) < (11 + samples*2):
chunk += yield
_ch, _tick, _num = unpack('<BII', chunk[2:11])
if not data.has_key(_ch):
data[_ch] = bytearray()
tick[_ch] = []
num[_ch] = []
edata[_ch] = bytearray()
data[_ch] += chunk[11:11 + samples*2]
tick[_ch].append(_tick)
num[_ch].append(_num)
edata[_ch] += chunk[11 + samples*2:11 + samples*2 + extra]
chunk = chunk[11 + samples*2 + extra:]
else:
# Skip unknown magic
chunk = chunk[1:]
# Start parser
p = parser()
p.next()
# Read by chunk and parse it
with open(filename, 'rb') as f:
while True:
chunk = f.read(1024*1024) # read 1 MB
if not chunk:
break
p.send(chunk)
# Convert buffer to numpy array
for k in ([ch] if ch else data.keys()):
data[k] = np.frombuffer(data[k], dtype='>i2').reshape(-1, header['SAMPLES'])
edata[k] = np.frombuffer(edata[k], dtype='>u1').reshape(-1, header['SAMPLES'])
if raw:
if ch:
return header, vres[ch], vofs[ch], hres[ch], hofs[ch], tick[ch], num[ch], data[ch], edata[ch]
else:
return header, vres, vofs, hres, hofs, tick, num, data, edata
else:
t = {}
for k in ([ch] if ch else data.keys()):
# Normalize data using res/ofs
t[k] = (np.arange(header['SAMPLES']) + hofs[k]) * hres[k]
data[k] = (np.asarray(data[k]) + vofs[k]) * vres[k]
if ch:
return header, t[ch], tick[ch], num[ch], data[ch], edata[ch]
else:
return header, t, tick, num, data, edata
def tesana(t, p, n, lpfc=None, hpfc=None, binsize=1, max_shift=10,
thre=0.4, filt=None, nulldc=False, offset=False, center=False, sigma=3,
gain=None, dsr=None, shift=False, ocmethod="ols", flip=False, atom="Mn",
kbfit=False, ignorekb=False, method="mle",
rshunt=None, tbias=None, ites=None, ka_min=80, kb_min=40,
tex=False, plotting=True, savedat=False, session="Unnamed"):
"""
Perform TES Analysis
Parameters (and their default values):
t: time data (array-like)
p: pulse data (array-like)
n: noise data (array-like)
lpfc: low-pass filter cut-off frequency in bins (Default: None)
hpfc: high-pass filter cut-off frequency in bins (Default: None)
    binsize: energy bin size for histograms and fittings (only for ls and cs) in eV (Default: 1)
max_shift: maximum allowed shifts to calculate maximum cross correlation (Default: 10)
thre: correlation threshold for offset correction (Default: 0.4)
filt: window function (hanning/hamming/blackman/tukey) (Default: None)
nulldc: nullify the DC bin when template generation (Default: False)
offset: subtract DC offset (Default: False)
center: centering pulse rise (Default: False)
sigma: sigmas for median filter (Default: 3)
gain: feedback gain for current-space conversion (Default: None)
dsr: down-sampling rate (Default: None)
shift: treat dE as energy shift instead of scaling (Default: False)
ocmethod: offset correction fitting method (ols/odr) (Default: ols)
flip: flip x and y when offset correction fitting (Default: False)
atom: atom to fit (Default: Mn)
kbfit: fit Kb line (Default: False)
ignorekb: ignore Kb line when linearity correction (Default: False)
method: fitting method (mle/ls/cs) (Default: mle)
rshunt: shunt resistance value for r-space conversion (Default: None)
tbias: TES bias current for r-space conversion (Default: None)
ites: TES current for r-space conversion (Default: None)
ka_min: minimum counts to group bins for Ka line (valid only for ls/cs fittings) (Default: 80)
    kb_min: minimum counts to group bins for Kb line (valid only for ls/cs fittings) (Default: 40)
tex: use TeX for plots (Default: False)
plotting: generate and save plots (Default: True)
savedat: save data to files (Default: False)
session: session name for plots and data files (Default: Unnamed)
Note:
- Use offset option when using filt option
- Consider using center option when using filt option
"""
if plotting:
# Import matplotlib
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['text.usetex'] = str(tex)
from pylab import figure, plot, errorbar, hist, axvline, xlim, ylim, loglog, xlabel, ylabel, legend, tight_layout, savefig
print "Session: %s" % session
# Preparation
p = np.asarray(p)
n = np.asarray(n)
t = np.asarray(t)
dt = np.diff(t)[0]
df = (dt * t.shape[-1])**-1
# Subtract offset
if offset:
ofs = np.median(n)
p -= ofs
n -= ofs
# Convert to current-space if needed
if gain:
print "Converting to current-space"
p /= gain
n /= gain
# Convert to resistance-space
Rspace = False
if gain and rshunt and tbias and ites:
print "Converting to resistance-space"
ofs = np.median(n)
p += (ites - ofs)
n += (ites - ofs)
# Convert to resistance
p = (tbias - p) * rshunt / p
n = (tbias - n) * rshunt / n
Rspace = True
# Down-sample
if dsr > 1:
p = p[:,:p.shape[-1]/dsr*dsr].reshape(p.shape[0], -1, dsr).mean(axis=-1)
n = n[:,:n.shape[-1]/dsr*dsr].reshape(n.shape[0], -1, dsr).mean(axis=-1)
dt *= dsr
t = t[::dsr]
# Pulse centering (for filtering)
if center:
# Roll pulse to the center
r = p.shape[-1] / 2 - np.median(abs(p - Filter.offset(p)[:, np.newaxis]).argmax(axis=-1))
p = np.hstack((p[...,-r:], p[...,:-r]))
# Calculate offset (needs to be done before applying filter)
if p.size > 0:
offset = Filter.offset(p)
# Generate Filter
if filt is None:
pass
else:
if filt.lower() == "hanning":
f = np.hanning(p.shape[-1])
elif filt.lower() == "hamming":
f = np.hamming(p.shape[-1])
elif filt.lower() == "blackman":
f = np.blackman(p.shape[-1])
elif filt.lower() == "tukey":
f = tukey(p.shape[-1])
else:
raise ValueError('Unsupported filter: %s' % filt.lower())
print "Window filter function: %s" % filt.lower()
# Amplitude correction
cf = f.sum() / len(f)
p *= (f / cf)
n *= (f / cf)
# Equivalent noise bandwidth correction
enb = len(f)*(f**2).sum()/f.sum()**2
df *= enb
if p.size > 0:
# Calculate averaged pulse
avgp = Filter.average_pulse(p, max_shift=max_shift)
if savedat:
np.savetxt('%s-averagepulse.dat' % session, np.vstack((t, avgp)).T,
header='Time (s), Averaged Pulse (%s)' % ('R' if Rspace else ('A' if gain else 'V')), delimiter='\t')
if plotting:
figure()
plot(t, avgp)
xlabel('Time$\quad$(s)')
ylabel('Averaged Pulse$\quad$(%s)' % ('R' if Rspace else ('A' if gain else 'V')))
tight_layout()
savefig('%s-averagepulse.pdf' % session)
# Calculate averaged pulse spectrum
avgps = np.sqrt(Filter.power(avgp)) / df
if savedat:
np.savetxt('%s-avgpulse-power.dat' % session, np.vstack((np.arange(len(avgps))*df, avgps)).T,
header='Frequency (Hz), Average Pulse Power (%s/srHz)' % ('R' if Rspace else ('A' if gain else 'V')), delimiter='\t')
if plotting:
avgps[0] = 0 # for better plot
figure()
plot(np.arange(len(avgps))*df, avgps)
loglog()
xlabel('Frequency$\quad$(Hz)')
ylabel('Average Pulse Power$\quad$(%s/Hz)' % ('R' if Rspace else ('A' if gain else 'V')))
tight_layout()
savefig('%s-avgpulse-power.pdf' % session)
if n.size > 0:
# Plot noise spectrum
avgns = np.sqrt(Filter.average_noise(n) / df)
if savedat:
np.savetxt('%s-noise.dat' % session, np.vstack((np.arange(len(avgns))*df, avgns)).T,
header='Frequency (Hz), Noise (%s/srHz)' % ('R' if Rspace else ('A' if gain else 'V')), delimiter='\t')
if plotting:
avgns[0] = 0 # for better plot
figure()
plot(np.arange(len(avgns))*df, avgns)
loglog()
xlabel('Frequency$\quad$(Hz)')
ylabel('Noise$\quad$(%s/$\sqrt{\mathrm{Hz}}$)' % ('R' if Rspace else ('A' if gain else 'V')))
tight_layout()
savefig('%s-noise.pdf' % session)
if p.size > 0 and n.size > 0:
# Generate template
tmpl, sn = Filter.generate_template(p, n, lpfc=lpfc, hpfc=hpfc, nulldc=nulldc, max_shift=max_shift)
if savedat:
np.savetxt('%s-template.dat' % session, np.vstack((t, tmpl)).T,
header='Time (s), Template (A.U.)', delimiter='\t')
np.savetxt('%s-sn.dat' % session, np.vstack((np.arange(len(sn))*df, sn/np.sqrt(df))).T,
header='Frequency (Hz), S/N (/srHz)', delimiter='\t')
if plotting:
# Plot template
figure()
plot(t, tmpl)
xlabel('Time$\quad$(s)')
ylabel('Template$\quad$(A.U.)')
tight_layout()
savefig('%s-template.pdf' % session)
# Plot SNR
figure()
plot(np.arange(len(sn))*df, sn/np.sqrt(df))
loglog()
xlabel('Frequency$\quad$(Hz)')
ylabel('S/N$\quad$(/$\sqrt{\mathrm{Hz}}$)')
tight_layout()
savefig('%s-sn.pdf' % session)
# Calculate baseline resolution
print "Resolving power: %.2f (%.2f eV @ 5.9 keV)" % (np.sqrt((sn**2).sum()*2), Analysis.baseline(sn))
# Perform optimal filtering
pha_p = Filter.optimal_filter(p, tmpl, max_shift=max_shift)
pha_n = Filter.optimal_filter(n, tmpl, max_shift=0)
# Offset correction
(a, b), coef = Analysis.fit_offset(pha_p, offset, sigma=sigma, method=ocmethod, flip=flip)
if coef > thre:
oc_pha_p = Analysis.offset_correction(pha_p, offset, b)
oc_pha_n = Analysis.offset_correction(pha_n, offset, b)
print "Offset correction with: PHA = %f * (1 + %f * Offset)" % (a, b)
if plotting:
figure()
ka = Analysis.ka(np.vstack((pha_p, offset)).T, sigma=sigma)
plot(ka.T[1], ka.T[0], '.', c='k')
x_min, x_max = xlim()
ofs = np.linspace(x_min, x_max)
label = '$\mathrm{PHA}=%.2f\\times(1+%.2f\\times\mathrm{Offset})$' % (a, b)
plot(ofs, a*(1+b*ofs), 'r-', label=label)
xlabel('Offset$\quad$(V)')
ylabel('PHA$\quad$(V)')
legend(frameon=False)
tight_layout()
savefig('%s-offset.pdf' % session)
else:
oc_pha_p = pha_p
oc_pha_n = pha_n
print "Skipped offset correction: correlation coefficient (%f) is too small" % coef
# Check line database
if "%sKa" % atom not in Constants.LE.keys() or "%sKb" % atom not in Constants.LE.keys():
raise ValueError('Unsupported atom: %s' % atom)
# Linearity correction
pha_line_center = np.asarray([ np.median(Analysis.ka(oc_pha_p, sigma=sigma)), np.median(Analysis.kb(oc_pha_p, sigma=sigma)) ])
line_energy = np.asarray([ Constants.LE['%sKa' % atom], Constants.LE['%sKb' % atom] ])
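# The detector response is modeled as PHA = a*E^2 + b*E (quadratic through the origin);
# when ignorekb is set, only the Ka line is available and a purely linear term is fitted.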
if ignorekb:
a, b = Analysis.fit_linearity([pha_line_center[0]], [line_energy[0]], deg=1)
print "Linearity correction with: PHA = %e * E" % (b)
else:
a, b = Analysis.fit_linearity(pha_line_center, line_energy, deg=2)
print "Linearity correction with: PHA = %e * E^2 + %e * E" % (a, b)
print "MnKb saturation ratio: %.2f %%" % ((pha_line_center[1]/pha_line_center[0])/(line_energy[1]/line_energy[0])*100)
lc_pha_p = Analysis.linearity_correction(oc_pha_p, a, b)
lc_pha_n = Analysis.linearity_correction(oc_pha_n, a, b)
if savedat:
np.savetxt('%s-linearity.dat' % session, np.array([pha_line_center[0]]) if ignorekb else pha_line_center[np.newaxis,:],
header='%sKa PHA' % atom if ignorekb else '%sKa PHA, %sKb PHA' % (atom, atom), delimiter='\t')
if plotting:
figure()
x = np.linspace(0, 7e3)
if ignorekb:
plot(line_energy[0]/1e3, pha_line_center[0], '+', color='b')
plot(x/1e3, x*b, 'r--')
else:
plot(line_energy/1e3, pha_line_center, '+', color='b')
plot(x/1e3, x**2*a+x*b, 'r--')
xlim((0, 7))
xlabel('Energy$\quad$(keV)')
ylabel('PHA$\quad$(a.u.)')
tight_layout()
savefig('%s-linearity.pdf' % session)
# Energy Spectrum
if plotting:
figure()
hcount, hbin, hpatch = hist(lc_pha_p[lc_pha_p==lc_pha_p]/1e3, bins=7000/binsize, histtype='stepfilled', color='y')
xlim(0, 7)
xlabel('Energy$\quad$(keV)')
ylabel('Count')
tight_layout()
savefig('%s-spec.pdf' % session)
if savedat:
hcount, hbin = np.histogram(lc_pha_p[lc_pha_p==lc_pha_p]/1e3, bins=7000/binsize)
np.savetxt('%s-spec.dat' % session, np.vstack(((hbin[1:]+hbin[:-1])/2, hcount)).T,
header='Energy (keV), Count', delimiter='\t')
# Line fitting
def _line_fit(data, min, line):
# Fit
(dE, width), (dE_error, width_error), e = Analysis.fit(data, binsize=binsize, min=min, line=line, shift=shift, method=method)
if method == "cs":
chi_squared, dof = e
if method in ("mle", "ls"):
print "%s: %.2f +/- %.2f eV @ Ec%+.2f eV" \
% (line, width, width_error, dE)
elif method == "cs":
print "%s: %.2f +/- %.2f eV @ Ec%+.2f eV (Red. chi^2 = %.1f/%d = %.2f)" \
% (line, width, width_error, dE, chi_squared, dof, chi_squared/dof)
return dE, width, width_error
def _line_spectrum(data, min, line, dE, width, width_error):
# Draw histogram
n, bins = Analysis.histogram(data, binsize=binsize)
if method == "cs":
gn, gbins = Analysis.group_bin(n, bins, min=min)
else:
# No grouping in mle and ls
gn, gbins = n, bins
ngn = gn/(np.diff(gbins))
ngn_sigma = np.sqrt(gn)/(np.diff(gbins))
cbins = (gbins[1:]+gbins[:-1])/2
if plotting:
figure()
if width_error is not None:
label = 'FWHM$=%.2f\pm %.2f$ eV' % (width, width_error)
else:
label = 'FWHM$=%.2f$ eV (Fixed)' % width
if method == "cs":
errorbar(cbins, ngn, yerr=ngn_sigma, xerr=np.diff(gbins)/2, capsize=0, ecolor='k', fmt=None, label=label)
else:
hist(data, bins=gbins, weights=np.ones(len(data))/binsize, histtype='step', ec='k', label=label)
E = np.linspace(bins.min(), bins.max(), 1000)
model = Analysis.normalization(ngn, gbins, dE, width, line=line, shift=shift) \
* Analysis.line_model(E, dE, width, line=line, shift=shift, full=True)
# Plot theoretical model
plot(E, model[0], 'r-')
# Plot fine structures
for m in model[1:]:
plot(E, m, 'b--')
xlabel('Energy$\quad$(eV)')
ylabel('Normalized Count$\quad$(count/eV)')
legend(frameon=False)
ymin, ymax = ylim()
ylim(ymin, ymax*1.1)
tight_layout()
savefig("%s-%s.pdf" % (session, line))
if savedat:
np.savetxt('%s-%s.dat' % (session, line), np.vstack((cbins, gn)).T,
header='Energy (keV), Count', delimiter='\t')
## Ka
ka = Analysis.ka(lc_pha_p, sigma=sigma)
dE, width, width_error = _line_fit(ka, ka_min, "%sKa" % atom)
_line_spectrum(ka, ka_min, "%sKa" % atom, dE, width, width_error)
## Kb
kb = Analysis.kb(lc_pha_p, sigma=sigma)
if kbfit:
dE, width, width_error = _line_fit(kb, kb_min, "%sKb" % atom)
else:
width_error = None
_line_spectrum(kb, kb_min, "%sKb" % atom, dE, width, width_error)
## Baseline
f_pha_n = lc_pha_n[Filter.median_filter(lc_pha_n, sigma=sigma)]
baseline = Analysis.sigma2fwhm(np.std(f_pha_n))
print "Baseline resolution: %.2f eV" % baseline
n, bins = Analysis.histogram(f_pha_n, binsize=binsize)
if savedat:
np.savetxt('%s-baseline.dat' % session, np.vstack(((bins[1:]+bins[:-1])/2, n)).T,
header='Energy (keV), Count', delimiter='\t')
if plotting:
figure()
label = 'FWHM$=%.2f$ eV' % baseline
hist(f_pha_n, bins=bins, weights=np.ones(len(f_pha_n))/binsize, histtype='step', ec='k', label=label)
mu, sigma = norm.fit(f_pha_n)
E = np.linspace(bins.min(), bins.max(), 1000)
plot(E, norm.pdf(E, loc=mu, scale=sigma)*len(f_pha_n), 'r-')
xlabel('Energy$\quad$(eV)')
ylabel('Normalized Count$\quad$(count/eV)')
legend(frameon=False)
tight_layout()
savefig('%s-baseline.pdf' % session) | mit |
mwv/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
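# Piecewise definition: -4*z where z < -1, (1 - z)**2 where -1 <= z < 1, and 0 where z >= 1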
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
jmontgom10/Mimir_pyPol | oldCode/04b_avgBAABditherHWPimages.py | 1 | 17054 | # -*- coding: utf-8 -*-
"""
Combines all the images for a given (TARGET, FILTER, HWP) combination to
produce a single, average image.
Estimates the sky background level of the on-target position at the time of the
on-target observation using a bracketing pair of off-target observations through
the same HWP polaroid rotation value. Subtracts this background level from
each on-target image to produce background-free images. Applies an airmass
correction to each image, and combines these final images to produce a background-free,
airmass-corrected, average image.
"""
# Core imports
import os
import sys
import copy
import warnings
# Import scipy/numpy packages
import numpy as np
from scipy import ndimage
# Import astropy packages
from astropy.table import Table
import astropy.units as u
from astropy.convolution import Gaussian2DKernel
from astropy.modeling import models, fitting
from astropy.stats import gaussian_fwhm_to_sigma, sigma_clipped_stats
from photutils import (make_source_mask,
MedianBackground, SigmaClip, Background2D)
# Import plotting utilities
from matplotlib import pyplot as plt
# Add the AstroImage class
import astroimage as ai
# Add the header handler to the BaseImage class
from Mimir_header_handler import Mimir_header_handler
ai.reduced.ReducedScience.set_header_handler(Mimir_header_handler)
ai.set_instrument('mimir')
#==============================================================================
# *********************** CUSTOM USER CODE ************************************
# this is where the user specifies where the raw data is stored
# and some of the subdirectory structure to find the actual .FITS images
#==============================================================================
# This is a list of targets for which to process each subgroup (observational
# group... never spanning multiple nights, etc...) instead of combining into a
# single "metagroup" for all observations of that target. The default behavior
# is to go ahead and combine everything into a single, large "metagroup". The
# calibration data should probably not be processed as a metagroup though.
processSubGroupList = []
processSubGroupList = [t.upper() for t in processSubGroupList]
# Define the location of the PPOL reduced data to be read and worked on
PPOL_data = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\PPOL_Reduced\\201611\\'
S3_dir = os.path.join(PPOL_data, 'S3_Astrometry')
# This is the location where all pyPol data will be saved
pyPol_data = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\pyPol_Reduced\\201611'
# This is the location of the previously generated masks (step 4)
maskDir = os.path.join(pyPol_data, 'Masks')
# Setup new directory for polarimetry data
polarimetryDir = os.path.join(pyPol_data, 'Polarimetry')
if (not os.path.isdir(polarimetryDir)):
os.mkdir(polarimetryDir, 0o755)
HWPDir = os.path.join(polarimetryDir, 'HWPImgs')
if (not os.path.isdir(HWPDir)):
os.mkdir(HWPDir, 0o755)
bkgPlotDir = os.path.join(HWPDir, 'bkgPlots')
if (not os.path.isdir(bkgPlotDir)):
os.mkdir(bkgPlotDir, 0o755)
# # Setup PRISM detector properties
# read_noise = 13.0 # electrons
# effective_gain = 3.3 # electrons/ADU
#########
### Establish the atmospheric extinction (magnitudes/airmass)
#########
# Following table from Hu (2011)
# Data from Gaomeigu Observational Station
# Passband | K'(lambda) [mag/airmass] | K'' [mag/(color*airmass)]
# U 0.560 +/- 0.023 0.061 +/- 0.004
# B 0.336 +/- 0.021 0.012 +/- 0.003
# V 0.198 +/- 0.024 -0.015 +/- 0.004
# R 0.142 +/- 0.021 -0.067 +/- 0.005
# I 0.093 +/- 0.020 0.023 +/- 0.006
# Following table from Schmude (1994)
# Data from Texas A & M University Observatory
# Passband | K(lambda) [mag/airmass] | dispersion on K(lambda)
# U 0.60 +/- 0.05 0.120
# B 0.40 +/- 0.06 0.165
# V 0.26 +/- 0.03 0.084
# R 0.19 +/- 0.03 0.068
# I 0.16 +/- 0.02 0.055
# TODO: Ask Dan about atmospheric extinction from airmass at NIR
kappa = dict(zip(['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K' ],
[0.60, 0.40, 0.26, 0.19, 0.16, 0.05, 0.01, 0.005]))
# Read in the indexFile data and select the filenames
print('\nReading file index from disk')
indexFile = os.path.join(pyPol_data, 'reducedFileIndex.csv')
fileIndex = Table.read(indexFile, format='ascii.csv')
# Determine which parts of the fileIndex pertain to HEX dither science images
useFiles = np.logical_and(
fileIndex['USE'] == 1,
fileIndex['DITHER_TYPE'] == 'ABBA'
)
useFileRows = np.where(useFiles)
# Cull the file index to only include files selected for use
fileIndex = fileIndex[useFileRows]
# Define an approximate pixel scale
pixScale = 0.5789*(u.arcsec/u.pixel)
# TODO: implement a FWHM seeing cut... not yet working because PSF getter seems
# to be malfunctioning in step 2
#
#
# # Loop through each unique GROUP_ID and test for bad seeing conditions.
# groupByID = fileIndex.group_by(['GROUP_ID'])
# for subGroup in groupByID.groups:
# # Grab the FWHM values for this subGroup
# thisFWHMs = subGroup['FWHM']*u.pixel
#
# # Grab the median and standard deviation of the seeing for this subgroup
# medianSeeing = np.median(thisFWHMs)
# stdSeeing = np.std(thisFWHMs)
#
# # Find bad FWHM values
# badFWHMs = np.logical_not(np.isfinite(subGroup['FWHM']))
# badFWHMs = np.logical_or(
# badFWHMs,
# thisFWHMs <= 0
# )
# badFWHM = np.logical_and(
# badFWHM,
# thisFWHMs > 2.0*u.arcsec
# )
# import pdb; pdb.set_trace()
# Group the fileIndex by...
# 1. Target
# 2. Waveband
fileIndexByTarget = fileIndex.group_by(['TARGET', 'FILTER'])
# Loop through each group
for group in fileIndexByTarget.groups:
# Grab the current group information
thisTarget = str(np.unique(group['TARGET'].data)[0])
thisFilter = str(np.unique(group['FILTER'].data)[0])
# # Skip the Merope nebula for now... not of primary scientific importance
# if thisTarget == 'MEROPE': continue
# Update the user on processing status
print('\nProcessing images for')
print('Target : {0}'.format(thisTarget))
print('Filter : {0}'.format(thisFilter))
# Grab the atmospheric extinction coefficient for this wavelength
thisKappa = kappa[thisFilter]
# Further divide this group by its constituent HWP values
indexByPolAng = group.group_by(['IPPA'])
# Loop over each of the HWP values, as these are independent from
# each other and should be treated entirely separately.
for IPPAgroup in indexByPolAng.groups:
# Grab the current HWP information
thisIPPA = np.unique(IPPAgroup['IPPA'].data)[0]
# Update the user on processing status
print('\tIPPA : {0}'.format(thisIPPA))
# For ABBA dithers, we need to compute the background levels on a
# sub-group basis. If this target has not been selected for subGroup
# averaging, then simply append the background subtracted images to a
# cumulative list of images to align and average.
# Initialize an image list to store all the images for this
# (target, filter, pol-ang) combination
imgList = []
indexByGroupID = IPPAgroup.group_by(['GROUP_ID'])
for subGroup in indexByGroupID.groups:
# Grab the name of this subGroup
thisSubGroup = str(np.unique(subGroup['OBJECT'])[0])
# if (thisSubGroup != 'NGC2023_R1') and (thisSubGroup != 'NGC2023_R2'): continue
# Construct the output file name and test if it already exists.
if thisTarget in processSubGroupList:
outFile = '_'.join([thisTarget, thisSubGroup, str(thisIPPA)])
outFile = os.path.join(HWPDir, outFile) + '.fits'
elif thisTarget not in processSubGroupList:
outFile = '_'.join([thisTarget, thisFilter, str(thisIPPA)])
outFile = os.path.join(HWPDir, outFile) + '.fits'
# Test if this file has already been constructed and either skip
# this subgroup or break out of the subgroup loop.
if os.path.isfile(outFile):
print('\t\tFile {0} already exists...'.format(os.path.basename(outFile)))
if thisTarget in processSubGroupList:
continue
elif thisTarget not in processSubGroupList:
break
# Update the user on the current execution status
print('\t\tProcessing images for subgroup {0}'.format(thisSubGroup))
# Initialize lists to store the A and B images.
AimgList = []
BimgList = []
# Initialize a list to store the off-target sky background levels
BbkgList = []
# Initialize lists to store the times of observation
AdatetimeList = []
BdatetimeList = []
# Read in all the images for this subgroup
progressString = '\t\tNumber of Images : {0}'
for iFile, filename in enumerate(subGroup['FILENAME']):
# Update the user on processing status
print(progressString.format(iFile+1), end='\r')
# Read in a temporary copy of this image
PPOL_file = os.path.join(S3_dir, filename)
tmpImg = ai.reduced.ReducedScience.read(PPOL_file)
# Crop the edges of this image
ny, nx = tmpImg.shape
binningArray = np.array(tmpImg.binning)
# Compute the amount to crop to get a 1000 x 1000 image
cy, cx = (ny - 1000, nx - 1000)
# Compute the crop boundaries and apply them
lf = np.int(np.round(0.5*cx))
rt = lf + 1000
bt = np.int(np.round(0.5*cy))
tp = bt + 1000
tmpImg = tmpImg[bt:tp, lf:rt]
# Grab the on-off target value for this image
thisAB = subGroup['AB'][iFile]
# Place the image in a list and store required background values
if thisAB == 'B':
# Place B images in the BimgList
BimgList.append(tmpImg)
# Place the median value of this off-target image in list
mask = make_source_mask(
tmpImg.data, snr=2, npixels=5, dilate_size=11
)
mean, median, std = sigma_clipped_stats(
tmpImg.data, sigma=3.0, mask=mask
)
BbkgList.append(median)
# Place the time of this image in a list of time values
BdatetimeList.append(tmpImg.julianDate)
if thisAB == 'A':
# Read in any associated masks and store them.
maskFile = os.path.join(maskDir, os.path.basename(filename))
# If there is a mask for this file, then apply it!
if os.path.isfile(maskFile):
# Read in the mask file
tmpMask = ai.reduced.ReducedScience.read(maskFile)
# Crop the mask to match the shape of the original image
tmpMask = tmpMask[bt:tp, lf:rt]
# Grab the data to be masked
tmpData = tmpImg.data
# Mask the data and put it back into the tmpImg
maskInds = np.where(tmpMask.data)
tmpData[maskInds] = np.NaN
tmpImg.data = tmpData
# Place B images in the BimgList
AimgList.append(tmpImg)
# Place the time of this image in a list of time values
AdatetimeList.append(tmpImg.julianDate)
# Create a new line for shell output
print('')
# Construct an image stack of the off-target images
BimageStack = ai.utilitywrappers.ImageStack(BimgList)
# Build a supersky image from these off-target images
superskyImage = BimageStack.produce_supersky()
# import pdb; pdb.set_trace()  # stray debug breakpoint disabled
# Locate regions outside of a 5% deviation
tmpSuperskyData = superskyImage.data
maskedPix = np.abs(tmpSuperskyData - 1.0) > 0.05
# Get rid of the small stuff and expand the big stuff
maskedPix = ndimage.binary_opening(maskedPix, iterations=2)
maskedPix = ndimage.binary_closing(maskedPix, iterations=2)
maskedPix = ndimage.binary_dilation(maskedPix, iterations=4)
# TODO: Make the box_size and filter_size sensitive to binning.
binningArray = np.array(superskyImage.binning)
box_size = tuple((100/binningArray).astype(int))
filter_size = tuple((10/binningArray).astype(int))
# Setup the sigma clipping and median background estimators
sigma_clip = SigmaClip(sigma=3., iters=10)
bkg_estimator = MedianBackground()
# Compute a smoothed background image
bkgData = Background2D(superskyImage.data,
box_size=box_size, filter_size=filter_size, mask=maskedPix,
sigma_clip=sigma_clip, bkg_estimator=bkg_estimator)
# Construct a smoothed supersky image object
smoothedSuperskyImage = ai.reduced.ReducedScience(
bkgData.background/bkgData.background_median,
uncertainty = bkgData.background_rms,
properties={'unit':u.dimensionless_unscaled}
)
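# The supersky is normalized to a median of ~1, so it acts as an illumination
# pattern that is later scaled by the interpolated scalar background level.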
# Interpolate background values to A times
AbkgList = np.interp(
AdatetimeList,
BdatetimeList,
BbkgList,
left=-1e6,
right=-1e6
)
# Cut out any extrapolated data (and corresponding images)
goodInds = np.where(AbkgList > -1e5)
AimgList = np.array(AimgList)[goodInds]
AdatetimeList = np.array(AdatetimeList)[goodInds]
AbkgList = AbkgList[goodInds]
AsubtractedList = []
# Loop through the on-target images and subtract background values
for Aimg, Abkg in zip(AimgList, AbkgList):
# Subtract the interpolated background values from the A images
tmpImg = Aimg - smoothedSuperskyImage*(Abkg*Aimg.unit)
# Apply an airmass correction
tmpImg = tmpImg.correct_airmass(thisKappa)
# Append the subtracted and masked image to the list.
AsubtractedList.append(tmpImg)
# Now that the images have been fully processed, pause to generate
# a plot to store in the "background plots" folder. These plots
# constitute a good sanity check on background subtraction.
plt.plot(BdatetimeList, BbkgList, '-ob')
plt.scatter(AdatetimeList, AbkgList, marker='o', facecolor='r')
plt.xlabel('Julian Date')
plt.ylabel('Background Value [ADU]')
figName = '_'.join([thisTarget, thisSubGroup, str(thisIPPA)])
figName = os.path.join(bkgPlotDir, figName) + '.png'
plt.savefig(figName, dpi=300)
plt.close('all')
# Here is where I need to decide if each subgroup image should be
# computed or if I should just continue with the loop.
if thisTarget.upper() in processSubGroupList:
# Construct an image combiner for the A images
AimgStack = ai.utilitywrappers.ImageStack(AsubtractedList)
# Align the images
AimgStack.align_images_with_wcs(
subPixel=False,
padding=np.NaN
)
# Combine the images
AoutImg = AimgStack.combine_images()
# Save the image
AoutImg.write(outFile, dtype=np.float64)
else:
# Extend the imgList variable with background corrected images
imgList.extend(AsubtractedList)
if len(imgList) > 0:
# At the exit of the loop, process ALL the files from ALL the groups
# Construct an image combiner for the A images
imgStack = ai.utilitywrappers.ImageStack(imgList)
# Align the images
imgStack.align_images_with_wcs(
subPixel=False,
padding=np.NaN
)
# Combine the images
outImg = imgStack.combine_images()
# Save the image
outImg.write(outFile, dtype=np.float64)
print('\nDone computing average images!')
| mit |
BrainTech/openbci | obci/analysis/csp/MLogit.py | 1 | 11792 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
"""This is a class for Multinomial Logit Regression
Class uses scipy.optimize package for minimization of a cost function.
The gradient of the cost function is passed to the minimizer.
Piotr Milanowski, November 2011, Warsaw
"""
from scipy.optimize import fmin_ncg, fmin_bfgs, fmin
import numpy as np
import matplotlib.pyplot as plt
def mix(x1, x2, deg=6):
out = np.zeros([len(x1), sum(range(deg+2))])
k = 0
for i in xrange(deg+1):
for j in range(i+1):
out[:,k] = x1**(i-j)*x2**(j)
k += 1
return out
class logit(object):
"""This is a class for a normal two-class logistic regression
The hypothesis of this regression is a sigmoid (logistic, logit) function.
It returns the probability of the data belonging to the first class.
The minimization of the cost function is based on the BFGS algorithm (scipy.optimize.fmin_bfgs).
The regression can account for regularization factors.
"""
def __init__(self, data, classes, labels=None):
"""Initialization of data
A column of ones is added to the data array.
Parameters:
===========
data : 2darray
NxM array. Rows of this array represent data points, columns represent features.
classes : 1darray
a N dimensional vector of classes. Each class is represented by either 0 or 1.
labels [= None] : list or dictionary
a 2-element mapping of the classes to their names.
Example:
=========
>>>X = np.random.rand(20, 4) #data
>>>Y = np.random.randint(0,2,20) #classes
>>>labels = ['class 1','class 2']
>>>MLogit.logit(X, Y, labels)
"""
self.dataNo, self.featureNo = data.shape
if len(classes) != self.dataNo:
raise ValueError, 'Not every data point has its target label!'
#Adding a columns of 1s and normalizing data - NO NORMALIZATION NEEDED
self.X = np.concatenate((np.ones([self.dataNo, 1]), data), axis = 1)
self.Y = classes
def _sigmoid(self, z):
"""This returns the value of a sigmoid function.
The sigmoid (logistic, logit) function looks like this:
f(z) = 1 / (1 + exp(-z))
Parameters:
===========
z : ndarray
the parameter of the function
Returns:
sig : ndarray
values of sigmoid function at z
"""
return 1/(1 + np.exp(-z))
def cost_function(self, theta, reg = 0):
"""The cost function of logit regression model
It looks like this:
J(theta) = -(1/M)*sum_{i=1}^{M}(y_i*log(h(theta;x_i)) + (1-y_i)*log(1-h(theta;x_i)))
+ (reg/(2*M))*sum_{j=1}^{N}(theta_j)^2
Parameters:
===========
theta : 1darray
the array of parameters. It's a (N+1) dimensional vector
reg [= 0] : float
the regularization parameter. This parameter penalizes theta being too big (overfitting)
Returns:
========
J : float
the value of cost function for given theta
"""
z = self._sigmoid(np.dot(self.X, theta))
regular = (reg/(2.0*self.dataNo))*sum(theta[1:]*theta[1:])
J = self.Y * np.log(z) + (1 - self.Y)*np.log(1 - z)
J = -(1.0 / self.dataNo) * sum(J)
return regular + J
def gradient_function(self, theta, reg = 0):
"""The gradient of cost function
The gradient looks like this:
g[0] = 1/N * sum_{i=1}^{N}(h(theta;x_i) - y_i)*x_i^0
g[j] = 1/N * sum_{i=1}^{N}(h(theta;x_i) - y_i)*x_i^j + theta[j]*reg/N
Parameters:
===========
theta : 1darray
the vector of parameters
reg : float
the regularization parameter
Returns:
========
fprime : 1darray
the gradient of cost function.
"""
gradient = np.zeros(self.featureNo + 1)
N = 1.0 / self.dataNo
z = np.dot(self.X, theta)
cost = self._sigmoid(z) - self.Y
# gradient[0] = N * sum(cost * self.X[:, 0])
# for j in xrange(self.featureNo):
# gradient[j] = N * sum(cost * self.X[:, j]) - reg * N * theta[j]
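# Vectorized form of the loop above; the bias column (theta[0]) is left unregularized.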
gradient = N * np.dot(cost, self.X)
gradient[1:] += reg * N * theta[1:]
return gradient
def fit(self, maxiter, reg = 0, initial_gues = None):
"""Minimizing function
Based on the fmin_bfgs function from the scipy.optimize package
Parameters:
===========
maxiter : int
maximal number of iterations
reg [= 0] : float
regularization parameter
initial_gues [= None] : 1darray
a vector of size #features + 1. If None, zeros will be assumed.
Returns:
========
theta : 1darray
optimal model parameters
"""
if initial_gues is None:
initial_gues = np.zeros(self.featureNo + 1)
out = fmin_bfgs(self.cost_function, initial_gues, \
self.gradient_function, args = ([reg]))
self.theta = out
return out
def predict(self, x, val=0.9):
"""For prediction of x
Returns predicted probability of x being in class 1
"""
x = np.insert(x, 0, 1) #inserting one at the beginning
z = np.dot(x, self.theta)
#if self._sigmoid(z) >=val:
#return 1
#else:
#return 0
return self._sigmoid(z)
def plot_features(self, show=True):
y = self.Y
idx = np.argsort(y)
x = self.X[idx, :]
y = y[idx]
N, feats = x.shape
if feats == 3:
idx1 = np.where(y==1)[0][0]
x1 = x[:idx1, :]
x2 = x[idx1:, :]
plt.plot(x1[:,1],x1[:,2],'ro',x2[:,1],x2[:,2],'go')
for x in np.arange(-5, 5, 0.5):
for y in np.arange(-3, 3, 0.5):
if self.predict(np.array([x,y])) <=0.5:
plt.plot(x,y,'r+')
else:
plt.plot(x,y,'g+')
plt.legend(('Class 0','Class 1'))
if show:
plt.show()
elif feats == 2:
idx1 = np.where(y==1)[0][0]
x1 = x[:idx1, :]
x2 = x[idx1:, :]
for x in np.arange(x1.min(), x1.max(), 0.1):
for y in np.arange(x2.min(), x2.max(), 0.1):
if self.predict(np.array([x,y])) <=0.01:
plt.plot(x,y,'r+')
else:
plt.plot(x,y,'g+')
plt.plot(x1[:,1],'ro',x2[:,1],'go')
if show:
plt.show()
else:
print "More than 2 dimmensions",x.shape
# def plot_fitted(self):
# N, feats = self.X.shape
# if feats == 3:
# x1 = se
def __normalization(self, data):
"""Function normalizes the data
Normalization is done by subtracting the mean of each column from each column member
and dividing by the column standard deviation.
Parameters:
===========
data : 2darray
the data array
Returns:
========
norms : 2darray
normalized values
"""
mean = data.mean(axis = 0)
variance = data.std(axis = 0)
return (data - mean) / variance
class mlogit(logit):
"""This is a multivariate variation of logit model
"""
def __init__(self, data, classes, labels=None):
"""See logit description"""
super(mlogit, self).__init__(data, classes, labels)
self.classesNo, classesIdx = np.unique(classes, return_inverse = True)
self.count_table = np.zeros([len(classes), len(self.classesNo)])
self.count_table[range(len(classes)), classesIdx] = 1.0
def fit(self, maxiter, reg = 0, initial_gues = None):
"""Fitting logit model for multiclass case"""
theta = np.zeros([self.featureNo + 1, len(self.classesNo)])
for i in range(len(self.classesNo)):
self.Y = self.count_table[:,i]
theta[:, i] = super(mlogit, self).fit(maxiter, reg = reg, initial_gues = initial_gues)
self.theta = theta
return theta
def predict(self, x, val=0.9):
"""Class prediction"""
x = np.insert(x, 0, 1)
z = np.dot(x, self.theta)
probs = super(mlogit, self)._sigmoid(z)
idx = np.argmax(probs)
if probs[idx] >= val:
return self.classesNo[idx]
else:
return None
def plot_features(self):
cn = len(self.classesNo)
idx = np.argsort(self.Y)
y = self.Y[idx]
x = self.X[idx,:]
classes = []
if x.shape[1] == 3:
for i in range(cn):
beg, end = np.where(y==i)[0][[0,-1]]
plt.plot(x[beg:end+1, 1], x[beg:end +1, 2],'o')
classes.append('Class'+str(i))
plt.legend(classes)
plt.show()
else:
print "More than 2 dimmesions"
#class diagnostics(object):
# def __init__(self, classifier_obj, division=[0.6, 0.2, 0.2]):
# self.obj = classifier_obj
# self.div = division
# self.N, self.ft = self.obj.dataNo, self.obj.featureNo
# self.cvNo = self.N * division[1]
# self.testNo = self.N * division[2]
# self.trainNo = self.N * division[0]
# def diagnose(self, iters, reg, odrer=1, val=0.9):
# idx = np.linspace(0, self.N-1, self.N)
# TP, FP, TN, FN
# train_ok = {'tp':0,'fp':0,'fn':0,'fp':0}
# cv_ok = {'tp':0,'fp':0,'fn':0,'fp':0}
# test_ok = {'tp':0,'fp':0,'fn':0,'fp':0}
# X = self.obj.X
# Y = self.obj.Y
# for i in xrange(iters):
# np.random.shuffle(idx)
# train_set = X[idx[:self.trainNo], :]
# cv_set = X[idx[self.trainNo:self.trainNo+self.cvNo], :]
# test_set = X[idx[self.trainNo+self.cvNo:], :]
# classes_train = Y[idx[:self.trainNo], :]
# classes_cv = Y[idx[self.trainNo:self.trainNo+self.cvNo], :]
# classes_test = Y[idx[self.trainNo+self.cvNo:], :]
# Training
# self.obj.X = train_set
# self.obj.Y = classes_train
# self.obj.fit(100)
# for j, row in enumerate(train_set):
# cl = self.obj.predict(row, val)
# if cl == classes_train[j]:
# train_ok['tp'] += 1
# elif cl is None:
# train_ok['fn'] += 1
# else:
# train_ok['fp'] += 1
# Crossvalidation
# for j, row in enumerate(cv_set):
# cl = self.obj.predict(row, val)
# if cl == classes_cv[j]:
# cv_ok['tp'] += 1
# elif cl in None:
# cv_ok['fn'] += 1
# else:
# cv_ok['fp'] += 1
# Test set
# for j, row in enumerate(test_set):
# cl = self.obj.predict(row, val)
# if cl == classes_test[j]:
# test_ok['tp'] += 1
# elif cl is None:
# test_ok['fn'] += 1
# else:
# test_ok['fp'] += 1
# def power_set(self, lst, l):
# """Create a powerset of a list for given length"""
# r = [[]]
# for e in lst:
# r.extend([s + [e] for s in r])
# return set([j for j in r if len(j) <= l])
# def next_order(self, kernel, next_o):
# def make_order(self, p):
# init_featsNo = self.featNo
| gpl-3.0 |
vortex-exoplanet/VIP | vip_hci/negfc/utils_negfc.py | 2 | 8821 | #! /usr/bin/env python
"""
Module with post-processing related functions called from within the NEGFC
algorithm.
"""
__author__ = 'Carlos Alberto Gomez Gonzalez'
__all__ = ['cube_planet_free']
import numpy as np
from ..metrics import cube_inject_companions
import math
from matplotlib.pyplot import plot, xlim, ylim, axes, gca, show
def cube_planet_free(planet_parameter, cube, angs, psfn, plsc, imlib='opencv',
interpolation='lanczos4',transmission=None):
"""
Return a cube in which we have injected negative fake companion at the
position/flux given by planet_parameter.
Parameters
----------
planet_parameter: numpy.array or list
The (r, theta, flux) for all known companions. For a 4d cube r,
theta and flux must all be 1d arrays with length equal to cube.shape[0];
i.e. planet_parameter should have shape: (n_pl,3,n_ch).
cube: numpy.array
The cube of fits images expressed as a numpy.array.
angs: numpy.array
The parallactic angle fits image expressed as a numpy.array.
psfn: numpy.array
The scaled psf expressed as a numpy.array.
plsc: float
The platescale, in arcsec per pixel.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
Returns
-------
cpf : numpy.array
The cube with negative companions injected at the position given in
planet_parameter.
"""
cpf = np.zeros_like(cube)
planet_parameter = np.array(planet_parameter)
if cube.ndim == 4:
if planet_parameter.shape[2] != cube.shape[0]:
raise TypeError("Input planet parameter with wrong dimensions.")
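# Companions are removed sequentially: each iteration injects one negative
# companion into the cube produced by the previous iteration.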
for i in range(planet_parameter.shape[0]):
if i == 0:
cube_temp = cube
else:
cube_temp = cpf
if cube.ndim == 4:
for j in range(cube.shape[0]):
cpf[j] = cube_inject_companions(cube_temp[j], psfn[j], angs,
flevel=-planet_parameter[i, 2, j],
plsc=plsc,
rad_dists=[planet_parameter[i, 0, j]],
n_branches=1,
theta=planet_parameter[i, 1, j],
imlib=imlib,
interpolation=interpolation,
verbose=False,
transmission=transmission)
else:
cpf = cube_inject_companions(cube_temp, psfn, angs,
flevel=-planet_parameter[i, 2], plsc=plsc,
rad_dists=[planet_parameter[i, 0]],
n_branches=1, theta=planet_parameter[i, 1],
imlib=imlib, interpolation=interpolation,
verbose=False, transmission=transmission)
return cpf
def radial_to_eq(r=1, t=0, rError=0, tError=0, display=False):
"""
Convert the position given in (r,t) into \delta RA and \delta DEC, as
well as the corresponding uncertainties.
t = 0 deg (resp. 90 deg) points toward North (resp. East).
Parameters
----------
r: float
The radial coordinate.
t: float
The angular coordinate.
rError: float
The error bar related to r.
tError: float
The error bar related to t.
display: boolean, optional
If True, a figure illustrating the error ellipse is displayed.
Returns
-------
out : tuple
((RA, RA error), (DEC, DEC error))
"""
ra = (r * np.sin(math.radians(t)))
dec = (r * np.cos(math.radians(t)))
u, v = (ra, dec)
nu = np.mod(np.pi/2-math.radians(t), 2*np.pi)
a, b = (rError,r*np.sin(math.radians(tError)))
beta = np.linspace(0, 2*np.pi, 5000)
x, y = (u + (a * np.cos(beta) * np.cos(nu) - b * np.sin(beta) * np.sin(nu)),
v + (b * np.sin(beta) * np.cos(nu) + a * np.cos(beta) * np.sin(nu)))
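# Sample the error ellipse (semi-axes a, b rotated by nu) and take its extent
# along RA and DEC as the corresponding error bars.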
raErrorInf = u - np.amin(x)
raErrorSup = np.amax(x) - u
decErrorInf = v - np.amin(y)
decErrorSup = np.amax(y) - v
if display:
plot(u,v,'ks',x,y,'r')
plot((r+rError) * np.cos(nu), (r+rError) * np.sin(nu),'ob',
(r-rError) * np.cos(nu), (r-rError) * np.sin(nu),'ob')
plot(r * np.cos(nu+math.radians(tError)),
r*np.sin(nu+math.radians(tError)),'ok')
plot(r * np.cos(nu-math.radians(tError)),
r*np.sin(nu-math.radians(tError)),'ok')
plot(0,0,'og',np.cos(np.linspace(0,2*np.pi,10000)) * r,
np.sin(np.linspace(0,2*np.pi,10000)) * r,'y')
plot([0,r*np.cos(nu+math.radians(tError*0))],
[0,r*np.sin(nu+math.radians(tError*0))],'k')
axes().set_aspect('equal')
lim = np.amax([a,b]) * 2.
xlim([ra-lim,ra+lim])
ylim([dec-lim,dec+lim])
gca().invert_xaxis()
show()
return ((ra,np.mean([raErrorInf,raErrorSup])),
(dec,np.mean([decErrorInf,decErrorSup])))
def cart_to_polar(y, x, ceny=0, cenx=0):
"""
Convert cartesian into polar coordinates (r,theta) with
respect to a given center (cenx,ceny).
Parameters
----------
x,y: float
The cartesian coordinates.
Returns
-------
out : tuple
The polar coordinates (r,theta) with respect to the (cenx,ceny).
Note that theta is given in degrees.
"""
r = np.sqrt((y-ceny)**2 + (x-cenx)**2)
theta = np.degrees(np.arctan2(y-ceny, x-cenx))
return r, np.mod(theta,360)
def polar_to_cart(r, theta, ceny=0, cenx=0):
"""
Convert polar coordinates with respect to the center (cenx,ceny) into
cartesian coordinates (x,y) with respect to the bottom left corner of the
image.
Parameters
----------
r,theta: float
The polar coordinates.
Returns
-------
out : tuple
The cartesian coordinates (x,y) with respect to the bottom left corner
of the image.
"""
x = r*np.cos(np.deg2rad(theta)) + cenx
y = r*np.sin(np.deg2rad(theta)) + ceny
return x,y
def ds9index_to_polar(y, x, ceny=0, cenx=0):
"""
Convert pixel index read on image displayed with DS9 into polar coordinates
(r,theta) with respect to a given center (cenx,ceny).
Note that ds9 index (x,y) = Python matrix index (y,x). Furthermore, when an
image M is displayed with DS9, the coordinates of the center of the pixel
associated with M[0,0] is (1,1). Then, there is a shift of (0.5, 0.5) of the
center of the coordinate system. As a conclusion, when you read
(x_ds9, y_ds9) on a image displayed with DS9, the corresponding position is
(y-0.5, x-0.5) and the associated pixel value is
M(np.floor(y)-1,np.floor(x)-1).
Parameters
----------
x,y: float
The pixel index in DS9
Returns
-------
out : tuple
The polar coordinates (r,theta) with respect to the (cenx,ceny).
Note that theta is given in degrees.
"""
r = np.sqrt((y-0.5-ceny)**2 + (x-0.5-cenx)**2)
theta = np.degrees(np.arctan2(y-0.5-ceny, x-0.5-cenx))
return r, np.mod(theta,360)
def polar_to_ds9index(r, theta, ceny=0, cenx=0):
"""
Convert position (r,theta) in an image with respect to a given center
(cenx,ceny) into position in the image displayed with DS9.
Note that ds9 index (x,y) = Python matrix index (y,x). Furthermore, when an
image M is displayed with DS9, the coordinates of the center of the pixel
associated with M[0,0] is (1,1). Then, there is a shift of (0.5, 0.5) of the
center of the coordinate system. As a conclusion, when you read
(x_ds9, y_ds9) on a image displayed with DS9, the corresponding position is
(y-0.5, x-0.5) and the associated pixel value is
M(np.floor(y)-1,np.floor(x)-1).
Parameters
----------
r,theta: float
The polar coordinates. Note that theta is given in degrees.
Returns
-------
out : tuple
The (x_ds9, y_ds9) pixel position in the image displayed with DS9.
"""
x_ds9 = r*np.cos(np.deg2rad(theta)) + 0.5 + cenx
y_ds9 = r*np.sin(np.deg2rad(theta)) + 0.5 + ceny
return x_ds9, y_ds9 | mit |
ChanChiChoi/scikit-learn | examples/exercises/plot_iris_exercise.py | 323 | 1602 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(np.float)
X_train = X[:int(.9 * n_sample)]
y_train = y[:int(.9 * n_sample)]
X_test = X[int(.9 * n_sample):]
y_test = y[int(.9 * n_sample):]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
ianatpn/nupictest | external/linux32/lib/python2.6/site-packages/matplotlib/pylab.py | 70 | 10245 | """
This is a procedural interface to the matplotlib object-oriented
plotting library.
The following plotting commands are provided; the majority have
Matlab(TM) analogs and similar arguments.
_Plotting commands
acorr - plot the autocorrelation function
annotate - annotate something in the figure
arrow - add an arrow to the axes
axes - Create a new axes
axhline - draw a horizontal line across axes
axvline - draw a vertical line across axes
axhspan - draw a horizontal bar across axes
axvspan - draw a vertical bar across axes
axis - Set or return the current axis limits
bar - make a bar chart
barh - a horizontal bar chart
broken_barh - a set of horizontal bars with gaps
box - set the axes frame on/off state
boxplot - make a box and whisker plot
cla - clear current axes
clabel - label a contour plot
clf - clear a figure window
clim - adjust the color limits of the current image
close - close a figure window
colorbar - add a colorbar to the current figure
cohere - make a plot of coherence
contour - make a contour plot
contourf - make a filled contour plot
csd - make a plot of cross spectral density
delaxes - delete an axes from the current figure
draw - Force a redraw of the current figure
errorbar - make an errorbar graph
figlegend - make legend on the figure rather than the axes
figimage - make a figure image
figtext - add text in figure coords
figure - create or change active figure
fill - make filled polygons
findobj - recursively find all objects matching some criteria
gca - return the current axes
gcf - return the current figure
gci - get the current image, or None
getp - get a handle graphics property
grid - set whether gridding is on
hist - make a histogram
hold - set the axes hold state
ioff - turn interaction mode off
ion - turn interaction mode on
isinteractive - return True if interaction mode is on
imread - load image file into array
imshow - plot image data
ishold - return the hold state of the current axes
legend - make an axes legend
loglog - a log log plot
matshow - display a matrix in a new figure preserving aspect
pcolor - make a pseudocolor plot
pcolormesh - make a pseudocolor plot using a quadrilateral mesh
pie - make a pie chart
plot - make a line plot
plot_date - plot dates
plotfile - plot column data from an ASCII tab/space/comma delimited file
pie - pie charts
polar - make a polar plot on a PolarAxes
psd - make a plot of power spectral density
quiver - make a direction field (arrows) plot
rc - control the default params
rgrids - customize the radial grids and labels for polar
savefig - save the current figure
scatter - make a scatter plot
setp - set a handle graphics property
semilogx - log x axis
semilogy - log y axis
show - show the figures
specgram - a spectrogram plot
spy - plot sparsity pattern using markers or image
stem - make a stem plot
subplot - make a subplot (numrows, numcols, axesnum)
subplots_adjust - change the params controlling the subplot positions of current figure
subplot_tool - launch the subplot configuration tool
suptitle - add a figure title
table - add a table to the plot
text - add some text at location x,y to the current axes
thetagrids - customize the radial theta grids and labels for polar
title - add a title to the current axes
xcorr - plot the autocorrelation function of x and y
xlim - set/get the xlimits
ylim - set/get the ylimits
xticks - set/get the xticks
yticks - set/get the yticks
xlabel - add an xlabel to the current axes
ylabel - add a ylabel to the current axes
autumn - set the default colormap to autumn
bone - set the default colormap to bone
cool - set the default colormap to cool
copper - set the default colormap to copper
flag - set the default colormap to flag
gray - set the default colormap to gray
hot - set the default colormap to hot
hsv - set the default colormap to hsv
jet - set the default colormap to jet
pink - set the default colormap to pink
prism - set the default colormap to prism
spring - set the default colormap to spring
summer - set the default colormap to summer
winter - set the default colormap to winter
spectral - set the default colormap to spectral
_Event handling
connect - register an event handler
disconnect - remove a connected event handler
_Matrix commands
cumprod - the cumulative product along a dimension
cumsum - the cumulative sum along a dimension
detrend - remove the mean or best fit line from an array
diag - the k-th diagonal of matrix
diff - the n-th difference of an array
eig - the eigenvalues and eigen vectors of v
eye - a matrix where the k-th diagonal is ones, else zero
find - return the indices where a condition is nonzero
fliplr - flip the rows of a matrix up/down
flipud - flip the columns of a matrix left/right
linspace - a linear spaced vector of N values from min to max inclusive
logspace - a log spaced vector of N values from min to max inclusive
meshgrid - repeat x and y to make regular matrices
ones - an array of ones
rand - an array from the uniform distribution [0,1]
randn - an array from the normal distribution
rot90 - rotate matrix k*90 degrees counterclockwise
squeeze - squeeze an array removing any dimensions of length 1
tri - a triangular matrix
tril - a lower triangular matrix
triu - an upper triangular matrix
vander - the Vandermonde matrix of vector x
svd - singular value decomposition
zeros - a matrix of zeros
_Probability
levypdf - The levy probability density function from the char. func.
normpdf - The Gaussian probability density function
rand - random numbers from the uniform distribution
randn - random numbers from the normal distribution
_Statistics
corrcoef - correlation coefficient
cov - covariance matrix
amax - the maximum along dimension m
mean - the mean along dimension m
median - the median along dimension m
amin - the minimum along dimension m
norm - the norm of vector x
prod - the product along dimension m
ptp - the max-min along dimension m
std - the standard deviation along dimension m
asum - the sum along dimension m
_Time series analysis
bartlett - M-point Bartlett window
blackman - M-point Blackman window
cohere - the coherence using average periodogram
csd - the cross spectral density using average periodogram
fft - the fast Fourier transform of vector x
hamming - M-point Hamming window
hanning - M-point Hanning window
hist - compute the histogram of x
kaiser - M length Kaiser window
psd - the power spectral density using average periodogram
sinc - the sinc function of array x
_Dates
date2num - convert python datetimes to numeric representation
drange - create an array of numbers for date plots
num2date - convert numeric type (float days since 0001) to datetime
_Other
angle - the angle of a complex array
griddata - interpolate irregularly distributed data to a regular grid
load - load ASCII data into array
polyfit - fit x, y to an n-th order polynomial
polyval - evaluate an n-th order polynomial
roots - the roots of the polynomial coefficients in p
save - save an array to an ASCII file
trapz - trapezoidal integration
__end
"""
import sys, warnings
from cbook import flatten, is_string_like, exception_to_str, popd, \
silent_list, iterable, dedent
import numpy as np
from numpy import ma
from matplotlib import mpl # pulls in most modules
from matplotlib.dates import date2num, num2date,\
datestr2num, strpdate2num, drange,\
epoch2num, num2epoch, mx2num,\
DateFormatter, IndexDateFormatter, DateLocator,\
RRuleLocator, YearLocator, MonthLocator, WeekdayLocator,\
DayLocator, HourLocator, MinuteLocator, SecondLocator,\
rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, MONTHLY,\
WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY, relativedelta
import matplotlib.dates
# bring all the symbols in so folks can import them from
# pylab in one fell swoop
from matplotlib.mlab import window_hanning, window_none,\
conv, detrend, detrend_mean, detrend_none, detrend_linear,\
polyfit, polyval, entropy, normpdf, griddata,\
levypdf, find, trapz, prepca, rem, norm, orth, rank,\
sqrtm, prctile, center_matrix, rk4, exp_safe, amap,\
sum_flat, mean_flat, rms_flat, l1norm, l2norm, norm, frange,\
diagonal_matrix, base_repr, binary_repr, log2, ispower2,\
bivariate_normal, load, save
from matplotlib.mlab import stineman_interp, slopes, \
stineman_interp, inside_poly, poly_below, poly_between, \
is_closed_polygon, path_length, distances_along_curve, vector_lengths
from numpy import *
from numpy.fft import *
from numpy.random import *
from numpy.linalg import *
from matplotlib.mlab import window_hanning, window_none, conv, detrend, demean, \
detrend_mean, detrend_none, detrend_linear, entropy, normpdf, levypdf, \
find, longest_contiguous_ones, longest_ones, prepca, prctile, prctile_rank, \
center_matrix, rk4, bivariate_normal, get_xyz_where, get_sparse_matrix, dist, \
dist_point_to_segment, segments_intersect, fftsurr, liaupunov, movavg, \
save, load, exp_safe, \
amap, rms_flat, l1norm, l2norm, norm_flat, frange, diagonal_matrix, identity, \
base_repr, binary_repr, log2, ispower2, fromfunction_kw, rem, norm, orth, rank, sqrtm,\
mfuncC, approx_real, rec_append_field, rec_drop_fields, rec_join, csv2rec, rec2csv, isvector
from matplotlib.pyplot import *
# provide the recommended module abbrevs in the pylab namespace
import matplotlib.pyplot as plt
import numpy as np
| gpl-3.0 |
kcyu1993/ML_course_kyu | projects/project1/scripts/model.py | 1 | 19450 | from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
import copy
from data_utils import build_k_indices
from learning_model import *
from regularizer import *
from helpers import save_numpy_array
import numpy as np
class Model(object):
"""
Author: Kaicheng Yu
Machine learning model engine
Implement the optimizers
sgd
normal equations
cross-validation of given parameters
Abstract method:
__call__ produce the raw prediction, use the latest weight obtained by training
predict produce prediction values, could take weight as input
get_gradient define gradient here, including the gradient for regularizer
normalequ define normal equations
Support:
L1, L2 regularization
Due to the distribution of work, only LogisticRegression is fully tested for
fitting data, and cross-validation.
LinearRegression model should also work but not fully tested.
The goal of this class is not specific to this learning project; it is meant to be
reusable and scalable to other problems and models.
"""
def __init__(self, train_data, validation=None, initial_weight=None,
loss_function_name='mse', cal_weight='gradient',
regularizer=None, regularizer_p=None):
"""
Initializer of all learning models.
:param train_data: training data.
:param validation: optional (valid_y, valid_x) tuple used to track validation loss
"""
self.train_x = train_data[1]
self.train_y = train_data[0]
self.set_valid(validation)
''' Define the progress of history here '''
self.losses = []
self.iterations = 0
self.weights = []
self.misclass_rate = []
''' Define loss, weight calculation, regularizer '''
self.loss_function = get_loss_function(loss_function_name)
self.loss_function_name = loss_function_name
self.calculate_weight = cal_weight
self.regularizer = Regularizer.get_regularizer(regularizer, regularizer_p)
self.regularizer_p = regularizer_p
# Asserting degree
if len(self.train_x.shape) > 1:
degree = self.train_x.shape[1]
else:
degree = 1
# Initialize the weight for linear model.
if initial_weight is not None:
self.weights.append(initial_weight)
else:
self.weights.append(np.random.rand(degree))
def set_valid(self, validation):
# Set validation here.
self.validation = False
self.valid_x = None
self.valid_y = None
self.valid_losses = None
self.valid_misclass_rate = None
if validation is not None:
(valid_y, valid_x) = validation
self.valid_x = valid_x
self.valid_y = valid_y
self.validation = True
self.valid_losses = []
self.valid_misclass_rate = []
@abstractmethod
def __call__(self, **kwargs):
"""Define the fit function and get prediction"""
raise NotImplementedError
@abstractmethod
def get_gradient(self, y, x, weight):
raise NotImplementedError
@abstractmethod
def predict(self, x, weight):
raise NotImplementedError
@abstractmethod
def normalequ(self, **kwargs):
''' define normal equation method to calculate optimal weights'''
raise NotImplementedError
def compute_weight(self, y, x, test_x=None, test_y=None, **kwargs):
""" Return weight under given parameter """
model = copy.copy(self)
model.__setattr__('train_y', y)
model.__setattr__('train_x', x)
if test_x is not None and test_y is not None:
model.set_valid((test_y, test_x))
_kwargs = []
for name, value in kwargs.items():
# Recognize the regularization parameter
if name is "regularizer_p":
model.__setattr__(name, value)
model.regularizer.set_parameter(value)
else:
_kwargs.append((name, value))
_kwargs = dict(_kwargs)
if model.calculate_weight == 'gradient':
return model.sgd(**_kwargs)
# elif model.calculate_weight is 'newton':
# return model.newton(**_kwargs)
elif model.calculate_weight == 'normalequ':
return model.normalequ(**_kwargs)
def get_history(self):
"""
Get the training history of current model
:return: list as [iterations, [losses], [weights], [mis_class]]
"""
if self.validation:
return self.iterations, (self.losses, self.valid_losses), \
(self.weights), (self.misclass_rate, self.valid_misclass_rate)
return self.iterations, self.losses, self.weights, self.misclass_rate
def train(self, optimizer='sgd', loss_function='mse', **kwargs):
"""
Train function to perform one time training
Will based optimizer to select.
TODO: Would add 'newton' in the future
This
:param optimizer: only support 'sgd'
:param loss_function: loss_function name {mse, mae, logistic}
:param kwargs: passed into sgd
:return: best weight
"""
self.loss_function = get_loss_function(loss_function)
self.loss_function_name = loss_function
if optimizer == 'sgd':
self.sgd(**kwargs)
return self.weights[-1]
"""===================================="""
""" Beginning of the optimize Routines """
"""===================================="""
def sgd(self, lr=0.01, decay=0.5, max_iters=1000,
batch_size=128, early_stop=150, decay_intval=50, decay_lim=9):
"""
Define the SGD algorithm here
Implements learning-rate decay and early stopping.
:param lr: learning rate
:param decay: factor by which the learning rate is decayed after repeated epochs without improvement
:param max_iters: maximum iterations
:param batch_size: batch_size
:param early_stop: early_stop after no improvement
:return: final weight vector
"""
np.set_printoptions(precision=4)
w = self.weights[0]
loss = self.compute_loss(self.train_y, self.train_x, w)
best_loss = loss
best_counter = 0
decay_counter = 0
# print("initial loss is {} ".format(loss))
for epoch in range(max_iters):
for batch_y, batch_x in batch_iter(self.train_y, self.train_x, batch_size):
grad = self.get_gradient(batch_y, batch_x, w)
w = w - lr * grad
loss = self.compute_loss(self.train_y, self.train_x, w)
mis_class = self.compute_metrics(self.train_y, self.train_x, w)
self.weights.append(w)
self.losses.append(loss)
self.misclass_rate.append(mis_class)
if self.validation is True:
valid_loss = self.compute_loss(self.valid_y, self.valid_x, w)
valid_mis_class = self.compute_metrics(self.valid_y, self.valid_x, w)
self.valid_losses.append(valid_loss)
self.valid_misclass_rate.append(valid_mis_class)
# Display every 25 epoch
if (epoch + 1) % 25 == 0:
print('Epoch {e} in {m}'.format(e=epoch + 1, m=max_iters), end="\t")
if self.validation is True:
# print('\tTrain Loss {0:0.4f}, \tTrain mis-class {0:0.4f}, '
# '\tvalid loss {0:0.4f}, \tvalid mis-class {0:0.4f}'.
# format(loss, mis_class, valid_loss, valid_mis_class))
print('\tTrain Loss {}, \tTrain mis-class {}, '
'\tvalid loss {}, \tvalid mis-class {}'.
format(loss, mis_class, valid_loss, valid_mis_class))
else:
print('\tTrain Loss {}, \tTrain mis-class {}'.
format(loss, mis_class))
# judge the performance
if best_loss - loss > 0.000001:
best_loss = loss
best_counter = 0
else:
best_counter += 1
if best_counter > early_stop:
print("Learning early stop since loss not improving for {} epoch.".format(best_counter))
break
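# Decay the learning rate after every `decay_intval` consecutive epochs without
# improvement, up to `decay_lim` decays.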
if best_counter % decay_intval == 0:
print("weight decay by {}".format(decay))
lr *= decay
decay_counter += 1
if decay_counter > decay_lim:
print("decay {} times, stop".format(decay_lim))
break
return self.weights[-1]
def newton(self, lr=0.01, max_iters=100):
# TODO: implement newton method later
raise NotImplementedError
def cross_validation(self, cv, lambdas, lambda_name, seed=1, skip=False, plot=False, **kwargs):
"""
Cross validation method to acquire the best prediction parameters.
It will use the train_x y as data and do K-fold cross validation.
:param cv: cross validation times
:param lambdas: array of lambdas to be validated
:param lambda_name: the lambda name tag
:param seed: random seed
        :param skip: if True, run only the first fold of each cross validation
        :param plot: plot the cross-validation curve; if the machine does not
            support matplotlib.pyplot, set this to False.
:param kwargs: other parameters could pass into compute_weight
:return: best weights, best_lambda, (training error, valid error)
"""
np.set_printoptions(precision=4)
k_indices = build_k_indices(self.train_y, cv, seed)
# define lists to store the loss of training data and test data
err_tr = []
err_te = []
weights = []
print("K-fold ({}) cross validation to examine [{}]".
format(cv, lambdas))
for lamb in lambdas:
print("For lambda: {}".format(lamb))
_mse_tr = []
_mse_te = []
_weight = []
for k in range(cv):
print('Cross valid iteration {}'.format(k))
weight, loss_tr, loss_te = self._loop_cross_validation(self.train_y, self.train_x,
k_indices, k,
lamb, lambda_name, **kwargs)
_mse_tr += [loss_tr]
_mse_te += [loss_te]
_weight.append(weight)
if skip:
break
avg_tr = np.average(_mse_tr)
avg_te = np.average(_mse_te)
err_tr += [avg_tr]
err_te += [avg_te]
weights.append(_weight)
print("\t train error {}, \t valid error {}".
format(avg_tr, avg_te))
# Select the best parameter during the cross validations.
print('K-fold cross validation result: \n {} \n {}'.
format(err_tr, err_te))
# Select the best based on least err_te
min_err_te = np.argmin(err_te)
print('Best err_te result {}, lambda {}'.
format(err_te[min_err_te], lambdas[min_err_te]))
if plot:
from plots import cross_validation_visualization
cross_validation_visualization(lambdas, err_tr, err_te, title=lambda_name,
error_name=self.loss_function_name)
else:
save_numpy_array(lambdas, err_tr, err_te, names=['lambda', 'err_tr', 'err_te'], title=self.regularizer.name)
return weights[min_err_te], lambdas[min_err_te], (err_tr, err_te)
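    # A minimal usage sketch (illustrative values, not part of the original
    # API): assuming `model` is an already constructed subclass instance and
    # numpy is available as np,
    #
    #   weights, best_lambda, (err_tr, err_te) = model.cross_validation(
    #       cv=4, lambdas=np.logspace(-4, 0, 10), lambda_name='regularizer_p')
    #
    # returns the per-fold weights for the lambda with the lowest validation
    # error, together with the train/validation error curves.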
def _loop_cross_validation(self, y, x, k_indices, k, lamb, lambda_name, **kwargs):
"""
Single loop of cross validation
:param y: train labels
:param x: train data
:param k_indices: indices array
        :param k: index of the fold used as the validation set
:param lamb: lambda to use
:param lambda_name: lambda_name to pass into compute weight
:return: weight, mis_tr, mis_te
"""
train_ind = np.concatenate((k_indices[:k], k_indices[k + 1:]), axis=0)
train_ind = np.reshape(train_ind, (train_ind.size,))
test_ind = k_indices[k]
# Note: different from np.ndarray, tuple is name[index,]
# ndarray is name[index,:]
train_x = x[train_ind,]
train_y = y[train_ind,]
test_x = x[test_ind,]
test_y = y[test_ind,]
# Insert one more kwargs item
kwargs[lambda_name] = lamb
weight = self.compute_weight(train_y, train_x, test_x, test_y, **kwargs)
# Compute the metrics and return
loss_tr = self.compute_metrics(train_y, train_x, weight)
loss_te = self.compute_metrics(test_y, test_x, weight)
return weight, loss_tr, loss_te
def compute_metrics(self, target, data, weight):
"""
Compute the following metrics
Misclassification rate
"""
pred = self.predict(data, weight)
assert len(pred) == len(target)
# Calculate the mis-classification rate:
N = len(pred)
pred = np.reshape(pred, (N,))
target = np.reshape(target, (N,))
nb_misclass = np.count_nonzero(target - pred)
return nb_misclass / N
def compute_loss(self, y, x, weight):
return self.loss_function(y, x, weight)
class LogisticRegression(Model):
""" Logistic regression """
def __init__(self, train, validation=None, initial_weight=None,
loss_function_name='logistic',
calculate_weight='gradient',
regularizer=None, regularizer_p=None):
"""
Constructor of Logistic Regression model
:param train: tuple (y, x)
:param validation: tuple (y, x)
:param initial_weight: weight vector, dim align x
        :param loss_function_name: loss function name, evaluated as f(y, x, weight)
        :param calculate_weight: weight computation method, 'gradient' or 'normalequ'
        :param regularizer: "Ridge" || "Lasso"
        :param regularizer_p: regularization strength
"""
# Initialize the super class with given data.
# Transform the y into {0,1}
y, tx = train
y[np.where(y < 0)] = 0
train = (y, tx)
if validation:
val_y, val_tx = validation
val_y[np.where(val_y < 0)] = 0
validation = (val_y, val_tx)
super(LogisticRegression, self).__init__(train, validation,
initial_weight=initial_weight,
loss_function_name=loss_function_name,
cal_weight=calculate_weight,
regularizer=regularizer,
regularizer_p=regularizer_p)
# Set predicted label
self.pred_label = [-1, 1]
def __call__(self, x, weight=None):
"""
        Evaluate the fitted model on x and return the
        predicted probability of the positive class.
"""
if weight is None:
weight = self.weights[-1]
return sigmoid(np.dot(x, weight))
def get_gradient(self, y, x, weight):
""" calculate gradient given data and weight """
y = np.reshape(y, (len(y),))
return np.dot(x.T, sigmoid(np.dot(x, weight)) - y) \
+ self.regularizer.get_gradient(weight)
def get_hessian(self, y, x, weight):
# TODO: implement hessian for newton method
raise NotImplementedError
def predict(self, x, weight=None, cutting=0.5):
""" Prediction of event {0,1} """
if weight is None: weight = self.weights[-1]
pred = sigmoid(np.dot(x, weight))
pred[np.where(pred <= cutting)] = 0
pred[np.where(pred > cutting)] = 1
return pred
def predict_label(self, x, weight=None, cutting=0.5, predict_label=None):
""" Prediction result with labels """
if predict_label is None:
predict_label = self.pred_label
if weight is None: weight = self.weights[-1]
pred = self.predict(x, weight, cutting)
pred[np.where(pred == 0)] = predict_label[0]
pred[np.where(pred == 1)] = predict_label[1]
return pred
def train(self, loss_function='logistic',
lr=0.1, decay=0.5, max_iters=3000, batch_size=128, **kwargs):
""" Make the default loss logistic, set default parameters """
return super(LogisticRegression, self).train('sgd', loss_function,
lr=lr,
decay=decay, max_iters=max_iters,
batch_size=batch_size, **kwargs)
def normalequ(self, **kwargs):
""" Should never call """
raise NotImplementedError
class LinearRegression(Model):
""" Linear regression model
    This is not fully tested, especially the cross-validation; please refer
    to implementations.py for the linear model.
"""
def __init__(self, train, validation=None, initial_weight=None,
regularizer=None, regularizer_p=None,
loss_function_name='mse', calculate_weight='normalequ'):
# Initialize the super class with given data.
super(LinearRegression, self).__init__(train, validation,
initial_weight=initial_weight,
loss_function_name=loss_function_name,
cal_weight=calculate_weight,
regularizer=regularizer,
regularizer_p=regularizer_p)
def __call__(self, x):
""" calulate prediction based on latest result """
return np.dot(x, self.weights[-1])
def get_gradient(self, batch_y, batch_x, weight):
""" return gradient of linear model, including the regularizer """
N = batch_y.shape[0]
        grad = np.zeros(len(weight))  # start from zero so the running sum below is correct
for index in range(N):
_y = batch_y[index]
_x = batch_x[index]
grad = grad + gradient_least_square(_y, _x, weight, self.loss_function_name)
grad /= N
grad += self.regularizer.get_gradient(weight)
return grad
def predict(self, x, weight):
""" Prediction function, predicting final result """
pred = np.dot(x, weight)
pred[np.where(pred <= 0)] = -1
pred[np.where(pred > 0)] = 1
return pred
def normalequ(self):
""" Normal equation to get parameters """
tx = self.train_x
y = self.train_y
if self.regularizer is None:
return np.linalg.solve(np.dot(tx.T, tx), np.dot(tx.T, y))
        elif self.regularizer.name == 'Ridge':
G = np.eye(tx.shape[1])
G[0, 0] = 0
hes = np.dot(tx.T, tx) + self.regularizer_p * G
return np.linalg.solve(hes, np.dot(tx.T, y))
else:
raise NotImplementedError
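# =========================================================================
# Minimal usage sketch (illustrative only, not part of the original module):
# the synthetic data, split ratio and hyper-parameter values below are
# assumptions made for the example.
# =========================================================================
if __name__ == '__main__':
    np.random.seed(0)
    n, d = 200, 5
    x = np.hstack([np.ones((n, 1)), np.random.randn(n, d)])  # bias column + features
    y = np.sign(np.dot(x, np.random.randn(d + 1)) + 0.1 * np.random.randn(n))
    split = int(0.8 * n)
    model = LogisticRegression(train=(y[:split], x[:split]),
                               validation=(y[split:], x[split:]),
                               regularizer='Ridge', regularizer_p=0.01)
    best_w = model.train(lr=0.1, decay=0.5, max_iters=100, batch_size=32)
    iters, losses, weight_hist, misclass = model.get_history()
    print("final training loss {}, final training mis-class {}".format(
        losses[0][-1], misclass[0][-1]))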
| mit |
huongttlan/bokeh | bokeh/compat/mplexporter/renderers/base.py | 44 | 14355 | import warnings
import itertools
from contextlib import contextmanager
import numpy as np
from matplotlib import transforms
from .. import utils
from .. import _py3k_compat as py3k
class Renderer(object):
@staticmethod
def ax_zoomable(ax):
return bool(ax and ax.get_navigate())
@staticmethod
def ax_has_xgrid(ax):
        return bool(ax and ax.xaxis._gridOnMajor and ax.xaxis.get_gridlines())
@staticmethod
def ax_has_ygrid(ax):
return bool(ax and ax.yaxis._gridOnMajor and ax.yaxis.get_gridlines())
@property
def current_ax_zoomable(self):
return self.ax_zoomable(self._current_ax)
@property
def current_ax_has_xgrid(self):
return self.ax_has_xgrid(self._current_ax)
@property
def current_ax_has_ygrid(self):
return self.ax_has_ygrid(self._current_ax)
@contextmanager
def draw_figure(self, fig, props):
if hasattr(self, "_current_fig") and self._current_fig is not None:
warnings.warn("figure embedded in figure: something is wrong")
self._current_fig = fig
self._fig_props = props
self.open_figure(fig=fig, props=props)
yield
self.close_figure(fig=fig)
self._current_fig = None
self._fig_props = {}
@contextmanager
def draw_axes(self, ax, props):
if hasattr(self, "_current_ax") and self._current_ax is not None:
warnings.warn("axes embedded in axes: something is wrong")
self._current_ax = ax
self._ax_props = props
self.open_axes(ax=ax, props=props)
yield
self.close_axes(ax=ax)
self._current_ax = None
self._ax_props = {}
@contextmanager
def draw_legend(self, legend, props):
self._current_legend = legend
self._legend_props = props
self.open_legend(legend=legend, props=props)
yield
self.close_legend(legend=legend)
self._current_legend = None
self._legend_props = {}
# Following are the functions which should be overloaded in subclasses
def open_figure(self, fig, props):
"""
Begin commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The Figure which will contain the ensuing axes and elements
props : dictionary
The dictionary of figure properties
"""
pass
def close_figure(self, fig):
"""
Finish commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The figure which is finished being drawn.
"""
pass
def open_axes(self, ax, props):
"""
Begin commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which will contain the ensuing axes and elements
props : dictionary
The dictionary of axes properties
"""
pass
def close_axes(self, ax):
"""
Finish commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which is finished being drawn.
"""
pass
def open_legend(self, legend, props):
"""
        Begin commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend that will contain the ensuing elements
props : dictionary
The dictionary of legend properties
"""
pass
def close_legend(self, legend):
"""
Finish commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend which is finished being drawn
"""
pass
def draw_marked_line(self, data, coordinates, linestyle, markerstyle,
label, mplobj=None):
"""Draw a line that also has markers.
If this isn't reimplemented by a renderer object, by default, it will
make a call to BOTH draw_line and draw_markers when both markerstyle
and linestyle are not None in the same Line2D object.
"""
if linestyle is not None:
self.draw_line(data, coordinates, linestyle, label, mplobj)
if markerstyle is not None:
self.draw_markers(data, coordinates, markerstyle, label, mplobj)
def draw_line(self, data, coordinates, style, label, mplobj=None):
"""
Draw a line. By default, draw the line via the draw_path() command.
Some renderers might wish to override this and provide more
fine-grained behavior.
In matplotlib, lines are generally created via the plt.plot() command,
though this command also can create marker collections.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the line.
mplobj : matplotlib object
the matplotlib plot element which generated this line
"""
pathcodes = ['M'] + (data.shape[0] - 1) * ['L']
pathstyle = dict(facecolor='none', **style)
pathstyle['edgecolor'] = pathstyle.pop('color')
pathstyle['edgewidth'] = pathstyle.pop('linewidth')
self.draw_path(data=data, coordinates=coordinates,
pathcodes=pathcodes, style=pathstyle, mplobj=mplobj)
@staticmethod
def _iter_path_collection(paths, path_transforms, offsets, styles):
"""Build an iterator over the elements of the path collection"""
N = max(len(paths), len(offsets))
if not path_transforms:
path_transforms = [np.eye(3)]
edgecolor = styles['edgecolor']
if np.size(edgecolor) == 0:
edgecolor = ['none']
facecolor = styles['facecolor']
if np.size(facecolor) == 0:
facecolor = ['none']
elements = [paths, path_transforms, offsets,
edgecolor, styles['linewidth'], facecolor]
it = itertools
return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N)
def draw_path_collection(self, paths, path_coordinates, path_transforms,
offsets, offset_coordinates, offset_order,
styles, mplobj=None):
"""
Draw a collection of paths. The paths, offsets, and styles are all
iterables, and the number of paths is max(len(paths), len(offsets)).
By default, this is implemented via multiple calls to the draw_path()
function. For efficiency, Renderers may choose to customize this
implementation.
Examples of path collections created by matplotlib are scatter plots,
histograms, contour plots, and many others.
Parameters
----------
paths : list
list of tuples, where each tuple has two elements:
(data, pathcodes). See draw_path() for a description of these.
path_coordinates: string
the coordinates code for the paths, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
path_transforms: array_like
an array of shape (*, 3, 3), giving a series of 2D Affine
transforms for the paths. These encode translations, rotations,
and scalings in the standard way.
offsets: array_like
An array of offsets of shape (N, 2)
offset_coordinates : string
the coordinates code for the offsets, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
offset_order : string
either "before" or "after". This specifies whether the offset
is applied before the path transform, or after. The matplotlib
backend equivalent is "before"->"data", "after"->"screen".
styles: dictionary
A dictionary in which each value is a list of length N, containing
the style(s) for the paths.
mplobj : matplotlib object
the matplotlib plot element which generated this collection
"""
if offset_order == "before":
raise NotImplementedError("offset before transform")
for tup in self._iter_path_collection(paths, path_transforms,
offsets, styles):
(path, path_transform, offset, ec, lw, fc) = tup
vertices, pathcodes = path
path_transform = transforms.Affine2D(path_transform)
vertices = path_transform.transform(vertices)
# This is a hack:
if path_coordinates == "figure":
path_coordinates = "points"
style = {"edgecolor": utils.color_to_hex(ec),
"facecolor": utils.color_to_hex(fc),
"edgewidth": lw,
"dasharray": "10,0",
"alpha": styles['alpha'],
"zorder": styles['zorder']}
self.draw_path(data=vertices, coordinates=path_coordinates,
pathcodes=pathcodes, style=style, offset=offset,
offset_coordinates=offset_coordinates,
mplobj=mplobj)
def draw_markers(self, data, coordinates, style, label, mplobj=None):
"""
Draw a set of markers. By default, this is done by repeatedly
calling draw_path(), but renderers should generally overload
this method to provide a more efficient implementation.
In matplotlib, markers are created using the plt.plot() command.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the markers.
mplobj : matplotlib object
the matplotlib plot element which generated this marker collection
"""
vertices, pathcodes = style['markerpath']
pathstyle = dict((key, style[key]) for key in ['alpha', 'edgecolor',
'facecolor', 'zorder',
'edgewidth'])
pathstyle['dasharray'] = "10,0"
for vertex in data:
self.draw_path(data=vertices, coordinates="points",
pathcodes=pathcodes, style=pathstyle,
offset=vertex, offset_coordinates=coordinates,
mplobj=mplobj)
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
"""
Draw text on the image.
Parameters
----------
text : string
The text to draw
position : tuple
The (x, y) position of the text
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the text.
text_type : string or None
if specified, a type of text such as "xlabel", "ylabel", "title"
mplobj : matplotlib object
the matplotlib plot element which generated this text
"""
raise NotImplementedError()
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
"""
Draw a path.
In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
'figure' for figure (pixel) coordinates, or "points" for raw
point coordinates (useful in conjunction with offsets, below).
pathcodes : list
A list of single-character SVG pathcodes associated with the data.
Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
'S', 's', 'C', 'c', 'Z', 'z']
See the SVG specification for details. Note that some path codes
consume more than one datapoint (while 'Z' consumes none), so
in general, the length of the pathcodes list will not be the same
as that of the data array.
style : dictionary
a dictionary specifying the appearance of the line.
offset : list (optional)
the (x, y) offset of the path. If not given, no offset will
be used.
offset_coordinates : string (optional)
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
the matplotlib plot element which generated this path
"""
raise NotImplementedError()
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
"""
Draw an image.
Parameters
----------
imdata : string
base64 encoded png representation of the image
extent : list
the axes extent of the image: [xmin, xmax, ymin, ymax]
coordinates: string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the image
mplobj : matplotlib object
the matplotlib plot object which generated this image
"""
raise NotImplementedError()
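# ---------------------------------------------------------------------------
# Minimal sketch of a concrete renderer (an illustration, not part of
# mplexporter itself): it simply records the drawing commands it receives,
# using the abstract interface documented above.
# ---------------------------------------------------------------------------
class RecordingRenderer(Renderer):
    """Collect the exported drawing commands into a list for inspection."""
    def __init__(self):
        self.calls = []
    def open_figure(self, fig, props):
        self.calls.append(('open_figure', props))
    def close_figure(self, fig):
        self.calls.append(('close_figure',))
    def open_axes(self, ax, props):
        self.calls.append(('open_axes', props))
    def close_axes(self, ax):
        self.calls.append(('close_axes',))
    def draw_path(self, data, coordinates, pathcodes, style,
                  offset=None, offset_coordinates="data", mplobj=None):
        # draw_line, draw_markers and draw_path_collection all funnel here
        self.calls.append(('draw_path', coordinates, list(pathcodes), style))
    def draw_text(self, text, position, coordinates, style,
                  text_type=None, mplobj=None):
        self.calls.append(('draw_text', text, position))
    def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
        self.calls.append(('draw_image', extent))
# (an Exporter would then feed a matplotlib Figure through these calls)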
| bsd-3-clause |
joelfrederico/SciSalt | scisalt/qt/mplwidget.py | 1 | 13557 | from PyQt4 import QtGui
from PyQt4 import QtCore
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as _FigureCanvas
from matplotlib.backends.backend_qt4 import NavigationToolbar2QT as _NavigationToolbar
import matplotlib as _mpl
import numpy as _np
from .Rectangle import Rectangle
import pdb
import traceback
import logging
loggerlevel = logging.DEBUG
logger = logging.getLogger(__name__)
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Slider_and_Text(QtGui.QWidget):
valueChanged = QtCore.pyqtSignal(int)
sliderReleased = QtCore.pyqtSignal(int)
def __init__(self, parent=None):
QtGui.QWidget.__init__(self)
self.setMaximumHeight(40)
# Enable tracking by default
self._tracking = True
self.hLayout = QtGui.QHBoxLayout()
self.slider = QtGui.QSlider()
self.leftbutton = QtGui.QPushButton()
self.leftbutton.setText("<")
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.leftbutton.sizePolicy().hasHeightForWidth())
# self.leftbutton.setSizePolicy(sizePolicy)
self.leftbutton.clicked.connect(self._subone)
self.rightbutton = QtGui.QPushButton()
self.rightbutton.setText(">")
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.rightbutton.sizePolicy().hasHeightForWidth())
# self.rightbutton.setSizePolicy(sizePolicy)
self.rightbutton.clicked.connect(self._addone)
self.v = QtGui.QIntValidator()
self.box = QtGui.QLineEdit()
self.box.setValidator(self.v)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.box.sizePolicy().hasHeightForWidth())
# self.box.setSizePolicy(sizePolicy)
self.hLayout.addWidget(self.leftbutton)
self.hLayout.addWidget(self.slider)
self.hLayout.addWidget(self.box)
self.hLayout.addWidget(self.rightbutton)
self.setLayout(self.hLayout)
self.slider.valueChanged.connect(self._sliderChanged)
self.box.editingFinished.connect(self._textChanged)
self.setOrientation(QtCore.Qt.Horizontal)
# Connect release so tracking works as expected
self.slider.sliderReleased.connect(self._sliderReleased)
def _addone(self):
self.value = self.value + 1
self.valueChanged.emit(self.value)
def _subone(self):
self.value = self.value - 1
self.valueChanged.emit(self.value)
def _sliderReleased(self):
print('Released')
        self.sliderReleased.emit(self.slider.value())
def setTracking(self, val):
print('Tracking set to {}'.format(val))
self._tracking = val
def setMaximum(self, val):
self.slider.setMaximum(val)
self.v.setRange(self.slider.minimum(), self.slider.maximum())
self.box.setValidator(self.v)
def setMinimum(self, val):
self.slider.setMinimum(val)
self.v.setRange(self.slider.minimum(), self.slider.maximum())
self.box.setValidator(self.v)
def _sliderChanged(self, val):
self.box.setText(str(val))
if self._tracking:
try:
self.slider.sliderReleased.disconnect()
except:
pass
self.valueChanged.emit(val)
else:
try:
self.slider.sliderReleased.disconnect()
except:
pass
self.slider.sliderReleased.connect(self._sliderChanged_notracking)
def _sliderChanged_notracking(self):
val = self.slider.value()
# print('Value to be emitted is {}'.format(val))
self.valueChanged.emit(val)
def _textChanged(self):
val = self.box.text()
self.slider.setValue(int(val))
self._sliderChanged_notracking()
def setOrientation(self, *args, **kwargs):
self.slider.setOrientation(*args, **kwargs)
def _getValue(self):
return self.slider.value()
def _setValue(self, val):
self.slider.setValue(val)
self.box.setText(str(val))
value = property(_getValue, _setValue)
def setValue(self, val):
self.slider.setValue(val)
self.box.setText(str(val))
# self.valueChanged.emit(val)
class Mpl_Plot(_FigureCanvas):
def __init__(self, parent=None):
# Initialize things
self.fig = _mpl.figure.Figure()
_FigureCanvas.__init__(self, self.fig)
_FigureCanvas.setSizePolicy(self, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
_FigureCanvas.updateGeometry(self)
# Create axes
self.ax = self.fig.add_subplot(111)
def plot(self, *args, **kwargs):
self.ax.clear()
self.ax.plot(*args, **kwargs)
self.ax.ticklabel_format(style='sci', scilimits=(0, 0), axis='y')
self.ax.figure.canvas.draw()
class Mpl_Image(QtGui.QWidget):
# Signal for when the rectangle is changed
rectChanged = QtCore.pyqtSignal(Rectangle)
def __init__(self, parent=None, rectbool = True, toolbarbool=False, image=None):
# Initialize things
QtGui.QWidget.__init__(self)
self.rectbool = rectbool
self._clim_min = 0
self._clim_max = 3600
self._pressed = False
# Add a vertical layout
self.vLayout = QtGui.QVBoxLayout()
# Add a figure
self.fig = _mpl.figure.Figure()
# Add a canvas containing the fig
self.canvas = _FigureCanvas(self.fig)
_FigureCanvas.setSizePolicy(self.canvas, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
_FigureCanvas.updateGeometry(self.canvas)
# Setup the layout
if toolbarbool:
self.toolbar = _NavigationToolbar(self.canvas, self)
self.toolbar.setMaximumHeight(20)
self.vLayout.addWidget(self.toolbar)
self.vLayout.addWidget(self.canvas)
self.setLayout(self.vLayout)
# Create axes
self.ax = self.fig.add_subplot(111)
# Include rectangle functionality
if rectbool:
self.fig.canvas.mpl_connect('button_press_event', self.on_press)
self.fig.canvas.mpl_connect('button_release_event', self.on_release)
self.Rectangle = Rectangle(
x = -10 ,
y = 0 ,
width = 0 ,
height = 3 ,
axes = self.ax
)
# Add image
self.image = image
def _get_img(self):
return self._image
def _set_img(self, image):
self.ax.clear()
self._image = image
if image is not None:
self._imgplot = self.ax.imshow(image, interpolation='none')
if self.rectbool:
self.ax.add_patch(self.Rectangle.get_rect())
# imagemax = _np.max(_np.max(image))
self.set_clim(self._clim_min, self._clim_max)
image = property(_get_img, _set_img)
def set_clim(self, clim_min, clim_max):
if self.image is not None:
self._clim_min = clim_min
self._clim_max = clim_max
self._imgplot.set_clim(clim_min, clim_max)
self.ax.figure.canvas.draw()
def on_press(self, event):
if self.toolbar._active is None:
self._pressed = True
self.x0 = event.xdata
self.y0 = event.ydata
logger.log(level=loggerlevel, msg='Pressed: x0: {}, y0: {}'.format(self.x0, self.y0))
def on_release(self, event):
if self._pressed:
self._pressed = False
print('release')
self.x1 = event.xdata
self.y1 = event.ydata
width = self.x1 - self.x0
height = self.y1 - self.y0
logger.log(level=loggerlevel, msg='Released: x0: {}, y0: {}, x1: {}, y1: {}, width: {}, height: {}'.format(
self.x0 ,
self.y0 ,
self.x1 ,
self.y1 ,
width ,
height
)
)
self.Rectangle.set_xy((self.x0, self.y0))
self.Rectangle.set_width(width)
self.Rectangle.set_height(height)
self.ax.figure.canvas.draw()
self.rectChanged.emit(self.Rectangle)
# print(self.rect)
def zoom_rect(self, border=None, border_px=None):
# ======================================
# Get x coordinates
# ======================================
x0 = self.Rectangle.get_x()
width = self.Rectangle.get_width()
x1 = x0+width
# ======================================
# Get y coordinates
# ======================================
y0 = self.Rectangle.get_y()
height = self.Rectangle.get_height()
y1 = y0+height
# ======================================
# Validate borders
# ======================================
if (border_px is None) and (border is not None):
xborder = border[0]*width
yborder = border[1]*height
elif (border_px is not None) and (border is None):
xborder = border_px[0]
yborder = border_px[1]
elif (border_px is None) and (border is None):
raise IOError('No border info specified!')
elif (border_px is not None) and (border is not None):
raise IOError('Too much border info specified, both border_px and border!')
else:
raise IOError('End of the line!')
# ======================================
# Add borders
# ======================================
x0 = x0 - xborder
x1 = x1 + xborder
y0 = y0 - yborder
y1 = y1 + yborder
# ======================================
# Validate coordinates to prevent
# unPythonic crash
# ======================================
if not ((0 <= x0 and x0 <= self.image.shape[1]) and (0 <= x1 and x1 <= self.image.shape[1])):
print('X issue')
print('Requested: x=({}, {})'.format(x0, x1))
x0 = 0
x1 = self.image.shape[1]
if not ((0 <= y0 and y0 <= self.image.shape[0]) and (0 <= y1 and y1 <= self.image.shape[0])):
print('y issue')
print('Requested: y=({}, {})'.format(y0, y1))
y0 = 0
y1 = self.image.shape[0]
# ======================================
# Set viewable area
# ======================================
self.ax.set_xlim(x0, x1)
self.ax.set_ylim(y0, y1)
# ======================================
# Redraw canvas to show updates
# ======================================
self.ax.figure.canvas.draw()
class Mpl_Image_Plus_Slider(QtGui.QWidget):
# def __init__(self, parent=None, **kwargs):
def __init__(self, parent=None, **kwargs):
# Initialize self as a widget
QtGui.QWidget.__init__(self, parent)
# Add a vertical layout with parent self
self.vLayout = QtGui.QVBoxLayout(self)
self.vLayout.setObjectName(_fromUtf8("vLayout"))
# Add an Mpl_Image widget to vLayout,
# save it to self._img
# Pass arguments through to Mpl_Image.
self._img = Mpl_Image(parent=parent, toolbarbool=True, **kwargs)
self._img.setObjectName(_fromUtf8("_img"))
self.vLayout.addWidget(self._img)
# Add a slider to vLayout,
# save it to self.max_slider
# self.max_slider = QtGui.QSlider(self)
self.max_slider = Slider_and_Text(self)
self.max_slider.setObjectName(_fromUtf8("max_slider"))
self.max_slider.setOrientation(QtCore.Qt.Horizontal)
self.vLayout.addWidget(self.max_slider)
# Setup slider to work with _img's clims
self.max_slider.valueChanged.connect(lambda val: self.set_clim(0, val))
def _get_image(self):
return self._img.image
def _set_image(self, image):
self._img.image = image
maximage = _np.max(_np.max(image))
self.max_slider.setMaximum(maximage)
image = property(_get_image, _set_image)
def _get_ax(self):
return self._img.ax
ax = property(_get_ax)
def _get_Rectangle(self):
return self._img.Rectangle
# def _set_rect(self, rect):
# self._img.rect(rect)
Rectangle = property(_get_Rectangle)
def zoom_rect(self, border=None, border_px=None):
self._img.zoom_rect(border, border_px)
def set_clim(self, *args, **kwargs):
self._img.set_clim(*args, **kwargs)
def setSliderValue(self, val):
self.max_slider.setValue(val)
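# =========================================================================
# Minimal usage sketch (illustrative only, not part of the original module).
# Because of the relative Rectangle import above, this module is normally
# imported from its package; the helper below assumes a QApplication already
# exists (for instance inside an IPython/Qt session).
# =========================================================================
def _demo_image_widget():
    viewer = Mpl_Image_Plus_Slider()
    viewer.image = _np.random.randint(0, 3600, size=(480, 640))
    viewer.set_clim(0, 3600)
    viewer.show()
    return viewer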
| mit |
mathhun/scipy_2015_sklearn_tutorial | notebooks/figures/plot_kneighbors_regularization.py | 25 | 1363 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsRegressor
def make_dataset(n_samples=100):
rnd = np.random.RandomState(42)
x = np.linspace(-3, 3, n_samples)
y_no_noise = np.sin(4 * x) + x
y = y_no_noise + rnd.normal(size=len(x))
return x, y
def plot_regression_datasets():
fig, axes = plt.subplots(1, 3, figsize=(15, 5))
for n_samples, ax in zip([10, 100, 1000], axes):
x, y = make_dataset(n_samples)
ax.plot(x, y, 'o', alpha=.6)
def plot_kneighbors_regularization():
rnd = np.random.RandomState(42)
x = np.linspace(-3, 3, 100)
y_no_noise = np.sin(4 * x) + x
y = y_no_noise + rnd.normal(size=len(x))
X = x[:, np.newaxis]
fig, axes = plt.subplots(1, 3, figsize=(15, 5))
x_test = np.linspace(-3, 3, 1000)
for n_neighbors, ax in zip([2, 5, 20], axes.ravel()):
kneighbor_regression = KNeighborsRegressor(n_neighbors=n_neighbors)
kneighbor_regression.fit(X, y)
ax.plot(x, y_no_noise, label="true function")
ax.plot(x, y, "o", label="data")
ax.plot(x_test, kneighbor_regression.predict(x_test[:, np.newaxis]),
label="prediction")
ax.legend()
ax.set_title("n_neighbors = %d" % n_neighbors)
if __name__ == "__main__":
plot_kneighbors_regularization()
plt.show()
| cc0-1.0 |
qifeigit/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
sonyahanson/assaytools | examples/ipynbs/data-analysis/spectra/2015-12-18/xml2png4scans-spectra.py | 8 | 5636 | # This script takes xml data file output from the Tecan Infinite m1000 Pro plate reader
# and makes quick and dirty images of the raw data.
# But with scans and not just singlet reads.
# This script specifically combines four spectrum scripts (AB, CD, EF, GH) into a single dataframe and plot.
# The same procedure can be used to make matrices suitable for analysis using
# matrix = dataframe.values
# Made by Sonya Hanson, with some help from things that worked in xml2png.py and xml2png4scans.py
# Friday, November 18,2015
# Usage: python xml2png4scans-spectra.py *.xml
############ For future to combine with xml2png.py
#
# for i, sect in enumerate(Sections):
# reads = sect.xpath("*/Well")
# parameters = root.xpath(path)[0]
# if reads[0].attrib['Type'] == "Scan":
#
##############
import matplotlib.pyplot as plt
from lxml import etree
import pandas as pd
import matplotlib.cm as cm
import seaborn
import sys
import os
### Define xml files.
xml_files = sys.argv[1:]
so_many = len(xml_files)
print "****This script is about to make png files for %s xml files. ****" % so_many
### Define extract function that extracts parameters
def extract(taglist):
result = []
for p in taglist:
print "Attempting to extract tag '%s'..." % p
try:
param = parameters.xpath("*[@Name='" + p + "']")[0]
result.append( p + '=' + param.attrib['Value'])
except:
### tag not found
result.append(None)
return result
### Define an initial set of dataframes, one for each section
large_dataframe0 = pd.DataFrame()
large_dataframe1 = pd.DataFrame()
large_dataframe2 = pd.DataFrame()
for file in xml_files:
### Parse XML file.
root = etree.parse(file)
### Remove extension from xml filename.
file_name = os.path.splitext(file)[0]
### Extract plate type and barcode.
plate = root.xpath("/*/Header/Parameters/Parameter[@Name='Plate']")[0]
plate_type = plate.attrib['Value']
try:
bar = root.xpath("/*/Plate/BC")[0]
barcode = bar.text
except:
barcode = 'no barcode'
### Define Sections.
Sections = root.xpath("/*/Section")
much = len(Sections)
print "****The xml file " + file + " has %s data sections:****" % much
for sect in Sections:
print sect.attrib['Name']
for i, sect in enumerate(Sections):
### Extract Parameters for this section.
path = "/*/Section[@Name='" + sect.attrib['Name'] + "']/Parameters"
parameters = root.xpath(path)[0]
### Parameters are extracted slightly differently depending on Absorbance or Fluorescence read.
        # Attach these to title0, title1, ..., one per section; these will be the same for all 4 files.
if parameters[0].attrib['Value'] == "Absorbance":
result = extract(["Mode", "Wavelength Start", "Wavelength End", "Wavelength Step Size"])
globals()["title"+str(i)] = '%s, %s, %s, %s' % tuple(result)
else:
result = extract(["Gain", "Excitation Wavelength", "Emission Wavelength", "Part of Plate", "Mode"])
globals()["title"+str(i)] = '%s, %s, %s, \n %s, %s' % tuple(result)
print "****The %sth section has the parameters:****" %i
print globals()["title"+str(i)]
### Extract Reads for this section.
Sections = root.xpath("/*/Section")
reads = root.xpath("/*/Section[@Name='" + sect.attrib['Name'] + "']/*/Well")
wellIDs = [read.attrib['Pos'] for read in reads]
data = [(s.text, float(s.attrib['WL']), r.attrib['Pos'])
for r in reads
for s in r]
dataframe = pd.DataFrame(data, columns=['fluorescence','wavelength (nm)','Well'])
### dataframe_rep replaces 'OVER' (when fluorescence signal maxes out) with '3289277', an arbitrarily high number
dataframe_rep = dataframe.replace({'OVER':'3289277'})
dataframe_rep[['fluorescence']] = dataframe_rep[['fluorescence']].astype('float')
### Create large_dataframe1, large_dataframe2, and large_dataframe3 that collect data for each section
### as we run through cycle through sections and files.
globals()["dataframe_pivot"+str(i)] = pd.pivot_table(dataframe_rep, index = 'wavelength (nm)', columns= ['Well'])
print 'The max fluorescence value in this dataframe is %s'% globals()["dataframe_pivot"+str(i)].values.max()
globals()["large_dataframe"+str(i)] = pd.concat([globals()["large_dataframe"+str(i)],globals()["dataframe_pivot"+str(i)]])
### Plot, making a separate png for each section.
for i, sect in enumerate(Sections):
section_name = sect.attrib['Name']
path = "/*/Section[@Name='" + sect.attrib['Name'] + "']/Parameters"
parameters = root.xpath(path)[0]
if parameters[0].attrib['Value'] == "Absorbance":
section_ylim = [0,0.2]
else:
section_ylim = [0,40000]
Alphabet = ['A','B','C','D','E','F','G','H']
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(12, 12))
for j,A in enumerate(Alphabet):
for k in range(1,12):
try:
globals()["large_dataframe"+str(i)].fluorescence.get(A + str(k)).plot(ax=axes[(j/3)%3,j%3], title=A, c=cm.hsv(k*15), ylim=section_ylim, xlim=[240,800])
except:
print "****No row %s.****" %A
fig.suptitle('%s \n %s \n Barcode = %s' % (globals()["title"+str(i)], plate_type, barcode), fontsize=14)
fig.subplots_adjust(hspace=0.3)
plt.savefig('%s_%s.png' % (file_name, section_name))
| lgpl-2.1 |
alexei-matveev/ase-local | doc/exercises/siesta1/answer1.py | 3 | 1197 | # -*- coding: utf-8 -*-
# creates: ener.png distance.png angle.png
import os
import matplotlib
matplotlib.use('Agg')
import pylab as plt
e_s = [0.01,0.1,0.2,0.3,0.4,0.5]
E = [-463.2160, -462.9633, -462.4891, -462.0551,
-461.5426, -461.1714]
d = [1.1131, 1.1046, 1.0960, 1.0901,
1.0857, 1.0810]
alpha = [100.832453365, 99.568214268, 99.1486065462,
98.873671379, 98.1726341945, 98.0535643778]
fig=plt.figure(figsize=(3, 2.5))
fig.subplots_adjust(left=.29, right=.96, top=.9, bottom=0.16)
plt.plot(e_s, E, 'o-')
plt.xlabel(u'Energy shift [eV]')
plt.ylabel(u'Energy [eV]')
plt.title('Total Energy vs Eshift')
plt.savefig('ener.png')
fig=plt.figure(figsize=(3, 2.5))
fig.subplots_adjust(left=.24, right=.96, top=.9, bottom=0.16)
plt.plot(e_s, d, 'o-')
plt.xlabel(u'Energy shift [eV]')
plt.ylabel(u'O-H distance [Å]')
limits = plt.axis('tight')
plt.title('O-H distance vs Eshift')
plt.savefig('distance.png')
fig=plt.figure(figsize=(3, 2.5))
fig.subplots_adjust(left=.26, right=.96, top=.9, bottom=0.16)
plt.plot(e_s, alpha, 'o-')
plt.xlabel(u'Energy shift [eV]')
plt.ylabel(u'H2O angle')
limits = plt.axis('tight')
plt.title('H2O angle vs Eshift')
plt.savefig('angle.png')
| gpl-2.0 |
mugwizaleon/PCRasterMapstacks | pcrastermapstackvisualisation.py | 1 | 17920 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
PcrasterMapstackVisualisation
A QGIS plugin
PCRaster Mapstack visualisation
-------------------
begin : 2014-06-28
copyright : (C) 2014 by Leon
email : mugwizal@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
# Import the PyQt and QGIS libraries
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
import qgis.utils
# Initialize Qt resources from file resources.py
import resources_rc
# Import the code for the dialog
from pcrastermapstackvisualisationdialog import PcrasterMapstackVisualisationDialog
from Animationdialog import AnimationDialog
from TSSvisualizationdialog import TSSVisualizationDialog
# Import modules
import os.path
import os, glob
import time
import sys
import string
class PcrasterMapstackVisualisation:
def __init__(self, iface):
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value("locale/userLocale")[0:2]
localePath = os.path.join(self.plugin_dir, 'i18n', 'pcrastermapstackvisualisation_{}.qm'.format(locale))
if os.path.exists(localePath):
self.translator = QTranslator()
self.translator.load(localePath)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Create the dialog (after translation) and keep reference
self.dlg = PcrasterMapstackVisualisationDialog()
self.dlg2 = AnimationDialog()
self.dlg3 = TSSVisualizationDialog()
# Mapstack series visualization
QObject.connect( self.dlg.ui.pushButton_7, SIGNAL( "clicked()" ), self.DisplayTSSnames)
QObject.connect( self.dlg.ui.pushButton_6, SIGNAL( "clicked()" ), self.TSSgraphs)
QObject.connect( self.dlg.ui.btnBaseDir_3, SIGNAL( "clicked()" ), self.selectDir ) #link the button to the function of selecting the directory
QObject.connect( self.dlg.ui.btnBaseDir_3, SIGNAL( "clicked()" ), self.loadMapStackCoreName ) #link the button to the function of selecting the directory
QObject.connect( self.dlg.ui.pushButton_5, SIGNAL( "clicked()" ), self.actionStart)
QObject.connect( self.dlg2.ui.pushButton_2, SIGNAL( "clicked()" ), self.ActionAnim)
QObject.connect( self.dlg2.ui.pushButton_3, SIGNAL( "clicked()" ), self.actionNext)
QObject.connect( self.dlg2.ui.pushButton, SIGNAL( "clicked()" ), self.actionPrevious)
QObject.connect( self.dlg2.ui.pushButton_4, SIGNAL( "clicked()" ), self.actionStart)
QObject.connect( self.dlg2.ui.pushButton_5, SIGNAL( "clicked()" ), self.actionLast)
QObject.connect(self.dlg.ui.comboBox, SIGNAL("currentIndexChanged (const QString&)"), self.changelist) #Change the list of mapstacks
#Close dialogs widgets
QObject.connect( self.dlg.ui.pushButton, SIGNAL( "clicked()" ), self.close1)
QObject.connect( self.dlg3.ui.pushButton, SIGNAL( "clicked()" ), self.close2)
QObject.connect( self.dlg2.ui.pushButton_6, SIGNAL( "clicked()" ), self.close3)
def initGui(self):
# Create action that will start plugin configuration
self.action = QAction(
QIcon(":/plugins/pcrastermapstackvisualisation/Myicon.png"),
u"Mapstacks_visualisation", self.iface.mainWindow())
# connect the action to the run method
self.action.triggered.connect(self.run)
# Add toolbar button and menu item
self.iface.addToolBarIcon(self.action)
self.iface.addPluginToMenu(u"&PCRaster Mapstacks Viewer", self.action)
self.iface.addPluginToRasterMenu(u"&PCRaster Mapstacks Viewer", self.action)
def unload(self):
# Remove the plugin menu item and icon
self.iface.removePluginMenu(u"&PCRaster Time series Viewer", self.action)
self.iface.removeToolBarIcon(self.action)
# run method that performs all the real work
def run(self):
# show the dialog
self.dlg.show()
# Run the dialog event loop
result = self.dlg.exec_()
# See if OK was pressed
def close1(self):
self.dlg.close()
def TSSview(self):
self.dlg3.move(10, 300)
self.dlg3.show()# show the dialog
def close2(self):
self.dlg3.close()
self.dlg.show()
def AnimationDlg (self):
self.dlg2.move(200, 200)
self.dlg2.show()# show the dialog
def close3(self):
self.dlg2.close()
self.dlg.show()
# Selecting the directory containg files
def selectDir( self ):
self.dlg.hide()
settings = QSettings()
path = QFileDialog.getExistingDirectory( self.iface.mainWindow(), "Select a directory")
if path: self.dlg.ui.txtBaseDir2_5.setText( path )
self.dlg.show()
def actionRemove(self):
layers = self.iface.legendInterface().layers()
layer = qgis.utils.iface.activeLayer()
self.PrincipalLayer = layer.name()
for layer in layers :
if layer.name() == self.PrincipalLayer : pass
else : self.iface.legendInterface().moveLayer( layer, 0 )
self.iface.legendInterface().removeGroup(0)
def AddLayer(self, input):
layerPath = os.path.join(self.dataDir, input)
fileInfo = QFileInfo(layerPath)
baseName = fileInfo.baseName()
layer = QgsRasterLayer(layerPath, baseName)
uri = os.path.join(self.dataDir, 'MyFile.qml')
layer.loadNamedStyle(uri)
QgsMapLayerRegistry.instance().addMapLayer(layer)
def loadFiles(self, filename):
self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())
os.chdir(self.dataDir )
file_list = glob.glob(filename)
for index in file_list:
list = index.split(".")
if (len(list) < 2) :
file_list.remove(index)
for index in file_list:
if index.endswith(".tss"):
file_list.remove(index)
for index in file_list:
if index.endswith(".xml") or index.endswith(".aux.xml") :
file_list.remove(index)
for index in file_list:
if index.endswith(".tss"):
file_list.remove(index)
file_list.sort()
return file_list
def loadMapStackCoreName(self):
self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())
files= os.listdir(self.dataDir)
self.dlg.ui.comboBox.clear()
self.dlg.ui.comboBox_2.clear()
MyList=[]
MyList2 =[]
MyList3 = []
for index in files:
list = index.split(".")
if (len(list)==2) and (len(list[0])== 8) and (len(list[1])== 3) and (list[1].isdigit()):
MyList.append(index)
if index.endswith(".tss"):
MyList3.append(index)
for index in MyList:
list = index.split(".")
words = list[0].replace("0", "")
MyList2.append(words)
FinalList = []
for i in MyList2:
if i not in FinalList:
FinalList.append(i)
self.dlg.ui.comboBox.addItems(FinalList)
self.dlg.ui.comboBox_2.addItems(MyList3)
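    # Illustration (an assumption about typical data, not enforced beyond the
    # 8.3 filename check above): a PCRaster mapstack is stored as files such
    # as rain0000.001, rain0000.002, ...; stripping the zero padding with
    # replace("0", "") recovers the core name ("rain") listed in the combo
    # box, while *.tss files are listed separately as time series.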
def DisplayTSSnames(self):
self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())
if not self.dataDir : pass
else:
os.chdir(self.dataDir )
if not self.dlg.ui.comboBox.currentText(): pass
else:
filename = '*'+str(self.dlg.ui.comboBox.currentText())+'*'
file_list = self.loadFiles(filename)
self.dlg.ui.listWidget.clear()
for index, file in enumerate(file_list):
self.dlg.ui.listWidget.addItem(file)
def changelist(self):
self.dlg.ui.listWidget.clear()
def ActionAnim(self):
self.actionRemove()
Group = self.iface.legendInterface().addGroup("group_foo")
import numpy
numpy.seterr(divide='ignore', invalid='ignore', over='ignore')
self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())
os.chdir(self.dataDir )
filename = '*'+str(self.dlg.ui.comboBox.currentText())+'*'
file_list = self.loadFiles(filename)
legend = self.iface.legendInterface()
self.dlg2.ui.pushButton_6.setEnabled(False)
for index, file in enumerate(file_list):
canvas = qgis.utils.iface.mapCanvas()
import Styling
Styling.style1(file_list[index], 'value', self.dataDir, file_list )
uri = os.path.join(self.dataDir, 'MyFile.qml')
self.iface.addRasterLayer(file, os.path.basename(str(file))).loadNamedStyle(uri)
canvas.refresh()
canvas.zoomToFullExtent()
rlayer = qgis.utils.iface.activeLayer()
legend.moveLayer( rlayer, 0 )
time.sleep(float(self.dlg2.ui.txtBaseDir2_5.text()))
self.dlg2.ui.pushButton_6.setEnabled(True)
def actionStart(self):
import Styling
self.dlg.hide()
self.iface.messageBar().clearWidgets ()
layers = self.iface.legendInterface().layers()
for layer in layers :
if self.iface.legendInterface().isLayerVisible(layer) : self.iface.legendInterface().setLayerVisible(layer, False)
import numpy
numpy.seterr(divide='ignore', invalid='ignore', over='ignore')
self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())
if not self.dataDir :
QMessageBox.information( self.iface.mainWindow(),"Info", "Please select a directory first")
self.dlg.show()
else :
os.chdir(self.dataDir )
filename = '*'+str(self.dlg.ui.comboBox.currentText())+'*'
file_list = self.loadFiles(filename)
if not self.dlg.ui.comboBox.currentText():
QMessageBox.information( self.iface.mainWindow(),"Info", "The are no PCRaster mapstacks in this directory")
self.dlg.show()
# return
else:
self.AnimationDlg()
Styling.style1(filename, 'value', self.dataDir, file_list )
s = QSettings()
oldValidation = s.value( "/Projections/defaultBehaviour", "useGlobal" )
s.setValue( "/Projections/defaultBehaviour", "useGlobal" )
self.AddLayer(str(file_list[0]))
s.setValue( "/Projections/defaultBehaviour", oldValidation )
layer = qgis.utils.iface.activeLayer()
# self.PrincipalLayer = layer.name()
# print self.PrincipalLayer
self.iface.legendInterface().setLayerExpanded(layer, True)
def actionLast(self):
self.actionRemove()
self.dlg.hide()
self.AnimationDlg()
self.iface.messageBar().clearWidgets ()
layers = self.iface.legendInterface().layers()
for layer in layers :
if self.iface.legendInterface().isLayerVisible(layer) : self.iface.legendInterface().setLayerVisible(layer, False)
import numpy
numpy.seterr(divide='ignore', invalid='ignore', over='ignore')
self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())
os.chdir(self.dataDir )
filename = '*'+str(self.dlg.ui.comboBox.currentText())+'*'
file_list = self.loadFiles(filename)
index = len(file_list) - 1
canvas = qgis.utils.iface.mapCanvas()
import Styling
Styling.style1(file_list[index], 'value', self.dataDir, file_list )
uri = os.path.join(self.dataDir, 'MyFile.qml')
self.iface.addRasterLayer(file_list[index], os.path.basename(str(file_list[index]))).loadNamedStyle(uri)
canvas.refresh()
canvas.zoomToFullExtent()
def actionNext(self):
self.actionRemove()
self.iface.messageBar().clearWidgets ()
import numpy
numpy.seterr(divide='ignore', invalid='ignore', over='ignore')
self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())
os.chdir(self.dataDir )
filename = '*'+str(self.dlg.ui.comboBox.currentText())+'*'
file_list = self.loadFiles(filename)
layer = qgis.utils.iface.activeLayer()
        self.PrincipalLayer = layer.name() if layer is not None else None
if layer is None :
index = 0
elif layer.name() not in file_list:
index = 0
else :
counter = file_list.index(layer.name())
index = counter + 1
if counter == len(file_list) - 1 :
layers = self.iface.legendInterface().layers()
self.iface.legendInterface().addGroup("group_foo")
for layer in layers :
if layer.name() == self.PrincipalLayer : pass
elif self.iface.legendInterface().isLayerVisible(layer) : self.iface.legendInterface().moveLayer( layer, 0 )
index = 0
canvas = qgis.utils.iface.mapCanvas()
import Styling
Styling.style1(file_list[index], 'value', self.dataDir, file_list )
uri = os.path.join(self.dataDir, 'MyFile.qml')
self.iface.addRasterLayer(file_list[index], os.path.basename(str(file_list[index]))).loadNamedStyle(uri)
canvas.refresh()
canvas.zoomToFullExtent()
def actionPrevious(self):
self.actionRemove()
self.iface.messageBar().clearWidgets ()
import numpy
numpy.seterr(divide='ignore', invalid='ignore', over='ignore')
self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())
os.chdir(self.dataDir )
filename = '*'+str(self.dlg.ui.comboBox.currentText())+'*'
file_list = self.loadFiles(filename)
layer = qgis.utils.iface.activeLayer()
        self.PrincipalLayer = layer.name() if layer is not None else None
if layer is None :
index = len(file_list) - 1
elif layer.name() not in file_list:
index = len(file_list) - 1
else :
counter = file_list.index(layer.name())
index = counter - 1
if counter == 0 :
layers = self.iface.legendInterface().layers()
self.iface.legendInterface().addGroup("group_foo")
for layer in layers :
if layer.name() == self.PrincipalLayer : pass
elif self.iface.legendInterface().isLayerVisible(layer) : self.iface.legendInterface().moveLayer( layer, 0 )
index = len(file_list) - 1
canvas = qgis.utils.iface.mapCanvas()
import Styling
Styling.style1(file_list[index], 'value', self.dataDir, file_list )
uri = os.path.join(self.dataDir, 'MyFile.qml')
self.iface.addRasterLayer(file_list[index], os.path.basename(str(file_list[index]))).loadNamedStyle(uri)
canvas.refresh()
canvas.zoomToFullExtent()
    def TSSgraphs(self):  # with matplotlib
self.dlg.hide()
filename = str(self.dlg.ui.comboBox_2.currentText())
self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())
file = os.path.join (self.dataDir, filename)
if os.path.isfile(file):
self.TSSview()
self.dataDir = str(self.dlg.ui.txtBaseDir2_5.text())
os.chdir(self.dataDir )
stripped = []
stripper = open(filename, 'r')
st_lines = stripper.readlines()[4:]
stripper.close()
for lines in st_lines:
stripped_line = " ".join(lines.split())
stripped.append(stripped_line)
data = "\n".join(stripped)
data = data.split('\n')
values = []
dates = []
years = 0
yl = []
for row in data:
x, y = row.split()
values.append(float(y))
year = (int(x.translate(string.maketrans("\n\t\r", " ")).strip()))
dates.append(year)
years = years +1
yl.append(years)
xlabels = yl
self.dlg3.ui.widget.canvas.ax.clear()
self.dlg3.ui.widget.canvas.ax.set_position([0.155,0.15,0.82,0.75])
self.dlg3.ui.widget.canvas.ax.set_title(filename)
self.dlg3.ui.widget.canvas.ax.set_xlabel ('Time step')
self.dlg3.ui.widget.canvas.ax.set_ylabel ('Values')
self.dlg3.ui.widget.canvas.ax.plot(dates, values)
self.dlg3.ui.widget.canvas.ax.set_xticks(dates)
self.dlg3.ui.widget.canvas.ax.set_xticklabels(xlabels, rotation=30, fontsize=10)
self.dlg3.ui.widget.canvas.draw()
else:
QMessageBox.information( self.iface.mainWindow(),"Info", "The are no PCRaster timeseries this directory")
self.dlg.show()
| apache-2.0 |
trungnt13/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
michelp/pywt | util/refguide_check.py | 2 | 27051 | #!/usr/bin/env python
"""
refguide_check.py [OPTIONS] [-- ARGS]
Check for a PyWavelets submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings. This is different from doctesting [we do not aim to have
scipy docstrings doctestable!], this is just to make sure that code in
docstrings is valid python::
$ python refguide_check.py --check_docs optimize
"""
from __future__ import print_function
import sys
import os
import re
import copy
import inspect
import warnings
import doctest
import tempfile
import io
import docutils.core
from docutils.parsers.rst import directives
import shutil
import glob
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
from argparse import ArgumentParser
import numpy as np
# sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc',
# 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
# Remove sphinx directives that don't run without Sphinx environment
directives._directives.pop('versionadded', None)
directives._directives.pop('versionchanged', None)
directives._directives.pop('moduleauthor', None)
directives._directives.pop('sectionauthor', None)
directives._directives.pop('codeauthor', None)
directives._directives.pop('toctree', None)
BASE_MODULE = "pywt"
PUBLIC_SUBMODULES = []
# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {}
# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([])
# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = []
HAVE_MATPLOTLIB = False
def short_path(path, cwd=None):
"""
Return relative or absolute path name, whichever is shortest.
"""
if not isinstance(path, str):
return path
if cwd is None:
cwd = os.getcwd()
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, cwd)
if len(abspath) <= len(relpath):
return abspath
return relpath
def find_names(module, names_dict):
# Refguide entries:
#
# - 3 spaces followed by function name, and maybe some spaces, some
# dashes, and an explanation; only function names listed in
# refguide are formatted like this (mostly, there may be some false
# positives)
#
# - special directives, such as data and function
#
# - (scipy.constants only): quoted list
#
patterns = [
r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
]
if module.__name__ == 'scipy.constants':
patterns += ["^``([a-z_0-9A-Z]+)``"]
patterns = [re.compile(pattern) for pattern in patterns]
module_name = module.__name__
for line in module.__doc__.splitlines():
res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
if res:
module_name = res.group(1)
continue
for pattern in patterns:
res = re.match(pattern, line)
if res is not None:
name = res.group(1)
entry = '.'.join([module_name, name])
names_dict.setdefault(module_name, set()).add(name)
break
def get_all_dict(module):
"""Return a copy of the __all__ dict with irrelevant items removed."""
if hasattr(module, "__all__"):
all_dict = copy.deepcopy(module.__all__)
else:
all_dict = copy.deepcopy(dir(module))
all_dict = [name for name in all_dict
if not name.startswith("_")]
for name in ['absolute_import', 'division', 'print_function']:
try:
all_dict.remove(name)
except ValueError:
pass
# Modules are almost always private; real submodules need a separate
# run of refguide_check.
all_dict = [name for name in all_dict
if not inspect.ismodule(getattr(module, name, None))]
deprecated = []
not_deprecated = []
for name in all_dict:
f = getattr(module, name, None)
if callable(f) and is_deprecated(f):
deprecated.append(name)
else:
not_deprecated.append(name)
others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
"""Return sets of objects only in __all__, refguide, or completely missing."""
only_all = set()
for name in all_dict:
if name not in names:
only_all.add(name)
only_ref = set()
missing = set()
for name in names:
if name not in all_dict:
for pat in REFGUIDE_ALL_SKIPLIST:
if re.match(pat, module_name + '.' + name):
if name not in others:
missing.add(name)
break
else:
only_ref.add(name)
return only_all, only_ref, missing
def is_deprecated(f):
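    # Calling with a bogus keyword argument lets deprecation wrappers emit
    # DeprecationWarning (escalated to an error by the filter below) before
    # the unexpected argument can raise; any other exception is swallowed.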
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
try:
f(**{"not a kwarg": None})
except DeprecationWarning:
return True
except:
pass
return False
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
num_all = len(all_dict)
num_ref = len(names)
output = ""
output += "Non-deprecated objects in __all__: %i\n" % num_all
output += "Objects in refguide: %i\n\n" % num_ref
only_all, only_ref, missing = compare(all_dict, others, names, module_name)
dep_in_ref = set(only_ref).intersection(deprecated)
only_ref = set(only_ref).difference(deprecated)
if len(dep_in_ref) > 0:
output += "Deprecated objects in refguide::\n\n"
for name in sorted(deprecated):
output += " " + name + "\n"
if len(only_all) == len(only_ref) == len(missing) == 0:
if dots:
output_dot('.')
return [(None, True, output)]
else:
if len(only_all) > 0:
output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
for name in sorted(only_all):
output += " " + name + "\n"
if len(only_ref) > 0:
output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
for name in sorted(only_ref):
output += " " + name + "\n"
if len(missing) > 0:
output += "ERROR: missing objects::\n\n"
for name in sorted(missing):
output += " " + name + "\n"
if dots:
output_dot('F')
return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
if text is None:
if dots:
output_dot('E')
return False, "ERROR: %s: no documentation" % (name,)
ok_unknown_items = set([
'mod', 'currentmodule', 'autosummary', 'data',
'obj', 'versionadded', 'versionchanged', 'module', 'class',
'ref', 'func', 'toctree', 'moduleauthor',
'sectionauthor', 'codeauthor', 'eq',
])
# Run through docutils
error_stream = io.StringIO()
def resolve(name, is_label=False):
return ("http://foo", name)
token = '<RST-VALIDATE-SYNTAX-CHECK>'
docutils.core.publish_doctree(
text, token,
settings_overrides = dict(halt_level=5,
traceback=True,
default_reference_context='title-reference',
default_role='emphasis',
link_base='',
resolve_name=resolve,
stylesheet_path='',
raw_enabled=0,
file_insertion_enabled=0,
warning_stream=error_stream))
# Print errors, disregarding unimportant ones
error_msg = error_stream.getvalue()
errors = error_msg.split(token)
success = True
output = ""
for error in errors:
lines = error.splitlines()
if not lines:
continue
m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
if m:
if m.group(1) in ok_unknown_items:
continue
m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
if m:
continue
output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n"
success = False
if not success:
output += " " + "-"*72 + "\n"
for lineno, line in enumerate(text.splitlines()):
output += " %-4d %s\n" % (lineno+1, line)
output += " " + "-"*72 + "\n\n"
if dots:
output_dot('.' if success else 'F')
return success, output
def output_dot(msg='.', stream=sys.stderr):
stream.write(msg)
stream.flush()
def check_rest(module, names, dots=True):
"""
Check reStructuredText formatting of docstrings
Returns: [(name, success_flag, output), ...]
"""
try:
skip_types = (dict, str, unicode, float, int)
except NameError:
# python 3
skip_types = (dict, str, float, int)
results = []
if module.__name__[6:] not in OTHER_MODULE_DOCS:
results += [(module.__name__,) +
validate_rst_syntax(inspect.getdoc(module),
module.__name__, dots=dots)]
for name in names:
full_name = module.__name__ + '.' + name
obj = getattr(module, name, None)
if obj is None:
results.append((full_name, False, "%s has no docstring" % (full_name,)))
continue
elif isinstance(obj, skip_types):
continue
if inspect.ismodule(obj):
text = inspect.getdoc(obj)
else:
try:
text = str(get_doc_object(obj))
except:
import traceback
results.append((full_name, False,
"Error in docstring format!\n" +
traceback.format_exc()))
continue
m = re.search("([\x00-\x09\x0b-\x1f])", text)
if m:
msg = ("Docstring contains a non-printable character %r! "
"Maybe forgot r\"\"\"?" % (m.group(1),))
results.append((full_name, False, msg))
continue
try:
src_file = short_path(inspect.getsourcefile(obj))
except TypeError:
src_file = None
if src_file:
file_full_name = src_file + ':' + full_name
else:
file_full_name = full_name
results.append((full_name,) +
validate_rst_syntax(text, file_full_name, dots=dots))
return results
### Doctest helpers ####
# the namespace to run examples in
DEFAULT_NAMESPACE = {'np': np}
# the namespace to do checks in
CHECK_NAMESPACE = {
'np': np,
'assert_allclose': np.testing.assert_allclose,
'assert_equal': np.testing.assert_equal,
# recognize numpy repr's
'array': np.array,
'matrix': np.matrix,
'int64': np.int64,
'uint64': np.uint64,
'int8': np.int8,
'int32': np.int32,
'float64': np.float64,
'dtype': np.dtype,
'nan': np.nan,
'NaN': np.nan,
'inf': np.inf,
'Inf': np.inf, }
class DTRunner(doctest.DocTestRunner):
DIVIDER = "\n"
def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
self._item_name = item_name
doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
optionflags=optionflags)
def _report_item_name(self, out, new_line=False):
if self._item_name is not None:
if new_line:
out("\n")
self._item_name = None
def report_start(self, out, test, example):
self._checker._source = example.source
return doctest.DocTestRunner.report_start(self, out, test, example)
def report_success(self, out, test, example, got):
if self._verbose:
self._report_item_name(out, new_line=True)
return doctest.DocTestRunner.report_success(
self, out, test, example, got)
def report_unexpected_exception(self, out, test, example, exc_info):
self._report_item_name(out)
return doctest.DocTestRunner.report_unexpected_exception(
self, out, test, example, exc_info)
def report_failure(self, out, test, example, got):
self._report_item_name(out)
return doctest.DocTestRunner.report_failure(self, out, test,
example, got)
class Checker(doctest.OutputChecker):
obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')
vanilla = doctest.OutputChecker()
rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary"}
stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
'set_title', 'imshow', 'plt.show', 'ax.axis', 'plt.plot(',
'.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim',
'set_xlim', '# reformatted'}
def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
self.parse_namedtuples = parse_namedtuples
self.atol, self.rtol = atol, rtol
if ns is None:
self.ns = dict(CHECK_NAMESPACE)
else:
self.ns = ns
def check_output(self, want, got, optionflags):
# cut it short if they are equal
if want == got:
return True
# skip stopwords in source
if any(word in self._source for word in self.stopwords):
return True
# skip random stuff
if any(word in want for word in self.rndm_markers):
return True
# skip function/object addresses
if self.obj_pattern.search(got):
return True
# ignore comments (e.g. signal.freqresp)
if want.lstrip().startswith("#"):
return True
# try the standard doctest
try:
if self.vanilla.check_output(want, got, optionflags):
return True
except Exception:
pass
# OK then, convert strings to objects
try:
a_want = eval(want, dict(self.ns))
a_got = eval(got, dict(self.ns))
except:
if not self.parse_namedtuples:
return False
# suppose that "want" is a tuple, and "got" is smth like
# MoodResult(statistic=10, pvalue=0.1).
# Then convert the latter to the tuple (10, 0.1),
# and then compare the tuples.
try:
num = len(a_want)
regex = ('[\w\d_]+\(' +
', '.join(['[\w\d_]+=(.+)']*num) +
'\)')
grp = re.findall(regex, got.replace('\n', ' '))
if len(grp) > 1: # no more than one for now
return False
# fold it back to a tuple
got_again = '(' + ', '.join(grp[0]) + ')'
return self.check_output(want, got_again, optionflags)
except Exception:
return False
# ... and defer to numpy
try:
return self._do_check(a_want, a_got)
except Exception:
# heterog tuple, eg (1, np.array([1., 2.]))
try:
return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
except (TypeError, ValueError):
return False
def _do_check(self, want, got):
# This should be done exactly as written to correctly handle all of
# numpy-comparable objects, strings, and heterogenous tuples
try:
if want == got:
return True
except Exception:
pass
return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
def _run_doctests(tests, full_name, verbose, doctest_warnings):
"""Run modified doctests for the set of `tests`.
Returns: list of [(success_flag, output), ...]
"""
flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL
runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
verbose=verbose)
output = []
success = True
def out(msg):
output.append(msg)
class MyStderr(object):
"""Redirect stderr to the current stdout"""
def write(self, msg):
if doctest_warnings:
sys.stdout.write(msg)
else:
out(msg)
# Run tests, trying to restore global state afterward
old_printoptions = np.get_printoptions()
old_errstate = np.seterr()
old_stderr = sys.stderr
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
sys.stderr = MyStderr()
try:
os.chdir(tmpdir)
# try to ensure random seed is NOT reproducible
np.random.seed(None)
for t in tests:
t.filename = short_path(t.filename, cwd)
fails, successes = runner.run(t, out=out)
if fails > 0:
success = False
finally:
sys.stderr = old_stderr
os.chdir(cwd)
shutil.rmtree(tmpdir)
np.set_printoptions(**old_printoptions)
np.seterr(**old_errstate)
return success, output
def check_doctests(module, verbose, ns=None,
dots=True, doctest_warnings=False):
"""Check code in docstrings of the module's public symbols.
Returns: list of [(item_name, success_flag, output), ...]
"""
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
# Loop over non-deprecated items
results = []
for name in get_all_dict(module)[0]:
full_name = module.__name__ + '.' + name
if full_name in DOCTEST_SKIPLIST:
continue
try:
obj = getattr(module, name)
except AttributeError:
import traceback
results.append((full_name, False,
"Missing item!\n" +
traceback.format_exc()))
continue
finder = doctest.DocTestFinder()
try:
tests = finder.find(obj, name, globs=dict(ns))
except:
import traceback
results.append((full_name, False,
"Failed to get doctests!\n" +
traceback.format_exc()))
continue
success, output = _run_doctests(tests, full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, "".join(output)))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def check_doctests_testfile(fname, verbose, ns=None,
dots=True, doctest_warnings=False):
"""Check code in a text file.
Mimic `check_doctests` above, differing mostly in test discovery.
(which is borrowed from stdlib's doctest.testfile here,
https://github.com/python-git/python/blob/master/Lib/doctest.py)
Returns: list of [(item_name, success_flag, output), ...]
Notes
-----
We also try to weed out pseudocode:
* We maintain a list of exceptions which signal pseudocode,
* We split the text file into "blocks" of code separated by empty lines
and/or intervening text.
* If a block contains a marker, the whole block is then assumed to be
pseudocode. It is then not being doctested.
The rationale is that typically, the text looks like this:
blah
<BLANKLINE>
>>> from numpy import some_module # pseudocode!
>>> func = some_module.some_function
>>> func(42) # still pseudocode
146
<BLANKLINE>
blah
<BLANKLINE>
>>> 2 + 3 # real code, doctest it
5
"""
results = []
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
_, short_name = os.path.split(fname)
if short_name in DOCTEST_SKIPLIST:
return results
full_name = fname
text = open(fname).read()
PSEUDOCODE = set(['some_function', 'some_module', 'import example',
'ctypes.CDLL', # likely need compiling, skip it
                      'integrate.nquad(func,'  # ctypes integrate tutorial
])
# split the text into "blocks" and try to detect and omit pseudocode blocks.
parser = doctest.DocTestParser()
good_parts = []
for part in text.split('\n\n'):
tests = parser.get_doctest(part, ns, fname, fname, 0)
if any(word in ex.source for word in PSEUDOCODE
for ex in tests.examples):
# omit it
pass
else:
# `part` looks like a good code, let's doctest it
good_parts += [part]
# Reassemble the good bits and doctest them:
good_text = '\n\n'.join(good_parts)
tests = parser.get_doctest(good_text, ns, fname, fname, 0)
success, output = _run_doctests([tests], full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, "".join(output)))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def init_matplotlib():
global HAVE_MATPLOTLIB
try:
import matplotlib
matplotlib.use('Agg')
HAVE_MATPLOTLIB = True
except ImportError:
HAVE_MATPLOTLIB = False
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("module_names", metavar="SUBMODULES", default=[],
nargs='*',
help="Submodules to check (default: all public)")
parser.add_argument("--doctests", action="store_true",
help="Run also doctests")
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("--doctest-warnings", action="store_true",
help="Enforce warning checking for doctests")
parser.add_argument("--skip-examples", action="store_true",
help="Skip running doctests in the examples.")
args = parser.parse_args(argv)
modules = []
names_dict = {}
if args.module_names:
args.skip_examples = True
else:
args.module_names = list(PUBLIC_SUBMODULES)
os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
module_names = list(args.module_names)
for name in list(module_names):
if name in OTHER_MODULE_DOCS:
name = OTHER_MODULE_DOCS[name]
if name not in module_names:
module_names.append(name)
for submodule_name in module_names:
module_name = BASE_MODULE + '.' + submodule_name
__import__(module_name)
module = sys.modules[module_name]
if submodule_name not in OTHER_MODULE_DOCS:
find_names(module, names_dict)
if submodule_name in args.module_names:
modules.append(module)
dots = True
success = True
results = []
print("Running checks for %d modules:" % (len(modules),))
if args.doctests or not args.skip_examples:
init_matplotlib()
for module in modules:
if dots:
if module is not modules[0]:
sys.stderr.write(' ')
sys.stderr.write(module.__name__ + ' ')
sys.stderr.flush()
all_dict, deprecated, others = get_all_dict(module)
names = names_dict.get(module.__name__, set())
mod_results = []
mod_results += check_items(all_dict, names, deprecated, others, module.__name__)
mod_results += check_rest(module, set(names).difference(deprecated),
dots=dots)
if args.doctests:
mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
for v in mod_results:
assert isinstance(v, tuple), v
results.append((module, mod_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
if not args.skip_examples:
examples_path = os.path.join(
os.getcwd(), 'doc', 'source', 'regression', '*.rst')
print('\nChecking examples files at %s:' % examples_path)
for filename in sorted(glob.glob(examples_path)):
if dots:
sys.stderr.write('\n')
sys.stderr.write(os.path.split(filename)[1] + ' ')
sys.stderr.flush()
examples_results = check_doctests_testfile(
filename, (args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
def scratch(): pass # stub out a "module", see below
scratch.__name__ = filename
results.append((scratch, examples_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
# Report results
all_success = True
for module, mod_results in results:
success = all(x[1] for x in mod_results)
all_success = all_success and success
if success and args.verbose == 0:
continue
print("")
print("=" * len(module.__name__))
print(module.__name__)
print("=" * len(module.__name__))
print("")
for name, success, output in mod_results:
if name is None:
if not success or args.verbose >= 1:
print(output.strip())
print("")
elif not success or (args.verbose >= 2 and output.strip()):
print(name)
print("-"*len(name))
print("")
print(output.strip())
print("")
if all_success:
print("\nOK: refguide and doctests checks passed!")
sys.exit(0)
else:
print("\nERROR: refguide or doctests have errors")
sys.exit(1)
if __name__ == '__main__':
main(argv=sys.argv[1:])
| mit |
andaag/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 286 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison of the decision boundaries generated on the iris dataset
by Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
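# In the partially-labeled copies below, entries set to -1 are treated as
# unlabeled by LabelSpreading (roughly 30% and 50% of the points respectively).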
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Padova_inst/padova_inst_6/Optical1.py | 33 | 7366 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
			concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
#change desired lines here!
line = [36, #NE 3 3343A
38, #BA C
39, #3646
40, #3726
41, #3727
42, #3729
43, #3869
44, #3889
45, #3933
46, #4026
47, #4070
48, #4074
49, #4078
50, #4102
51, #4340
52] #4363
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Optical Lines", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('optical_lines.pdf')
plt.clf()
| gpl-2.0 |
debsankha/bedtime-programming | ls222/visual-lotka.py | 1 | 5120 | #!/usr/bin/env python
from math import *
import thread
import random
import time
import pygtk
pygtk.require("2.0")
import gtk
import gtk.glade
import commands
import matplotlib.pyplot
import visual  # VPython; provides visual.sphere / visual.color used by the animation option below
class rodent:
def __init__(self):
self.time_from_last_childbirth=0
class felix:
def __init__(self):
self.size=0
self.is_virgin=1
self.reproduction_gap=0
self.time_from_last_childbirth=0
self.age=0
# print 'painted'
class gui_display:
def __init__(self):
self.gladefile='./lvshort.glade'
self.wTree = gtk.glade.XML(self.gladefile)
dic={"on_start_clicked":self.dynamics,"on_mainwin_destroy":gtk.main_quit}
self.wTree.signal_autoconnect(dic)
self.wTree.get_widget("mainwin").show()
self.wTree.get_widget("image").set_from_file("./start.png")
def visualize(self,catn,mousen):
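		# repaint the fixed 40x40 grid of spheres so that the number shown in red
		# tracks the cats' share of the current population (red = cat, green = mouse)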
# while True:
num=40
size=10
catno=catn*num**2/(catn+mousen)
cats=random.sample(range(num**2),catno)
for i in range(num**2):
if i in cats:
self.dic[i].color=visual.color.red
else :
self.dic[i].color=visual.color.green
def dynamics(self,*args,**kwargs):
self.wTree.get_widget("image").set_from_file("./wait.png")
print 'dynamics started'
mouse_size=20 #ind parameter
cat_mature_size=60 #ind parameter
# catch_rate=5*10**-4 #parameter
# cat_efficiency=0.8 #parameter
# a=0.2 #will get from slider
# c=0.2 #will get from slider
cat_catch_rate=self.wTree.get_widget("catchrate").get_value()*10**-4 #parameter
cat_efficiency=self.wTree.get_widget("efficiency").get_value() #parameter
a=self.wTree.get_widget("a").get_value() #parameter
c=self.wTree.get_widget("c").get_value() #parameter
mouse_no=1000
cat_no=1000
t=0
tmax=200
dt=1
timeli=[]
miceli=[]
catli=[]
mice=[rodent() for i in range(mouse_no)]
cats=[felix() for i in range(cat_no)]
catn=len(cats)
mousen=len(mice)
self.dic={}
num=40
size=10
catno=catn*num**2/(catn+mousen)
disp_cats=random.sample(range(num**2),catno)
if self.wTree.get_widget("anim").get_active()==1:
print 'yay!'
for i in range(num**2):
coords=((i%num)*size*2-num*size,(i/num)*size*2-num*size)
if i in disp_cats:
self.dic[i]=visual.sphere(pos=coords,radius=size,color=visual.color.red)
else :
self.dic[i]=visual.sphere(pos=coords,radius=size,color=visual.color.green)
print self.dic
catn=len(cats)
mousen=len(mice)
data=open('tempdata.dat','w')
timestart=time.time()
while (len(mice)>0 or len(cats)>0) and t<tmax and (time.time()-timestart)<60:
# print time.time()-timestart
catn=len(cats)
mousen=len(mice)
if self.wTree.get_widget("anim").get_active()==1:
print 'yay!'
# self.visualize(catn,mousen)
thread.start_new_thread(self.visualize,(catn,mousen))
for mouse in mice:
if mouse.time_from_last_childbirth>=1/a:
mouse.time_from_last_childbirth=0
mice.append(rodent())
mouse.time_from_last_childbirth+=dt
ind=0
while ind<len(cats):
cat=cats[ind]
cat.age+=dt
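			# expected number of mice this cat catches during dt: the integer part
			# is taken outright, the fractional remainder with matching probability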
num=cat_catch_rate*dt*len(mice)
for i in range(int(num)):
caught=random.randint(0,len(mice)-1)
cat.size+=mouse_size*cat_efficiency #size increases
mice.pop(caught)
if (num-int(num))>random.uniform(0,1):
caught=random.randint(0,len(mice)-1)
cat.size+=mouse_size*cat_efficiency #size increases
mice.pop(caught)
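			# a cat that has reached the mature size reproduces; its age at the
			# first litter fixes the interval between subsequent litters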
if cat.size>cat_mature_size:
if cat.is_virgin:
cat.is_virgin=0
cat.reproduction_gap=cat.age
cats.append(felix())
else :
if cat.time_from_last_childbirth>cat.reproduction_gap:
cats.append(felix())
cat.time_from_last_childbirth=0
if cat.is_virgin==0:
cat.time_from_last_childbirth+=dt
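			# density-dependent mortality: the death probability grows with the
			# cat population but saturates through the 2/pi * atan term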
if len(cats)>0:
if c*dt*2*atan(0.05*len(cats))/pi>random.uniform(0,1):
cats.pop(ind)
else :
ind+=1
else :
ind+=1
timeli.append(t)
miceli.append(len(mice))
catli.append(len(cats))
print t,'\t',len(mice),'\t',len(cats)
print >> data, t,'\t',len(mice),'\t',len(cats)
t+=dt
data.close()
upper_limit=1.2*len(mice)
pltfile=open('lv.plt','w')
print >> pltfile,"""se te png
se o "/tmp/lv.png"
unse ke
#se yrange [0:%f]
se xl "Time"
se yl "Number of Prey/Predator"
p 'tempdata.dat' u 1:2 w l,'tempdata.dat' u 1:3 w l
"""%upper_limit
pltfile.close()
commands.getoutput('gnuplot lv.plt')
self.wTree.get_widget("image").set_from_file("/tmp/lv.png")
print 'dynamics ended'
reload(matplotlib.pyplot)
matplotlib.pyplot.plot(timeli,catli,'g-')#timeli,catli,'r-')
matplotlib.pyplot.xlabel("Time")
matplotlib.pyplot.ylabel("Number of mice and cats")
matplotlib.pyplot.show()
gui=gui_display()
gtk.main()
#dynamics()
#import matplotlib.pyplot as plt
#plt.plot(timeli,miceli,'go',timeli,catli,'ro')
#plt.show()
| gpl-3.0 |
kwilliams-mo/iris | lib/iris/tests/test_plot.py | 1 | 32122 | # (C) British Crown Copyright 2010 - 2013, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
from functools import wraps
import types
import warnings
import matplotlib.pyplot as plt
import numpy as np
import iris
import iris.coords as coords
import iris.plot as iplt
import iris.quickplot as qplt
import iris.symbols
import iris.tests.stock
import iris.tests.test_mapping as test_mapping
def simple_cube():
cube = iris.tests.stock.realistic_4d()
cube = cube[:, 0, 0, :]
cube.coord('time').guess_bounds()
return cube
class TestSimple(tests.GraphicsTest):
def test_points(self):
cube = simple_cube()
qplt.contourf(cube)
self.check_graphic()
def test_bounds(self):
cube = simple_cube()
qplt.pcolor(cube)
self.check_graphic()
class TestMissingCoord(tests.GraphicsTest):
def _check(self, cube):
qplt.contourf(cube)
self.check_graphic()
qplt.pcolor(cube)
self.check_graphic()
def test_no_u(self):
cube = simple_cube()
cube.remove_coord('grid_longitude')
self._check(cube)
def test_no_v(self):
cube = simple_cube()
cube.remove_coord('time')
self._check(cube)
def test_none(self):
cube = simple_cube()
cube.remove_coord('grid_longitude')
cube.remove_coord('time')
self._check(cube)
@iris.tests.skip_data
class TestMissingCS(tests.GraphicsTest):
@iris.tests.skip_data
def test_missing_cs(self):
cube = tests.stock.simple_pp()
cube.coord("latitude").coord_system = None
cube.coord("longitude").coord_system = None
qplt.contourf(cube)
qplt.plt.gca().coastlines()
self.check_graphic()
class TestHybridHeight(tests.GraphicsTest):
def setUp(self):
self.cube = iris.tests.stock.realistic_4d()[0, :15, 0, :]
def _check(self, plt_method, test_altitude=True):
plt_method(self.cube)
self.check_graphic()
plt_method(self.cube, coords=['level_height', 'grid_longitude'])
self.check_graphic()
plt_method(self.cube, coords=['grid_longitude', 'level_height'])
self.check_graphic()
if test_altitude:
plt_method(self.cube, coords=['grid_longitude', 'altitude'])
self.check_graphic()
plt_method(self.cube, coords=['altitude', 'grid_longitude'])
self.check_graphic()
def test_points(self):
self._check(qplt.contourf)
def test_bounds(self):
self._check(qplt.pcolor, test_altitude=False)
def test_orography(self):
qplt.contourf(self.cube)
iplt.orography_at_points(self.cube)
iplt.points(self.cube)
self.check_graphic()
coords = ['altitude', 'grid_longitude']
qplt.contourf(self.cube, coords=coords)
iplt.orography_at_points(self.cube, coords=coords)
iplt.points(self.cube, coords=coords)
self.check_graphic()
# TODO: Test bounds once they are supported.
with self.assertRaises(NotImplementedError):
qplt.pcolor(self.cube)
iplt.orography_at_bounds(self.cube)
iplt.outline(self.cube)
self.check_graphic()
class Test1dPlotMultiArgs(tests.GraphicsTest):
# tests for iris.plot using multi-argument calling convention
def setUp(self):
self.cube1d = _load_4d_testcube()[0, :, 0, 0]
self.draw_method = iplt.plot
def test_cube(self):
# just plot a cube against its dim coord
self.draw_method(self.cube1d) # altitude vs temp
self.check_graphic()
def test_coord(self):
# plot the altitude coordinate
self.draw_method(self.cube1d.coord('altitude'))
self.check_graphic()
def test_coord_cube(self):
# plot temperature against sigma
self.draw_method(self.cube1d.coord('sigma'), self.cube1d)
self.check_graphic()
def test_cube_coord(self):
# plot a vertical profile of temperature
self.draw_method(self.cube1d, self.cube1d.coord('altitude'))
self.check_graphic()
def test_coord_coord(self):
# plot two coordinates that are not mappable
self.draw_method(self.cube1d.coord('sigma'),
self.cube1d.coord('altitude'))
self.check_graphic()
def test_coord_coord_map(self):
# plot lat-lon aux coordinates of a trajectory, which draws a map
lon = iris.coords.AuxCoord([0, 5, 10, 15, 20, 25, 30, 35, 40, 45],
standard_name='longitude',
units='degrees_north')
lat = iris.coords.AuxCoord([45, 55, 50, 60, 55, 65, 60, 70, 65, 75],
standard_name='latitude',
units='degrees_north')
self.draw_method(lon, lat)
plt.gca().coastlines()
self.check_graphic()
def test_cube_cube(self):
# plot two phenomena against eachother, in this case just dummy data
cube1 = self.cube1d.copy()
cube2 = self.cube1d.copy()
cube1.rename('some phenomenon')
cube2.rename('some other phenomenon')
cube1.units = iris.unit.Unit('no_unit')
cube2.units = iris.unit.Unit('no_unit')
cube1.data[:] = np.linspace(0, 1, 7)
cube2.data[:] = np.exp(cube1.data)
self.draw_method(cube1, cube2)
self.check_graphic()
def test_incompatible_objects(self):
# incompatible objects (not the same length) should raise an error
with self.assertRaises(ValueError):
self.draw_method(self.cube1d.coord('time'), (self.cube1d))
def test_multimidmensional(self):
# multidimensional cubes are not allowed
cube = _load_4d_testcube()[0, :, :, 0]
with self.assertRaises(ValueError):
self.draw_method(cube)
def test_not_cube_or_coord(self):
# inputs must be cubes or coordinates, otherwise an error should be
# raised
xdim = np.arange(self.cube1d.shape[0])
with self.assertRaises(TypeError):
self.draw_method(xdim, self.cube1d)
def test_coords_deprecated(self):
# ensure a warning is raised if the old coords keyword argument is
# used, and make sure the plot produced is consistent with the old
# interface
msg = 'Missing deprecation warning for coords keyword.'
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.draw_method(self.cube1d, coords=['sigma'])
self.assertEqual(len(w), 1, msg)
self.check_graphic()
def test_coords_deprecation_too_many(self):
# in deprecation mode, too many coords is an error
with self.assertRaises(ValueError):
self.draw_method(self.cube1d, coords=['sigma', 'sigma'])
def test_coords_deprecation_invalid_span(self):
# in deprecation mode, a coordinate that doesn't span data is an error
with self.assertRaises(ValueError):
self.draw_method(self.cube1d, coords=['time'])
class Test1dQuickplotPlotMultiArgs(Test1dPlotMultiArgs):
# tests for iris.plot using multi-argument calling convention
def setUp(self):
self.cube1d = _load_4d_testcube()[0, :, 0, 0]
self.draw_method = qplt.plot
@tests.skip_data
class Test1dScatter(tests.GraphicsTest):
def setUp(self):
self.cube = iris.load_cube(
tests.get_data_path(('NAME', 'NAMEIII_trajectory.txt')),
'Temperature')
self.draw_method = iplt.scatter
def test_coord_coord(self):
x = self.cube.coord('longitude')
y = self.cube.coord('height')
c = self.cube.data
self.draw_method(x, y, c=c, edgecolor='none')
self.check_graphic()
def test_coord_coord_map(self):
x = self.cube.coord('longitude')
y = self.cube.coord('latitude')
c = self.cube.data
self.draw_method(x, y, c=c, edgecolor='none')
plt.gca().coastlines()
self.check_graphic()
def test_coord_cube(self):
x = self.cube.coord('latitude')
y = self.cube
c = self.cube.coord('Travel Time').points
self.draw_method(x, y, c=c, edgecolor='none')
self.check_graphic()
def test_cube_coord(self):
x = self.cube
y = self.cube.coord('height')
c = self.cube.coord('Travel Time').points
self.draw_method(x, y, c=c, edgecolor='none')
self.check_graphic()
def test_cube_cube(self):
x = iris.load_cube(
tests.get_data_path(('NAME', 'NAMEIII_trajectory.txt')),
'Rel Humidity')
y = self.cube
c = self.cube.coord('Travel Time').points
self.draw_method(x, y, c=c, edgecolor='none')
self.check_graphic()
def test_incompatible_objects(self):
# cubes/coordinates of different sizes cannot be plotted
x = self.cube
y = self.cube.coord('height')[:-1]
with self.assertRaises(ValueError):
self.draw_method(x, y)
def test_multidimensional(self):
# multidimensional cubes/coordinates are not allowed
x = _load_4d_testcube()[0, :, :, 0]
y = x.coord('model_level_number')
with self.assertRaises(ValueError):
self.draw_method(x, y)
def test_not_cube_or_coord(self):
# inputs must be cubes or coordinates
x = np.arange(self.cube.shape[0])
y = self.cube
with self.assertRaises(TypeError):
self.draw_method(x, y)
@tests.skip_data
class Test1dQuickplotScatter(Test1dScatter):
def setUp(self):
self.cube = iris.load_cube(
tests.get_data_path(('NAME', 'NAMEIII_trajectory.txt')),
'Temperature')
self.draw_method = qplt.scatter
@iris.tests.skip_data
class TestAttributePositive(tests.GraphicsTest):
def test_1d_positive_up(self):
path = tests.get_data_path(('NetCDF', 'ORCA2', 'votemper.nc'))
cube = iris.load_cube(path)
qplt.plot(cube.coord('depth'), cube[0, :, 60, 80])
self.check_graphic()
def test_1d_positive_down(self):
path = tests.get_data_path(('NetCDF', 'ORCA2', 'votemper.nc'))
cube = iris.load_cube(path)
qplt.plot(cube[0, :, 60, 80], cube.coord('depth'))
self.check_graphic()
def test_2d_positive_up(self):
path = tests.get_data_path(('NetCDF', 'testing',
'small_theta_colpex.nc'))
cube = iris.load_cube(path)[0, :, 42, :]
qplt.pcolormesh(cube)
self.check_graphic()
def test_2d_positive_down(self):
path = tests.get_data_path(('NetCDF', 'ORCA2', 'votemper.nc'))
cube = iris.load_cube(path)[0, :, 42, :]
qplt.pcolormesh(cube)
self.check_graphic()
# Caches _load_4d_testcube so subsequent calls are faster
def cache(fn, cache={}):
def inner(*args, **kwargs):
key = fn.__name__
if key not in cache:
cache[key] = fn(*args, **kwargs)
return cache[key]
return inner
@cache
def _load_4d_testcube():
# Load example 4d data (TZYX).
test_cube = iris.tests.stock.realistic_4d()
# Replace forecast_period coord with a multi-valued version.
time_coord = test_cube.coord('time')
n_times = len(time_coord.points)
forecast_dims = test_cube.coord_dims(time_coord)
test_cube.remove_coord('forecast_period')
# Make up values (including bounds), to roughly match older testdata.
point_values = np.linspace((1 + 1.0 / 6), 2.0, n_times)
point_uppers = point_values + (point_values[1] - point_values[0])
bound_values = np.column_stack([point_values, point_uppers])
# NOTE: this must be a DimCoord
# - an equivalent AuxCoord produces different plots.
new_forecast_coord = iris.coords.DimCoord(
points=point_values,
bounds=bound_values,
standard_name='forecast_period',
units=iris.unit.Unit('hours')
)
test_cube.add_aux_coord(new_forecast_coord, forecast_dims)
# Heavily reduce dimensions for faster testing.
# NOTE: this makes ZYX non-contiguous. Doesn't seem to matter for now.
test_cube = test_cube[:, ::10, ::10, ::10]
return test_cube
@cache
def _load_wind_no_bounds():
# Load the COLPEX data => TZYX
path = tests.get_data_path(('PP', 'COLPEX', 'small_eastward_wind.pp'))
wind = iris.load_cube(path, 'eastward_wind')
# Remove bounds from all coords that have them.
wind.coord('grid_latitude').bounds = None
wind.coord('grid_longitude').bounds = None
wind.coord('level_height').bounds = None
wind.coord('sigma').bounds = None
return wind[:, :, :50, :50]
def _time_series(src_cube):
# Until we have plotting support for multiple axes on the same dimension,
# remove the time coordinate and its axis.
cube = src_cube.copy()
cube.remove_coord('time')
return cube
def _date_series(src_cube):
# Until we have plotting support for multiple axes on the same dimension,
# remove the forecast_period coordinate and its axis.
cube = src_cube.copy()
cube.remove_coord('forecast_period')
return cube
class SliceMixin(object):
"""Mixin class providing tests for each 2-dimensional permutation of axes.
Requires self.draw_method to be the relevant plotting function,
and self.results to be a dictionary containing the desired test results."""
def test_yx(self):
cube = self.wind[0, 0, :, :]
self.draw_method(cube)
self.check_graphic()
def test_zx(self):
cube = self.wind[0, :, 0, :]
self.draw_method(cube)
self.check_graphic()
def test_tx(self):
cube = _time_series(self.wind[:, 0, 0, :])
self.draw_method(cube)
self.check_graphic()
def test_zy(self):
cube = self.wind[0, :, :, 0]
self.draw_method(cube)
self.check_graphic()
def test_ty(self):
cube = _time_series(self.wind[:, 0, :, 0])
self.draw_method(cube)
self.check_graphic()
def test_tz(self):
cube = _time_series(self.wind[:, :, 0, 0])
self.draw_method(cube)
self.check_graphic()
@iris.tests.skip_data
class TestContour(tests.GraphicsTest, SliceMixin):
"""Test the iris.plot.contour routine."""
def setUp(self):
self.wind = _load_4d_testcube()
self.draw_method = iplt.contour
@iris.tests.skip_data
class TestContourf(tests.GraphicsTest, SliceMixin):
"""Test the iris.plot.contourf routine."""
def setUp(self):
self.wind = _load_4d_testcube()
self.draw_method = iplt.contourf
@iris.tests.skip_data
class TestPcolor(tests.GraphicsTest, SliceMixin):
"""Test the iris.plot.pcolor routine."""
def setUp(self):
self.wind = _load_4d_testcube()
self.draw_method = iplt.pcolor
@iris.tests.skip_data
class TestPcolormesh(tests.GraphicsTest, SliceMixin):
"""Test the iris.plot.pcolormesh routine."""
def setUp(self):
self.wind = _load_4d_testcube()
self.draw_method = iplt.pcolormesh
def check_warnings(method):
"""
Decorator that adds a catch_warnings and filter to assert
the method being decorated issues a UserWarning.
"""
@wraps(method)
def decorated_method(self, *args, **kwargs):
# Force reset of iris.coords warnings registry to avoid suppression of
# repeated warnings. warnings.resetwarnings() does not do this.
if hasattr(coords, '__warningregistry__'):
coords.__warningregistry__.clear()
# Check that method raises warning.
with warnings.catch_warnings():
warnings.simplefilter("error")
with self.assertRaises(UserWarning):
return method(self, *args, **kwargs)
return decorated_method
def ignore_warnings(method):
"""
Decorator that adds a catch_warnings and filter to suppress
any warnings issues by the method being decorated.
"""
@wraps(method)
def decorated_method(self, *args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return method(self, *args, **kwargs)
return decorated_method
class CheckForWarningsMetaclass(type):
"""
Metaclass that adds a further test for each base class test
that checks that each test raises a UserWarning. Each base
class test is then overriden to ignore warnings in order to
check the underlying functionality.
"""
def __new__(cls, name, bases, local):
def add_decorated_methods(attr_dict, target_dict, decorator):
for key, value in attr_dict.items():
if (isinstance(value, types.FunctionType) and
key.startswith('test')):
new_key = '_'.join((key, decorator.__name__))
if new_key not in target_dict:
wrapped = decorator(value)
wrapped.__name__ = new_key
target_dict[new_key] = wrapped
else:
                        raise RuntimeError('An attribute called {!r} '
'already exists.'.format(new_key))
def override_with_decorated_methods(attr_dict, target_dict,
decorator):
for key, value in attr_dict.items():
if (isinstance(value, types.FunctionType) and
key.startswith('test')):
target_dict[key] = decorator(value)
# Add decorated versions of base methods
# to check for warnings.
for base in bases:
add_decorated_methods(base.__dict__, local, check_warnings)
# Override base methods to ignore warnings.
for base in bases:
override_with_decorated_methods(base.__dict__, local,
ignore_warnings)
return type.__new__(cls, name, bases, local)
@iris.tests.skip_data
class TestPcolorNoBounds(tests.GraphicsTest, SliceMixin):
"""
Test the iris.plot.pcolor routine on a cube with coordinates
that have no bounds.
"""
__metaclass__ = CheckForWarningsMetaclass
def setUp(self):
self.wind = _load_wind_no_bounds()
self.draw_method = iplt.pcolor
@iris.tests.skip_data
class TestPcolormeshNoBounds(tests.GraphicsTest, SliceMixin):
"""
Test the iris.plot.pcolormesh routine on a cube with coordinates
that have no bounds.
"""
__metaclass__ = CheckForWarningsMetaclass
def setUp(self):
self.wind = _load_wind_no_bounds()
self.draw_method = iplt.pcolormesh
class Slice1dMixin(object):
"""Mixin class providing tests for each 1-dimensional permutation of axes.
Requires self.draw_method to be the relevant plotting function,
and self.results to be a dictionary containing the desired test results."""
def test_x(self):
cube = self.wind[0, 0, 0, :]
self.draw_method(cube)
self.check_graphic()
def test_y(self):
cube = self.wind[0, 0, :, 0]
self.draw_method(cube)
self.check_graphic()
def test_z(self):
cube = self.wind[0, :, 0, 0]
self.draw_method(cube)
self.check_graphic()
def test_t(self):
cube = _time_series(self.wind[:, 0, 0, 0])
self.draw_method(cube)
self.check_graphic()
def test_t_dates(self):
cube = _date_series(self.wind[:, 0, 0, 0])
self.draw_method(cube)
plt.gcf().autofmt_xdate()
plt.xlabel('Phenomenon time')
self.check_graphic()
@iris.tests.skip_data
class TestPlot(tests.GraphicsTest, Slice1dMixin):
"""Test the iris.plot.plot routine."""
def setUp(self):
self.wind = _load_4d_testcube()
self.draw_method = iplt.plot
@iris.tests.skip_data
class TestQuickplotPlot(tests.GraphicsTest, Slice1dMixin):
"""Test the iris.quickplot.plot routine."""
def setUp(self):
self.wind = _load_4d_testcube()
self.draw_method = qplt.plot
_load_cube_once_cache = {}
def load_cube_once(filename, constraint):
"""Same syntax as load_cube, but will only load a file once,
then cache the answer in a dictionary.
"""
global _load_cube_once_cache
key = (filename, str(constraint))
cube = _load_cube_once_cache.get(key, None)
if cube is None:
cube = iris.load_cube(filename, constraint)
_load_cube_once_cache[key] = cube
return cube
class LambdaStr(object):
"""Provides a callable function which has a sensible __repr__."""
def __init__(self, repr, lambda_fn):
self.repr = repr
self.lambda_fn = lambda_fn
def __call__(self, *args, **kwargs):
return self.lambda_fn(*args, **kwargs)
def __repr__(self):
return self.repr
@iris.tests.skip_data
class TestPlotCoordinatesGiven(tests.GraphicsTest):
def setUp(self):
filename = tests.get_data_path(('PP', 'COLPEX',
'theta_and_orog_subset.pp'))
self.cube = load_cube_once(filename, 'air_potential_temperature')
self.draw_module = iris.plot
self.contourf = LambdaStr('iris.plot.contourf',
lambda cube, *args, **kwargs:
iris.plot.contourf(cube, *args, **kwargs))
self.contour = LambdaStr('iris.plot.contour',
lambda cube, *args, **kwargs:
iris.plot.contour(cube, *args, **kwargs))
self.points = LambdaStr('iris.plot.points',
lambda cube, *args, **kwargs:
iris.plot.points(cube, c=cube.data,
*args, **kwargs))
self.plot = LambdaStr('iris.plot.plot',
lambda cube, *args, **kwargs:
iris.plot.plot(cube, *args, **kwargs))
self.results = {'yx': ([self.contourf, ['grid_latitude',
'grid_longitude']],
[self.contourf, ['grid_longitude',
'grid_latitude']],
[self.contour, ['grid_latitude',
'grid_longitude']],
[self.contour, ['grid_longitude',
'grid_latitude']],
[self.points, ['grid_latitude',
'grid_longitude']],
[self.points, ['grid_longitude',
'grid_latitude']],),
'zx': ([self.contourf, ['model_level_number',
'grid_longitude']],
[self.contourf, ['grid_longitude',
'model_level_number']],
[self.contour, ['model_level_number',
'grid_longitude']],
[self.contour, ['grid_longitude',
'model_level_number']],
[self.points, ['model_level_number',
'grid_longitude']],
[self.points, ['grid_longitude',
'model_level_number']],),
'tx': ([self.contourf, ['time', 'grid_longitude']],
[self.contourf, ['grid_longitude', 'time']],
[self.contour, ['time', 'grid_longitude']],
[self.contour, ['grid_longitude', 'time']],
[self.points, ['time', 'grid_longitude']],
[self.points, ['grid_longitude', 'time']],),
'x': ([self.plot, ['grid_longitude']],),
'y': ([self.plot, ['grid_latitude']],)
}
def draw(self, draw_method, *args, **kwargs):
draw_fn = getattr(self.draw_module, draw_method)
draw_fn(*args, **kwargs)
self.check_graphic()
def run_tests(self, cube, results):
for draw_method, coords in results:
draw_method(cube, coords=coords)
try:
self.check_graphic()
except AssertionError, err:
self.fail('Draw method %r failed with coords: %r. '
'Assertion message: %s' % (draw_method, coords, err))
def run_tests_1d(self, cube, results):
# there is a different calling convention for 1d plots
for draw_method, coords in results:
draw_method(cube.coord(coords[0]), cube)
try:
self.check_graphic()
except AssertionError as err:
msg = 'Draw method {!r} failed with coords: {!r}. ' \
'Assertion message: {!s}'
self.fail(msg.format(draw_method, coords, err))
def test_yx(self):
test_cube = self.cube[0, 0, :, :]
self.run_tests(test_cube, self.results['yx'])
def test_zx(self):
test_cube = self.cube[0, :15, 0, :]
self.run_tests(test_cube, self.results['zx'])
def test_tx(self):
test_cube = self.cube[:, 0, 0, :]
self.run_tests(test_cube, self.results['tx'])
def test_x(self):
test_cube = self.cube[0, 0, 0, :]
self.run_tests_1d(test_cube, self.results['x'])
def test_y(self):
test_cube = self.cube[0, 0, :, 0]
self.run_tests_1d(test_cube, self.results['y'])
def test_badcoords(self):
cube = self.cube[0, 0, :, :]
draw_fn = getattr(self.draw_module, 'contourf')
self.assertRaises(ValueError, draw_fn, cube,
coords=['grid_longitude', 'grid_longitude'])
self.assertRaises(ValueError, draw_fn, cube,
coords=['grid_longitude', 'grid_longitude',
'grid_latitude'])
self.assertRaises(iris.exceptions.CoordinateNotFoundError, draw_fn,
cube, coords=['grid_longitude', 'wibble'])
self.assertRaises(ValueError, draw_fn, cube, coords=[])
self.assertRaises(ValueError, draw_fn, cube,
coords=[cube.coord('grid_longitude'),
cube.coord('grid_longitude')])
self.assertRaises(ValueError, draw_fn, cube,
coords=[cube.coord('grid_longitude'),
cube.coord('grid_longitude'),
cube.coord('grid_longitude')])
def test_non_cube_coordinate(self):
cube = self.cube[0, :, :, 0]
pts = -100 + np.arange(cube.shape[1]) * 13
x = coords.DimCoord(pts, standard_name='model_level_number',
attributes={'positive': 'up'})
self.draw('contourf', cube, coords=['grid_latitude', x])
@iris.tests.skip_data
class TestPlotDimAndAuxCoordsKwarg(tests.GraphicsTest):
def setUp(self):
filename = tests.get_data_path(('NetCDF', 'rotated', 'xy',
'rotPole_landAreaFraction.nc'))
self.cube = iris.load_cube(filename)
def test_default(self):
iplt.contourf(self.cube)
plt.gca().coastlines()
self.check_graphic()
def test_coords(self):
# Pass in dimension coords.
rlat = self.cube.coord('grid_latitude')
rlon = self.cube.coord('grid_longitude')
iplt.contourf(self.cube, coords=[rlon, rlat])
plt.gca().coastlines()
self.check_graphic()
# Pass in auxiliary coords.
lat = self.cube.coord('latitude')
lon = self.cube.coord('longitude')
iplt.contourf(self.cube, coords=[lon, lat])
plt.gca().coastlines()
self.check_graphic()
def test_coord_names(self):
# Pass in names of dimension coords.
iplt.contourf(self.cube, coords=['grid_longitude', 'grid_latitude'])
plt.gca().coastlines()
self.check_graphic()
# Pass in names of auxiliary coords.
iplt.contourf(self.cube, coords=['longitude', 'latitude'])
plt.gca().coastlines()
self.check_graphic()
def test_yx_order(self):
# Do not attempt to draw coastlines as it is not a map.
iplt.contourf(self.cube, coords=['grid_latitude', 'grid_longitude'])
self.check_graphic()
iplt.contourf(self.cube, coords=['latitude', 'longitude'])
self.check_graphic()
class TestSymbols(tests.GraphicsTest):
def test_cloud_cover(self):
iplt.symbols(range(10), [0] * 10, [iris.symbols.CLOUD_COVER[i]
for i in range(10)], 0.375)
self.check_graphic()
class TestPlottingExceptions(tests.IrisTest):
def setUp(self):
self.bounded_cube = tests.stock.lat_lon_cube()
self.bounded_cube.coord("latitude").guess_bounds()
self.bounded_cube.coord("longitude").guess_bounds()
def test_boundmode_multidim(self):
# Test exception translation.
# We can't get contiguous bounded grids from multi-d coords.
cube = self.bounded_cube
cube.remove_coord("latitude")
cube.add_aux_coord(coords.AuxCoord(points=cube.data,
standard_name='latitude',
units='degrees'), [0, 1])
with self.assertRaises(ValueError):
iplt.pcolormesh(cube, coords=['longitude', 'latitude'])
def test_boundmode_4bounds(self):
# Test exception translation.
# We can only get contiguous bounded grids with 2 bounds per point.
cube = self.bounded_cube
lat = coords.AuxCoord.from_coord(cube.coord("latitude"))
lat.bounds = np.array([lat.points, lat.points + 1,
lat.points + 2, lat.points + 3]).transpose()
cube.remove_coord("latitude")
cube.add_aux_coord(lat, 0)
with self.assertRaises(ValueError):
iplt.pcolormesh(cube, coords=['longitude', 'latitude'])
def test_different_coord_systems(self):
cube = self.bounded_cube
lat = cube.coord('latitude')
lon = cube.coord('longitude')
lat.coord_system = iris.coord_systems.GeogCS(7000000)
lon.coord_system = iris.coord_systems.GeogCS(7000001)
with self.assertRaises(ValueError):
iplt.pcolormesh(cube, coords=['longitude', 'latitude'])
@iris.tests.skip_data
class TestPlotOtherCoordSystems(tests.GraphicsTest):
def test_plot_tmerc(self):
filename = tests.get_data_path(('NetCDF', 'transverse_mercator',
'tmean_1910_1910.nc'))
self.cube = iris.load_cube(filename)
iplt.pcolormesh(self.cube[0])
plt.gca().coastlines()
self.check_graphic()
if __name__ == "__main__":
tests.main()
| gpl-3.0 |
ahoyosid/scikit-learn | examples/applications/plot_stock_market.py | 227 | 8284 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters and can automatically choose the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where nodes
represent the stocks and edges the links between them:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plane
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electric',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
dmargala/qusp | examples/compare_delta.py | 1 | 7364 | #!/usr/bin/env python
import argparse
import numpy as np
import numpy.ma as ma
import h5py
import qusp
import matplotlib.pyplot as plt
import scipy.interpolate
import fitsio
class DeltaLOS(object):
def __init__(self, thing_id):
path = '/data/lya/deltas/delta-%d.fits' % thing_id
hdulist = fitsio.FITS(path, mode=fitsio.READONLY)
self.pmf = hdulist[1].read_header()['pmf']
self.loglam = hdulist[1]['loglam'][:]
self.wave = np.power(10.0, self.loglam)
self.delta = hdulist[1]['delta'][:]
self.weight = hdulist[1]['weight'][:]
self.cont = hdulist[1]['cont'][:]
self.msha = hdulist[1]['msha'][:]
self.mabs = hdulist[1]['mabs'][:]
self.ivar = hdulist[1]['ivar'][:]
self.cf = self.cont*self.msha*self.mabs
self.flux = (1+self.delta)*self.cf
def main():
# parse command-line arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--verbose", action="store_true",
help="print verbose output")
## targets to fit
parser.add_argument("--name", type=str, default=None,
help="target list")
parser.add_argument("--gamma", type=float, default=3.8,
help="LSS growth and redshift evolution of mean absorption gamma")
parser.add_argument("--index", type=int, default=1000,
help="target index")
parser.add_argument("--pmf", type=str, default=None,
help="target plate-mjd-fiber string")
args = parser.parse_args()
print 'Loading forest data...'
# import data
skim = h5py.File(args.name+'.hdf5', 'r')
if args.pmf:
plate, mjd, fiber = [int(val) for val in args.pmf.split('-')]
index = np.where((skim['meta']['plate'] == plate) & (skim['meta']['mjd'] == mjd) & (skim['meta']['fiber'] == fiber))[0][0]
else:
index = args.index
flux = np.ma.MaskedArray(skim['flux'][index], mask=skim['mask'][index])
ivar = np.ma.MaskedArray(skim['ivar'][index], mask=skim['mask'][index])
loglam = skim['loglam'][:]
wave = np.power(10.0, loglam)
z = skim['z'][index]
norm = skim['norm'][index]
meta = skim['meta'][index]
linear_continuum = h5py.File(args.name+'-linear-continuum.hdf5', 'r')
a = linear_continuum['params_a'][index]
b = linear_continuum['params_b'][index]
continuum = linear_continuum['continuum']
continuum_wave = linear_continuum['continuum_wave']
continuum_interp = scipy.interpolate.UnivariateSpline(continuum_wave, continuum, ext=1, s=0)
abs_alpha = linear_continuum.attrs['abs_alpha']
abs_beta = linear_continuum.attrs['abs_beta']
forest_wave_ref = (1+z)*linear_continuum.attrs['forest_wave_ref']
wave_lya = linear_continuum.attrs['wave_lya']
forest_pixel_redshifts = wave/wave_lya - 1
abs_coefs = abs_alpha*np.power(1+forest_pixel_redshifts, abs_beta)
print 'flux 1280 Ang: %.2f' % norm
print 'fit param a: %.2f' % a
print 'fit param b: %.2f' % b
def model_flux(a, b):
return a*np.power(wave/forest_wave_ref, b)*continuum_interp(wave/(1+z))*np.exp(-abs_coefs)
def chisq(p):
mflux = model_flux(p[0], p[1])
res = flux - mflux
return ma.sum(res*res*ivar)/ma.sum(ivar)
from scipy.optimize import minimize
result = minimize(chisq, (a, b))
a,b = result.x
print 'fit param a: %.2f' % a
print 'fit param b: %.2f' % b
# rest and obs refer to pixel grid
print 'Estimating deltas in forest frame...'
mflux = model_flux(a,b)
delta_flux = flux/mflux - 1.0
delta_ivar = ivar*mflux*mflux
forest_min_z = linear_continuum.attrs['forest_min_z']
forest_max_z = linear_continuum.attrs['forest_max_z']
forest_dz = 0.1
forest_z_bins = np.arange(forest_min_z, forest_max_z + forest_dz, forest_dz)
print 'Adjusting weights for pipeline variance and LSS variance...'
var_lss = scipy.interpolate.UnivariateSpline(forest_z_bins, 0.05 + 0.06*(forest_z_bins - 2.0)**2, s=0)
var_pipe_scale = scipy.interpolate.UnivariateSpline(forest_z_bins, 0.7 + 0.2*(forest_z_bins - 2.0)**2, s=0)
delta_weight = delta_ivar*var_pipe_scale(forest_pixel_redshifts)
delta_weight = delta_weight/(1 + delta_weight*var_lss(forest_pixel_redshifts))
thing_id = meta['thing_id']
pmf = '%s-%s-%s' % (meta['plate'],meta['mjd'],meta['fiber'])
los = DeltaLOS(thing_id)
my_msha = norm*a*np.power(wave/forest_wave_ref, b)
my_wave = wave
my_flux = norm*flux
my_cf = my_msha*continuum_interp(wave/(1+z))*np.exp(-abs_coefs)
my_ivar = ivar/(norm*norm)
my_delta = delta_flux
my_weight = delta_weight
# mean_ratio = np.average(my_msha*continuum)/ma.average(los.msha*los.cont)
# print mean_ratio
plt.figure(figsize=(12,4))
plt.plot(my_wave, my_flux, color='gray')
my_dflux = ma.power(my_ivar, -0.5)
plt.fill_between(my_wave, my_flux - my_dflux, my_flux + my_dflux, color='gray', alpha=0.5)
plt.plot(my_wave, my_msha*continuum_interp(wave/(1+z)), label='My continuum', color='blue')
plt.plot(los.wave, los.cont, label='Busca continuum', color='red')
plt.plot(my_wave, my_cf, label='My cf', color='green')
plt.plot(los.wave, los.cf, label='Busca cf', color='orange')
plt.legend()
plt.title(r'%s (%s), $z$ = %.2f' % (pmf, thing_id, z))
plt.xlabel(r'Observed Wavelength ($\AA$)')
plt.ylabel(r'Observed Flux')
plt.xlim(los.wave[[0,-1]])
plt.savefig(args.name+'-example-flux.png', dpi=100, bbox_inches='tight')
plt.close()
plt.figure(figsize=(12,4))
my_delta_sigma = ma.power(delta_weight, -0.5)
# plt.fill_between(my_wave, my_delta - my_delta_sigma, my_delta + my_delta_sigma, color='blue', alpha=0.1, label='My Delta')
plt.scatter(my_wave, my_delta, color='blue', marker='+', label='My Delta')
plt.plot(my_wave, +my_delta_sigma, color='blue', ls=':')
plt.plot(my_wave, -my_delta_sigma, color='blue', ls=':')
los_delta_sigma = ma.power(los.weight, -0.5)
# plt.fill_between(los.wave, los.delta - los_delta_sigma, los.delta + los_delta_sigma, color='red', alpha=01, label='Busca Delta')
plt.scatter(los.wave, los.delta, color='red', marker='+', label='Busca Delta')
plt.plot(los.wave, +los_delta_sigma, color='red', ls=':')
plt.plot(los.wave, -los_delta_sigma, color='red', ls=':')
my_lss_sigma = np.sqrt(var_lss(forest_pixel_redshifts))
plt.plot(my_wave, +my_lss_sigma, color='black', ls='--')
plt.plot(my_wave, -my_lss_sigma, color='black', ls='--')
# my_sn_sigma = np.sqrt(np.power(1 + forest_pixel_redshifts, 0.5*abs_beta))/10
# plt.plot(my_wave, +my_sn_sigma, color='orange', ls='--')
# plt.plot(my_wave, -my_sn_sigma, color='orange', ls='--')
# import matplotlib.patches as mpatches
#
# blue_patch = mpatches.Patch(color='blue', alpha=0.3, label='My Delta')
# red_patch = mpatches.Patch(color='red', alpha=0.3, label='Busca Delta')
# plt.legend(handles=[blue_patch,red_patch])
plt.title(r'%s (%s), $z$ = %.2f' % (pmf, thing_id, z))
plt.ylim(-2,2)
plt.xlim(los.wave[[0,-1]])
plt.xlabel(r'Observed Wavelength ($\AA$)')
plt.ylabel(r'Delta')
plt.legend()
plt.savefig(args.name+'-example-delta.png', dpi=100, bbox_inches='tight')
plt.close()
if __name__ == '__main__':
main()
| mit |
guildai/guild | examples/iris-svm/plot_iris_exercise.py | 1 | 1702 | """
A tutorial exercise for using different SVM kernels.
Adapted from:
https://scikit-learn.org/stable/auto_examples/exercises/plot_iris_exercise.html
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn import datasets, svm
kernel = 'linear' # choice of linear, rbf, poly
test_split = 0.1
random_seed = 0
degree = 3
gamma = 10
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(random_seed)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(np.float)
split_pos = int((1 - test_split) * n_sample)
X_train = X[:split_pos]
y_train = y[:split_pos]
X_test = X[split_pos:]
y_test = y[split_pos:]
# fit the model
clf = svm.SVC(kernel=kernel, degree=degree, gamma=gamma)
clf.fit(X_train, y_train)
print("Train accuracy: %s" % clf.score(X_train, y_train))
print("Test accuracy: %f" % clf.score(X_test, y_test))
plt.figure()
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired,
edgecolor='k', s=20)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none',
zorder=10, edgecolor='k')
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'],
linestyles=['--', '-', '--'], levels=[-.5, 0, .5])
plt.title(kernel)
plt.savefig("plot.png")
| apache-2.0 |
jzt5132/scikit-learn | examples/svm/plot_rbf_parameters.py | 132 | 8096 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameters can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores result from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause |
etkirsch/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# copy the labels and mark the unlabeled points with -1
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
YinongLong/scikit-learn | examples/preprocessing/plot_function_transformer.py | 158 | 1993 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
lw = 0
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, lw=lw)
plt.figure()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
lw=lw,
s=60
)
plt.show()
| bsd-3-clause |
newville/scikit-image | doc/examples/plot_rank_mean.py | 17 | 1499 | """
============
Mean filters
============
This example compares the following mean filters of the rank filter package:
* **local mean**: uses all pixels belonging to the structuring element to
compute the average gray level.
* **percentile mean**: only uses values between percentiles p0 and p1
(here 10% and 90%).
* **bilateral mean**: only uses pixels of the structuring element having a gray
level situated inside g-s0 and g+s1 (here g-500 and g+500).
Percentile and usual mean give similar results here; these filters smooth the
complete image (background and details). Bilateral mean exhibits a high
filtering rate for continuous areas (i.e. background) while higher image
frequencies remain untouched.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.morphology import disk
from skimage.filters import rank
image = (data.coins()).astype(np.uint16) * 16
selem = disk(20)
percentile_result = rank.mean_percentile(image, selem=selem, p0=.1, p1=.9)
bilateral_result = rank.mean_bilateral(image, selem=selem, s0=500, s1=500)
normal_result = rank.mean(image, selem=selem)
fig, axes = plt.subplots(nrows=3, figsize=(8, 10))
ax0, ax1, ax2 = axes
ax0.imshow(np.hstack((image, percentile_result)))
ax0.set_title('Percentile mean')
ax0.axis('off')
ax1.imshow(np.hstack((image, bilateral_result)))
ax1.set_title('Bilateral mean')
ax1.axis('off')
ax2.imshow(np.hstack((image, normal_result)))
ax2.set_title('Local mean')
ax2.axis('off')
plt.show()
| bsd-3-clause |
billy-inn/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 306 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example visually compares, in the feature space, the results of
two different component analysis techniques.
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 student T with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
mne-tools/mne-python | mne/viz/circle.py | 14 | 15879 | """Functions to plot on circle as for connectivity."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#
# License: Simplified BSD
from itertools import cycle
from functools import partial
import numpy as np
from .utils import plt_show
def circular_layout(node_names, node_order, start_pos=90, start_between=True,
group_boundaries=None, group_sep=10):
"""Create layout arranging nodes on a circle.
Parameters
----------
node_names : list of str
Node names.
node_order : list of str
List with node names defining the order in which the nodes are
arranged. Must have the elements as node_names but the order can be
different. The nodes are arranged clockwise starting at "start_pos"
degrees.
start_pos : float
Angle in degrees that defines where the first node is plotted.
start_between : bool
If True, the layout starts with the position between the nodes. This is
the same as adding "180. / len(node_names)" to start_pos.
group_boundaries : None | array-like
List of boundaries between groups at which point a "group_sep" will
be inserted. E.g. "[0, len(node_names) / 2]" will create two groups.
group_sep : float
Group separation angle in degrees. See "group_boundaries".
Returns
-------
node_angles : array, shape=(n_node_names,)
Node angles in degrees.
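Examples
--------
A minimal sketch with hypothetical node names (the exact angles depend on
``start_pos``, ``start_between`` and ``group_sep``)::
>>> names = ['a', 'b', 'c', 'd']
>>> angles = circular_layout(names, names)  # doctest: +SKIP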
"""
n_nodes = len(node_names)
if len(node_order) != n_nodes:
raise ValueError('node_order has to be the same length as node_names')
if group_boundaries is not None:
boundaries = np.array(group_boundaries, dtype=np.int64)
if np.any(boundaries >= n_nodes) or np.any(boundaries < 0):
raise ValueError('"group_boundaries" has to be between 0 and '
'n_nodes - 1.')
if len(boundaries) > 1 and np.any(np.diff(boundaries) <= 0):
raise ValueError('"group_boundaries" must have non-decreasing '
'values.')
n_group_sep = len(group_boundaries)
else:
n_group_sep = 0
boundaries = None
# convert it to a list with indices
node_order = [node_order.index(name) for name in node_names]
node_order = np.array(node_order)
if len(np.unique(node_order)) != n_nodes:
raise ValueError('node_order has repeated entries')
node_sep = (360. - n_group_sep * group_sep) / n_nodes
if start_between:
start_pos += node_sep / 2
if boundaries is not None and boundaries[0] == 0:
# special case when a group separator is at the start
start_pos += group_sep / 2
boundaries = boundaries[1:] if n_group_sep > 1 else None
node_angles = np.ones(n_nodes, dtype=np.float64) * node_sep
node_angles[0] = start_pos
if boundaries is not None:
node_angles[boundaries] += group_sep
node_angles = np.cumsum(node_angles)[node_order]
return node_angles
def _plot_connectivity_circle_onpick(event, fig=None, axes=None, indices=None,
n_nodes=0, node_angles=None,
ylim=[9, 10]):
"""Isolate connections around a single node when user left clicks a node.
On right click, resets all connections.
"""
if event.inaxes != axes:
return
if event.button == 1: # left click
# click must be near node radius
if not ylim[0] <= event.ydata <= ylim[1]:
return
# all angles in range [0, 2*pi]
node_angles = node_angles % (np.pi * 2)
node = np.argmin(np.abs(event.xdata - node_angles))
patches = event.inaxes.patches
for ii, (x, y) in enumerate(zip(indices[0], indices[1])):
patches[ii].set_visible(node in [x, y])
fig.canvas.draw()
elif event.button == 3: # right click
patches = event.inaxes.patches
for ii in range(np.size(indices, axis=1)):
patches[ii].set_visible(True)
fig.canvas.draw()
def plot_connectivity_circle(con, node_names, indices=None, n_lines=None,
node_angles=None, node_width=None,
node_colors=None, facecolor='black',
textcolor='white', node_edgecolor='black',
linewidth=1.5, colormap='hot', vmin=None,
vmax=None, colorbar=True, title=None,
colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
fontsize_title=12, fontsize_names=8,
fontsize_colorbar=8, padding=6.,
fig=None, subplot=111, interactive=True,
node_linewidth=2., show=True):
"""Visualize connectivity as a circular graph.
Parameters
----------
con : array
Connectivity scores. Can be a square matrix, or a 1D array. If a 1D
array is provided, "indices" has to be used to define the connection
indices.
node_names : list of str
Node names. The order corresponds to the order in con.
indices : tuple of array | None
Two arrays with indices of connections for which the connections
strengths are defined in con. Only needed if con is a 1D array.
n_lines : int | None
If not None, only the n_lines strongest connections (strength=abs(con))
are drawn.
node_angles : array, shape (n_node_names,) | None
Array with node positions in degrees. If None, the nodes are equally
spaced on the circle. See mne.viz.circular_layout.
node_width : float | None
Width of each node in degrees. If None, the minimum angle between any
two nodes is used as the width.
node_colors : list of tuple | list of str
List with the color to use for each node. If fewer colors than nodes
are provided, the colors will be repeated. Any color supported by
matplotlib can be used, e.g., RGBA tuples, named colors.
facecolor : str
Color to use for background. See matplotlib.colors.
textcolor : str
Color to use for text. See matplotlib.colors.
node_edgecolor : str
Color to use for lines around nodes. See matplotlib.colors.
linewidth : float
Line width to use for connections.
colormap : str | instance of matplotlib.colors.LinearSegmentedColormap
Colormap to use for coloring the connections.
vmin : float | None
Minimum value for colormap. If None, it is determined automatically.
vmax : float | None
Maximum value for colormap. If None, it is determined automatically.
colorbar : bool
Display a colorbar or not.
title : str
The figure title.
colorbar_size : float
Size of the colorbar.
colorbar_pos : tuple, shape (2,)
Position of the colorbar.
fontsize_title : int
Font size to use for title.
fontsize_names : int
Font size to use for node names.
fontsize_colorbar : int
Font size to use for colorbar.
padding : float
Space to add around figure to accommodate long labels.
fig : None | instance of matplotlib.figure.Figure
The figure to use. If None, a new figure with the specified background
color will be created.
subplot : int | tuple, shape (3,)
Location of the subplot when creating figures with multiple plots. E.g.
121 or (1, 2, 1) for 1 row, 2 columns, plot 1. See
matplotlib.pyplot.subplot.
interactive : bool
When enabled, left-click on a node to show only connections to that
node. Right-click shows all connections.
node_linewidth : float
Line width for nodes.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure handle.
axes : instance of matplotlib.projections.polar.PolarAxes
The subplot handle.
Notes
-----
This code is based on a circle graph example by Nicolas P. Rougier
By default, :func:`matplotlib.pyplot.savefig` does not take ``facecolor``
into account when saving, even if set when a figure is generated. This
can be addressed via, e.g.::
>>> fig.savefig(fname_fig, facecolor='black') # doctest:+SKIP
If ``facecolor`` is not set via :func:`matplotlib.pyplot.savefig`, the
figure labels, title, and legend may be cut off in the output figure.
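Examples
--------
A minimal sketch using a random symmetric matrix as hypothetical
connectivity data::
>>> import numpy as np
>>> con = np.abs(np.random.RandomState(0).randn(4, 4))
>>> names = ['n1', 'n2', 'n3', 'n4']
>>> fig, axes = plot_connectivity_circle(con, names, n_lines=3)  # doctest: +SKIP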
"""
import matplotlib.pyplot as plt
import matplotlib.path as m_path
import matplotlib.patches as m_patches
n_nodes = len(node_names)
if node_angles is not None:
if len(node_angles) != n_nodes:
raise ValueError('node_angles has to be the same length '
'as node_names')
# convert it to radians
node_angles = node_angles * np.pi / 180
else:
# uniform layout on unit circle
node_angles = np.linspace(0, 2 * np.pi, n_nodes, endpoint=False)
if node_width is None:
# widths correspond to the minimum angle between two nodes
dist_mat = node_angles[None, :] - node_angles[:, None]
dist_mat[np.diag_indices(n_nodes)] = 1e9
node_width = np.min(np.abs(dist_mat))
else:
node_width = node_width * np.pi / 180
if node_colors is not None:
if len(node_colors) < n_nodes:
node_colors = cycle(node_colors)
else:
# assign colors using colormap
try:
spectral = plt.cm.spectral
except AttributeError:
spectral = plt.cm.Spectral
node_colors = [spectral(i / float(n_nodes))
for i in range(n_nodes)]
# handle 1D and 2D connectivity information
if con.ndim == 1:
if indices is None:
raise ValueError('indices has to be provided if con.ndim == 1')
elif con.ndim == 2:
if con.shape[0] != n_nodes or con.shape[1] != n_nodes:
raise ValueError('con has to be 1D or a square matrix')
# we use the lower-triangular part
indices = np.tril_indices(n_nodes, -1)
con = con[indices]
else:
raise ValueError('con has to be 1D or a square matrix')
# get the colormap
if isinstance(colormap, str):
colormap = plt.get_cmap(colormap)
# Make figure background the same colors as axes
if fig is None:
fig = plt.figure(figsize=(8, 8), facecolor=facecolor)
# Use a polar axes
if not isinstance(subplot, tuple):
subplot = (subplot,)
axes = plt.subplot(*subplot, polar=True)
axes.set_facecolor(facecolor)
# No ticks, we'll put our own
plt.xticks([])
plt.yticks([])
# Set y axes limit, add additional space if requested
plt.ylim(0, 10 + padding)
# Remove the black axes border which may obscure the labels
axes.spines['polar'].set_visible(False)
# Draw lines between connected nodes, only draw the strongest connections
if n_lines is not None and len(con) > n_lines:
con_thresh = np.sort(np.abs(con).ravel())[-n_lines]
else:
con_thresh = 0.
# get the connections which we are drawing and sort by connection strength
# this will allow us to draw the strongest connections first
con_abs = np.abs(con)
con_draw_idx = np.where(con_abs >= con_thresh)[0]
con = con[con_draw_idx]
con_abs = con_abs[con_draw_idx]
indices = [ind[con_draw_idx] for ind in indices]
# now sort them
sort_idx = np.argsort(con_abs)
del con_abs
con = con[sort_idx]
indices = [ind[sort_idx] for ind in indices]
# Get vmin vmax for color scaling
if vmin is None:
vmin = np.min(con[np.abs(con) >= con_thresh])
if vmax is None:
vmax = np.max(con)
vrange = vmax - vmin
# We want to add some "noise" to the start and end position of the
# edges: We modulate the noise with the number of connections of the
# node and the connection strength, such that the strongest connections
# are closer to the node center
nodes_n_con = np.zeros((n_nodes), dtype=np.int64)
for i, j in zip(indices[0], indices[1]):
nodes_n_con[i] += 1
nodes_n_con[j] += 1
# initialize random number generator so plot is reproducible
rng = np.random.mtrand.RandomState(0)
n_con = len(indices[0])
noise_max = 0.25 * node_width
start_noise = rng.uniform(-noise_max, noise_max, n_con)
end_noise = rng.uniform(-noise_max, noise_max, n_con)
nodes_n_con_seen = np.zeros_like(nodes_n_con)
for i, (start, end) in enumerate(zip(indices[0], indices[1])):
nodes_n_con_seen[start] += 1
nodes_n_con_seen[end] += 1
start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start]) /
float(nodes_n_con[start]))
end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end]) /
float(nodes_n_con[end]))
# scale connectivity for colormap (vmin<=>0, vmax<=>1)
con_val_scaled = (con - vmin) / vrange
# Finally, we draw the connections
for pos, (i, j) in enumerate(zip(indices[0], indices[1])):
# Start point
t0, r0 = node_angles[i], 10
# End point
t1, r1 = node_angles[j], 10
# Some noise in start and end point
t0 += start_noise[pos]
t1 += end_noise[pos]
verts = [(t0, r0), (t0, 5), (t1, 5), (t1, r1)]
codes = [m_path.Path.MOVETO, m_path.Path.CURVE4, m_path.Path.CURVE4,
m_path.Path.LINETO]
path = m_path.Path(verts, codes)
color = colormap(con_val_scaled[pos])
# Actual line
patch = m_patches.PathPatch(path, fill=False, edgecolor=color,
linewidth=linewidth, alpha=1.)
axes.add_patch(patch)
# Draw ring with colored nodes
height = np.ones(n_nodes) * 1.0
bars = axes.bar(node_angles, height, width=node_width, bottom=9,
edgecolor=node_edgecolor, lw=node_linewidth,
facecolor='.9', align='center')
for bar, color in zip(bars, node_colors):
bar.set_facecolor(color)
# Draw node labels
angles_deg = 180 * node_angles / np.pi
for name, angle_rad, angle_deg in zip(node_names, node_angles, angles_deg):
if angle_deg >= 270:
ha = 'left'
else:
# Flip the label, so text is always upright
angle_deg += 180
ha = 'right'
axes.text(angle_rad, 10.4, name, size=fontsize_names,
rotation=angle_deg, rotation_mode='anchor',
horizontalalignment=ha, verticalalignment='center',
color=textcolor)
if title is not None:
plt.title(title, color=textcolor, fontsize=fontsize_title,
axes=axes)
if colorbar:
sm = plt.cm.ScalarMappable(cmap=colormap,
norm=plt.Normalize(vmin, vmax))
sm.set_array(np.linspace(vmin, vmax))
cb = plt.colorbar(sm, ax=axes, use_gridspec=False,
shrink=colorbar_size,
anchor=colorbar_pos)
cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
cb.ax.tick_params(labelsize=fontsize_colorbar)
plt.setp(cb_yticks, color=textcolor)
# Add callback for interaction
if interactive:
callback = partial(_plot_connectivity_circle_onpick, fig=fig,
axes=axes, indices=indices, n_nodes=n_nodes,
node_angles=node_angles)
fig.canvas.mpl_connect('button_press_event', callback)
plt_show(show)
return fig, axes
| bsd-3-clause |
zorroblue/scikit-learn | examples/model_selection/plot_roc.py | 102 | 5056 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`sphx_glr_auto_examples_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
lw = 2
plt.plot(fpr[2], tpr[2], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
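# (Macro-averaging gives each class equal weight: interpolate every per-class
# ROC curve onto a common grid of false positive rates, average the true
# positive rates, then compute the AUC of the averaged curve.)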
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
harshaneelhg/scikit-learn | examples/cluster/plot_lena_compress.py | 271 | 2229 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Vector Quantization Example
=========================================================
The classic image processing example, Lena, an 8-bit grayscale,
512 x 512 image, is used here to illustrate
how `k`-means is used for vector quantization.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn import cluster
n_clusters = 5
np.random.seed(0)
try:
lena = sp.lena()
except AttributeError:
# Newer versions of scipy have lena in misc
from scipy import misc
lena = misc.lena()
X = lena.reshape((-1, 1)) # We need an (n_sample, n_feature) array
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_
# create an array from labels and values
lena_compressed = np.choose(labels, values)
lena_compressed.shape = lena.shape
vmin = lena.min()
vmax = lena.max()
# original lena
plt.figure(1, figsize=(3, 2.2))
plt.imshow(lena, cmap=plt.cm.gray, vmin=vmin, vmax=256)
# compressed lena
plt.figure(2, figsize=(3, 2.2))
plt.imshow(lena_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# equal bins lena
regular_values = np.linspace(0, 256, n_clusters + 1)
regular_labels = np.searchsorted(regular_values, lena) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1]) # mean
regular_lena = np.choose(regular_labels.ravel(), regular_values)
regular_lena.shape = lena.shape
plt.figure(3, figsize=(3, 2.2))
plt.imshow(regular_lena, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)
# histogram
plt.figure(4, figsize=(3, 2.2))
plt.clf()
plt.axes([.01, .01, .98, .98])
plt.hist(X, bins=256, color='.5', edgecolor='.5')
plt.yticks(())
plt.xticks(regular_values)
values = np.sort(values)
for center_1, center_2 in zip(values[:-1], values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b')
for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')
plt.show()
| bsd-3-clause |
JosmanPS/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
             (dt, 6 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
smorante/continuous-goal-directed-actions | demonstration-feature-selection/src/alternatives/main_dtw_mds_norm.py | 2 | 3731 | # -*- coding: utf-8 -*-
"""
Author: Santiago Morante
Robotics Lab. Universidad Carlos III de Madrid
"""
########################## DTW ####################################
import libmddtw
import matplotlib.pyplot as plt
from dtw import dtw
########################## MDS ####################################
import numpy as np
from sklearn.metrics import euclidean_distances
import libmds
########################## DBSCAN ####################################
import libdbscan
from sklearn.preprocessing import StandardScaler # to normalize
def normalize(X):
return StandardScaler().fit_transform(X)
def main():
NUMBER_OF_DEMONSTRATIONS=5
##########################################################################
########################## DTW ####################################
##########################################################################
dist=np.zeros((NUMBER_OF_DEMONSTRATIONS,NUMBER_OF_DEMONSTRATIONS))
demons=[]
# fill demonstrations
for i in range(NUMBER_OF_DEMONSTRATIONS):
demons.append(np.matrix([ np.sin(np.arange(15+i)+i) , np.sin(np.arange(15+i)+i)]))
# fill distance matrix
for i in range(NUMBER_OF_DEMONSTRATIONS):
for j in range(NUMBER_OF_DEMONSTRATIONS):
mddtw = libmddtw.Mddtw()
x,y = mddtw.collapseRows(demons[i],demons[j])
#fig = plt.figure()
#plt.plot(x)
#plt.plot(y)
singleDist, singleCost, singlePath = mddtw.compute(demons[i],demons[j])
dist[i][j]=singleDist
# print 'Minimum distance found:', singleDist
#fig = plt.figure()
# plt.imshow(cost.T, origin='lower', cmap=plt.cm.gray, interpolation='nearest')
# plt.plot(path[0], path[1], 'w')
# plt.xlim((-0.5, cost.shape[0]-0.5))
# plt.ylim((-0.5, cost.shape[1]-0.5))
# print "dist", dist
###########################################################################
########################### MDS ####################################
###########################################################################
md = libmds.Mds()
md.create(n_components=1, metric=False, max_iter=3000, eps=1e-9, random_state=None,
dissimilarity="precomputed", n_jobs=1)
points = md.compute(dist)
print "points", points.flatten()
# md.plot()
##########################################################################
########################## norm ####################################
##########################################################################
from scipy.stats import norm
from numpy import linspace
from pylab import plot,show,hist,figure,title
param = norm.fit(points.flatten()) # distribution fitting
x = linspace(np.min(points),np.max(points),NUMBER_OF_DEMONSTRATIONS)
pdf_fitted = norm.pdf(x, loc=param[0],scale=param[1])
fig = plt.figure()
title('Normal distribution')
plot(x,pdf_fitted,'r-')
hist(points.flatten(),normed=1,alpha=.3)
show()
for elem in points:
if elem <= np.mean(points):
print "probability of point ", str(elem), ": ", norm.cdf(elem, loc=param[0],scale=param[1])
if elem > np.mean(points):
print "probability of point ", str(elem), ": ", 1-norm.cdf(elem, loc=param[0],scale=param[1])
##############################################################################
##############################################################################
if __name__ == "__main__":
main() | mit |
glouppe/scikit-learn | examples/model_selection/plot_roc.py | 49 | 5041 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
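# (Aside, added remark: the micro-averaged area alone could also be obtained
# with sklearn.metrics.roc_auc_score(y_test, y_score, average="micro"); the
# manual computation above is kept because the curve itself is needed for
# plotting.)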
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
lw = 2
plt.plot(fpr[2], tpr[2], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
rmm-fcul/workshops | 2015_graz/binary_choice/two_arenas_real_real/casu_utils.py | 5 | 8116 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
a library of functions used in CASU controller dynamics; gathers code
from the controllers into one place so it is neater to maintain
RM, Feb 2015
'''
import numpy as np
from assisipy import casu
#import matplotlib.cm as cm
from datetime import datetime
import parsing
import time
### ============= maths ============= ###
#{{{ rolling_avg
def rolling_avg(x, n):
'''
given the sample x, provide a rolling average taking n samples per data point.
NOT a quick solution, but easy...
'''
y = np.zeros((len(x),))
for ctr in range(len(x)):
y[ctr] = np.sum(x[ctr:(ctr+n)])
return y/n
#}}}
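#{{{ rolling_avg_conv
def rolling_avg_conv(x, n):
    '''
    vectorized sketch of the same forward-looking window average using
    np.convolve (an illustrative alternative, not used by the controllers).
    Edge behaviour matches rolling_avg: the last windows contain fewer
    samples but are still divided by n.
    '''
    return np.convolve(x, np.ones(n), mode='full')[n-1:] / n
#}}}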
### ============= general behaviour ============= ###
#{{{ measure_ir_sensors
def measure_ir_sensors(mycasu, detect_data):
''' count up sensors that detect a bee, plus rotate history array '''
# don't discriminate between specific directions, so just accumulate all
count = 0
for (val,t) in zip(mycasu.get_ir_raw_value(casu.ARRAY), mycasu.threshold):
if (val > t):
count += 1
#print "raw:",
#print ",".join(["{:.2f}".format(x) for x in mycasu.get_ir_raw_value(casu.ARRAY)])
#mycasu.total_count += count # historical count over all time
detect_data = np.roll(detect_data, 1) # step all positions back
detect_data[0] = count # and overwrite the first entry (this was rolled
# around, so is the oldest entry -- and to become the newest now)
# allow ext usage to apply window -- remain agnostic here during collection.
return detect_data, count
#}}}
#{{{ heater_one_step
def heater_one_step(h):
'''legacy function'''
return detect_bee_proximity_saturated(h)
def detect_bee_proximity_saturated(h):
# measure proximity
detect_data, count = measure_ir_sensors(h, h.detect_data)
h.detect_data = detect_data
# overall bee count for this casu
sat_count = min(h.sat_lim, count) # saturates
return sat_count
#}}}
#{{{ find_mean_ext_temp
def find_mean_ext_temp(h):
r = []
for sensor in [casu.TEMP_F, casu.TEMP_B, casu.TEMP_L, casu.TEMP_R ]:
r.append(h.get_temp(sensor))
if len(r):
mean = sum(r) / float(len(r))
else:
mean = 0.0
return mean
#}}}
### ============= inter-casu comms ============= ###
#{{{ comms functions
def transmit_my_count(h, sat_count, dest='accomplice'):
s = "{}".format(sat_count)
if h.verb > 1:
print "\t[i]==> {} send msg ({} by): '{}' bees, to {}".format(
h._thename, len(s), s, dest)
h.send_message(dest, s)
#TODO: this is non-specific, i.e., any message from anyone is assumed to have
# the right form. For heterogeneous neighbours, we need to check identity as
# well
def recv_all_msgs(h, retry_cnt=0, max_recv=None):
'''
    continue to read the message buffer until no more messages remain.
    Returns a list of messages parsed into (src, float) pairs.
'''
msgs = []
try_cnt = 0
while(True):
msg = h.read_message()
#print msg
if msg:
txt = msg['data'].strip()
src = msg['sender']
bee_cnt = float(txt.split()[0])
msgs.append((src, bee_cnt))
if h.verb >1:
print "\t[i]<== {3} recv msg ({2} by): '{1}' bees, {4} from {0} {5}".format(
msg['sender'], bee_cnt, len(msg['data']), h._thename,
BLU, ENDC)
if h.verb > 1:
#print dir(msg)
print msg.items()
if(max_recv is not None and len(msgs) >= max_recv):
break
else:
# buffer emptied, return
try_cnt += 1
if try_cnt > retry_cnt:
break
return msgs
def recv_neighbour_msg(h):
bee_cnt = 0
msg = h.read_message()
#print msg
if msg:
txt = msg['data'].strip()
bee_cnt = int(txt.split()[0])
if h.verb >1:
print "\t[i]<== {3} recv msg ({2} by): '{1}' bees, from {0}".format(
msg['sender'], bee_cnt, len(msg['data']), h._thename)
return bee_cnt;
def recv_neighbour_msg_w_src(h):
''' provide the source of a message as well as the message count'''
bee_cnt = 0
src = None
msg = h.read_message()
#print msg
if msg:
txt = msg['data'].strip()
src = msg['sender']
bee_cnt = float(txt.split()[0])
if h.verb >1:
print "\t[i]<== {3} recv msg ({2} by): '{1}' bees, from {0}".format(
msg['sender'], bee_cnt, len(msg['data']), h._thename)
if h.verb > 1:
#print dir(msg)
print msg.items()
return bee_cnt, src
def recv_neighbour_msg_flt(h):
bee_cnt = 0
msg = h.read_message()
#print msg
if msg:
txt = msg['data'].strip()
bee_cnt = float(txt.split()[0])
if h.verb > 1:
print "\t[i]<== {3} recv msg ({2} by): '{1}' bees, from {0}".format(
msg['sender'], bee_cnt, len(msg['data']), h._thename)
return bee_cnt;
#}}}
def find_comms_mapping(name, rtc_path, suffix='-sim', verb=True):
links = parsing.find_comm_link_mapping(
name, rtc_path=rtc_path, suffix=suffix, verb=verb)
if verb:
print "[I] for {}, found the following nodes/edges".format(name)
print "\t", links.items()
print "\n===================================\n\n"
return links
### ============= display ============= ###
#{{{ term codes for colored text
ERR = '\033[41m'
BLU = '\033[34m'
ENDC = '\033[0m'
#}}}
#{{{ color funcs
#def gen_cmap(m='hot', n=32) :
# return cm.get_cmap(m, n) # get LUT with 32 values -- some gradation but see steps
def gen_clr_tgt(new_temp, cmap, tgt=None, min_temp=28.0, max_temp=38.0):
t_rng = float(max_temp - min_temp)
fr = (new_temp - min_temp) / t_rng
i = int(fr * len(cmap))
# compute basic color, if on target
#r,g,b,a = cmap(i)
g = 0.0; b = 0.0; a = 1.0;
i = sorted([0, i, len(cmap)-1])[1]
r = cmap[i]
# now adjust according to distance from target
if tgt is None: tgt=new_temp
dt = np.abs(new_temp - tgt)
dt_r = dt / t_rng
h2 = np.array([r,g,b])
h2 *= (1-dt_r)
return h2
# a colormap with 8 settings, that doesn't depend on the presence of
# matplotlib (hard-coded though.) -- deprecating
_clrs = [
(0.2, 0.2, 0.2),
(0.041, 0, 0),
(0.412, 0, 0),
(0.793, 0, 0),
(1, 0.174, 0),
(1, 0.555, 0),
(1, 0.936, 0),
(1, 1, 0.475),
(1, 1, 1),
]
_dflt_clr = (0.2, 0.2, 0.2)
# can access other gradations of colour using M = cm.hot(n) for n steps, then
# either extract them once (`clrs = M(arange(n)`) or each time ( `clr_x = M(x)`)
# But here we're going to use 8 steps for all CASUs so no bother.
#}}}
def sep_with_nowtime():
print "# =================== t={} =================== #\n".format(
datetime.now().strftime("%H:%M:%S"))
### ============= more generic ============= ###
#{{{ a struct constructor
# some handy python utilities, from Kier Dugan
class Struct:
def __init__ (self, **kwargs):
self.__dict__.update (kwargs)
def get(self, key, default=None):
return self.__dict__.get(key, default)
def addFields(self, **kwargs):
# add other fields (basically variables) after initialisation
self.__dict__.update (kwargs)
#}}}
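# Example usage of Struct (illustrative only):
#   s = Struct(name='casu-001', verb=1)
#   s.addFields(sat_lim=5)
#   s.get('missing')        # -> None
#   s.get('missing', 0)     # -> 0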
### calibration
def _calibrate(h, calib_steps, calib_gain=1.1, interval=0.1):
'''
read the sensors several times, and take the highest reading
seen as the threshold.
'''
h._raw_thresh = [0] * 7 # default cases for threshold
for stp in xrange(calib_steps):
for i, v in enumerate(h.get_ir_raw_value(casu.ARRAY)):
if v > h._raw_thresh[i]:
h._raw_thresh[i] = v
time.sleep(interval)
h.thresh = [x*calib_gain for x in h._raw_thresh]
h.threshold = [x*calib_gain for x in h._raw_thresh]
if h.verb:
_ts =", ".join(["{:.2f}".format(x) for x in h.thresh])
print "[I] post-calibration, we have thresh: ", _ts
| lgpl-3.0 |
yonglehou/scikit-learn | examples/applications/plot_stock_market.py | 227 | 8284 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in the scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can choose automatically the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D
embedding.
Visualization
-------------
The output of the 3 models are combined in a 2D graph where nodes
represents the stocks and edges the:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plan
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
    'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
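# (Added note) scaling the precision matrix by 1/sqrt of its diagonal yields
# the partial correlations up to sign (partial_corr_ij = -prec_ij /
# sqrt(prec_ii * prec_jj)); only absolute values are used below, so the sign
# does not matter here.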
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
phoebe-project/phoebe2-docs | 2.1/examples/minimal_contact_binary.py | 1 | 5694 | #!/usr/bin/env python
# coding: utf-8
# Minimal Contact Binary System
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.1 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.1,<2.2"')
# As always, let's do imports and initialize a logger and a new bundle. See [Building a System](../tutorials/building_a_system.html) for more details.
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
# Here we'll initialize a default binary, but ask for it to be created as a contact system.
# In[3]:
b_cb = phoebe.default_binary(contact_binary=True)
# We'll compare this to the default detached binary
# In[4]:
b_detached = phoebe.default_binary()
# Hierarchy
# -------------
# Let's first look at the hierarchy of the default detached binary, and then compare that to the hierarchy of the overcontact system
# In[5]:
print b_detached.hierarchy
# In[6]:
print b_cb.hierarchy
# As you can see, the overcontact system has an additional "component" with method "envelope" and component label "contact_envelope".
#
# Next let's look at the parameters in the envelope and star components. You can see that most of the parameters in the envelope class are constrained, while the equivalent radius of the primary is unconstrained. The value of the primary equivalent radius constrains the potential and fillout factor of the envelope, as well as the equivalent radius of the secondary.
# In[7]:
print b_cb.filter(component='contact_envelope', kind='envelope', context='component')
# In[8]:
print b_cb.filter(component='primary', kind='star', context='component')
# In[9]:
b_cb['requiv@primary'] = 1.5
# In[10]:
b_cb['pot@contact_envelope@component']
# In[11]:
b_cb['fillout_factor@contact_envelope@component']
# In[12]:
b_cb['requiv@secondary@component']
# Now, of course, if we didn't originally know we wanted a contact binary and built the default detached system, we could still turn it into a contact binary just by changing the hierarchy.
# In[13]:
b_detached.add_component('envelope', component='contact_envelope')
# In[14]:
hier = phoebe.hierarchy.binaryorbit(b_detached['binary'], b_detached['primary'], b_detached['secondary'], b_detached['contact_envelope'])
print hier
# In[15]:
b_detached.set_hierarchy(hier)
# In[16]:
print b_detached.hierarchy
# However, since our system was detached, the system is not overflowing, and therefore doesn't pass system checks
# In[17]:
b_detached.run_checks()
# And because of this, the potential and requiv@secondary constraints cannot be computed
# In[18]:
b_detached['pot@component']
# In[19]:
b_detached['requiv@secondary@component']
# Likewise, we can make a contact system detached again simply by removing the envelope from the hierarchy. The parameters themselves will still exist (unless you remove them), so you can always just change the hierarchy again to change back to an overcontact system.
# In[20]:
hier = phoebe.hierarchy.binaryorbit(b_detached['binary'], b_detached['primary'], b_detached['secondary'])
print hier
# In[21]:
b_detached.set_hierarchy(hier)
# In[22]:
print b_detached.hierarchy
# Although the constraints have been removed, PHOEBE has lost the original value of the secondary radius (because of the failed contact constraints), so we'll have to reset that here as well.
# In[23]:
b_detached['requiv@secondary'] = 1.0
# Adding Datasets
# ---------------------
# In[24]:
b_cb.add_dataset('mesh', times=[0], dataset='mesh01')
# In[25]:
b_cb.add_dataset('orb', times=np.linspace(0,1,201), dataset='orb01')
# In[26]:
b_cb.add_dataset('lc', times=np.linspace(0,1,21), dataset='lc01')
# In[27]:
b_cb.add_dataset('rv', times=np.linspace(0,1,21), dataset='rv01')
# For comparison, we'll do the same to our detached system
# In[28]:
b_detached.add_dataset('mesh', times=[0], dataset='mesh01')
# In[29]:
b_detached.add_dataset('orb', times=np.linspace(0,1,201), dataset='orb01')
# In[30]:
b_detached.add_dataset('lc', times=np.linspace(0,1,21), dataset='lc01')
# In[31]:
b_detached.add_dataset('rv', times=np.linspace(0,1,21), dataset='rv01')
# Running Compute
# --------------------
# In[32]:
b_cb.run_compute(irrad_method='none')
# In[33]:
b_detached.run_compute(irrad_method='none')
# Synthetics
# ------------------
# To ensure compatibility with computing synthetics in detached and semi-detached systems in Phoebe, the synthetic meshes for our overcontact system are attached to each component separately, instead of the contact envelope.
# In[34]:
print b_cb['mesh01@model'].components
# In[35]:
print b_detached['mesh01@model'].components
# Plotting
# ---------------
# ### Meshes
# In[36]:
afig, mplfig = b_cb['mesh01@model'].plot(x='ws', show=True)
# In[37]:
afig, mplfig = b_detached['mesh01@model'].plot(x='ws', show=True)
# ### Orbits
# In[38]:
afig, mplfig = b_cb['orb01@model'].plot(x='ws',show=True)
# In[39]:
afig, mplfig = b_detached['orb01@model'].plot(x='ws',show=True)
# ### Light Curves
# In[40]:
afig, mplfig = b_cb['lc01@model'].plot(show=True)
# In[41]:
afig, mplfig = b_detached['lc01@model'].plot(show=True)
# ### RVs
# In[42]:
afig, mplfig = b_cb['rv01@model'].plot(show=True)
# In[43]:
afig, mplfig = b_detached['rv01@model'].plot(show=True)
# In[ ]:
| gpl-3.0 |
jereze/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 218 | 3893 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
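# (Added note) noise_coef is chosen so that ||y|| / ||noise_coef * noise||
# equals exp(snr / 20.), i.e. the noise level is set relative to the signal
# norm by the `snr` parameter above.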
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
jeremiedecock/snippets | python/matplotlib/hist_logscale_x.py | 1 | 1804 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Make a histogram using a logarithmic scale on X axis
See:
- http://stackoverflow.com/questions/6855710/how-to-have-logarithmic-bins-in-a-python-histogram
"""
import numpy as np
import matplotlib.pyplot as plt
# SETUP #######################################################################
# histtype : [‘bar’ | ‘barstacked’ | ‘step’ | ‘stepfilled’]
HIST_TYPE='bar'
ALPHA=0.5
# MAKE DATA ###################################################################
data = np.random.exponential(size=1000000)
#data = np.abs(np.random.normal(size=1000000) * 10000.)
#data = np.random.chisquare(10, size=1000000)
# INIT FIGURE #################################################################
fig = plt.figure(figsize=(8.0, 6.0))
# AX1 #########################################################################
ax1 = fig.add_subplot(211)
res_tuple = ax1.hist(data,
bins=50,
histtype=HIST_TYPE,
alpha=ALPHA)
ax1.set_title("Normal scale")
ax1.set_xlabel("Value")
ax1.set_ylabel("Count")
# AX2 #########################################################################
ax2 = fig.add_subplot(212)
vmin = np.log10(data.min())
vmax = np.log10(data.max())
bins = np.logspace(vmin, vmax, 50) # <- make a range from 10**vmin to 10**vmax
print(bins)
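# Sanity check (illustrative): the edges are evenly spaced in log space,
# so the differences of their base-10 logarithms are (numerically) constant.
assert np.allclose(np.diff(np.log10(bins)), np.diff(np.log10(bins))[0])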
res_tuple = ax2.hist(data,
bins=bins,
histtype=HIST_TYPE,
alpha=ALPHA)
ax2.set_xscale("log") # <- Activate log scale on X axis
ax2.set_title("Log scale")
ax2.set_xlabel("Value")
ax2.set_ylabel("Count")
# SHOW AND SAVE FILE ##########################################################
plt.tight_layout()
plt.savefig("hist_logscale_x.png")
plt.show()
| mit |
pypot/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each component
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
JackKelly/neuralnilm_prototype | scripts/experiment029.py | 2 | 3262 | from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import theano
import theano.tensor as T
import lasagne
from gen_data_029 import gen_data, N_BATCH, LENGTH
theano.config.compute_test_value = 'raise'
# Number of units in the hidden (recurrent) layer
N_HIDDEN = 5
# SGD learning rate
LEARNING_RATE = 1e-1
# Number of iterations to train the net
N_ITERATIONS = 200
# Generate a "validation" sequence whose cost we will periodically compute
X_val, y_val = gen_data()
n_features = X_val.shape[-1]
n_output = y_val.shape[-1]
assert X_val.shape == (N_BATCH, LENGTH, n_features)
assert y_val.shape == (N_BATCH, LENGTH, n_output)
# Construct bidirectional LSTM RNN: forward and backward LSTM layers whose outputs are concatenated and fed to a dense output layer
l_in = lasagne.layers.InputLayer(shape=(N_BATCH, LENGTH, n_features))
# setup fwd and bck LSTM layer.
l_fwd = lasagne.layers.LSTMLayer(
l_in, N_HIDDEN, backwards=False, learn_init=True, peepholes=True)
l_bck = lasagne.layers.LSTMLayer(
l_in, N_HIDDEN, backwards=True, learn_init=True, peepholes=True)
# concatenate forward and backward LSTM layers
l_fwd_reshape = lasagne.layers.ReshapeLayer(l_fwd, (N_BATCH*LENGTH, N_HIDDEN))
l_bck_reshape = lasagne.layers.ReshapeLayer(l_bck, (N_BATCH*LENGTH, N_HIDDEN))
l_concat = lasagne.layers.ConcatLayer([l_fwd_reshape, l_bck_reshape], axis=1)
l_recurrent_out = lasagne.layers.DenseLayer(
l_concat, num_units=n_output, nonlinearity=None)
l_out = lasagne.layers.ReshapeLayer(
l_recurrent_out, (N_BATCH, LENGTH, n_output))
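# (Added note) the reshape to (N_BATCH*LENGTH, N_HIDDEN) lets a single dense
# layer be applied independently at every timestep; the final ReshapeLayer
# restores the (batch, time, output) layout expected by the cost function.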
input = T.tensor3('input')
target_output = T.tensor3('target_output')
# add test values
input.tag.test_value = np.random.rand(
*X_val.shape).astype(theano.config.floatX)
target_output.tag.test_value = np.random.rand(
*y_val.shape).astype(theano.config.floatX)
# Cost = mean squared error
cost = T.mean((l_out.get_output(input) - target_output)**2)
# Use NAG for training
all_params = lasagne.layers.get_all_params(l_out)
updates = lasagne.updates.nesterov_momentum(cost, all_params, LEARNING_RATE)
# Theano functions for training, getting output, and computing cost
train = theano.function([input, target_output],
cost, updates=updates, on_unused_input='warn',
allow_input_downcast=True)
y_pred = theano.function(
[input], l_out.get_output(input), on_unused_input='warn',
allow_input_downcast=True)
compute_cost = theano.function(
[input, target_output], cost, on_unused_input='warn',
allow_input_downcast=True)
# Train the net
def run_training():
costs = np.zeros(N_ITERATIONS)
for n in range(N_ITERATIONS):
X, y = gen_data()
# you should use your own training data mask instead of mask_val
costs[n] = train(X, y)
if not n % 10:
cost_val = compute_cost(X_val, y_val)
print "Iteration {} validation cost = {}".format(n, cost_val)
plt.plot(costs)
plt.xlabel('Iteration')
plt.ylabel('Cost')
plt.show()
def plot_estimates():
X, y = gen_data()
y_predictions = y_pred(X)
ax = plt.gca()
ax.plot(y_predictions[0,:,0], label='estimate')
ax.plot(y[0,:,0], label='ground truth')
# ax.plot(X[0,:,0], label='aggregate')
ax.legend()
plt.show()
run_training()
plot_estimates()
| mit |
ephes/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 182 | 1743 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LDA(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each component
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('PCA of IRIS dataset')
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], c=c, label=target_name)
plt.legend()
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
aguirrea/lucy | tests/lfootGraph.py | 1 | 6007 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Andrés Aguirre Dorelo
#
# MINA/INCO/UDELAR
#
# module for finding the steps in the tutors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import glob
import ntpath
from parser.BvhImport import BvhImport
import matplotlib.pyplot as plt
from configuration.LoadSystemConfiguration import LoadSystemConfiguration
import numpy as np
from scipy.signal import argrelextrema
from collections import Counter
sysConf = LoadSystemConfiguration()
BVHDir = os.getcwd() + sysConf.getDirectory("CMU mocap Files")
Y_THREADHOLD = 11 #TODO calculate this as the average of the steps_highs
X_THREADHOLD = 36
def firstMax(values1, values2):
    res=0
    for i in range(len(values1)-2):
        if values1[i] < values1[i+1] and values1[i+1] > values1[i+2]: #i+1 is a local maximum
            if (values1[i] - values2[i]) > Y_THREADHOLD:
                res=i+1
        elif values1[i] < values1[i+1] < values1[i+2]: #i is a local maximum
            if (values1[i] - values2[i]) > Y_THREADHOLD:
                res=i
    return res
def find_nearest(a, a0):
"Element in nd array `a` closest to the scalar value `a0`"
idx = np.abs(a - a0).argmin()
return a.flat[idx]
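# Example (illustration only): find_nearest(np.array([3, 10, 42]), 11) returns
# 10, the element of the array closest to the scalar 11.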
for filename in glob.glob(os.path.join(BVHDir, '*.bvh')):
print "transforming: " + filename + " ..."
parser = BvhImport(filename)
x_,y_,z_ = parser.getNodePositionsFromName("lFoot")
y1 = []
y2 = []
x1 = []
x2 = []
for key, value in y_.iteritems():
y1.append(value)
x1.append(key)
x_,y_,z_ = parser.getNodePositionsFromName("rFoot")
for key, value in y_.iteritems():
y2.append(value)
x2.append(key)
maxLfootIndexes = [x for x in argrelextrema(np.array(y1), np.greater)[0]]
maxRfootIndexes = [x for x in argrelextrema(np.array(y2), np.greater)[0]]
stepsLfootIndexes = []
for i in range(len(maxLfootIndexes)):
index = maxLfootIndexes[i]
        if y1[index] - y2[index] > Y_THREADHOLD: #one foot is up and the other is on the floor
            if len(stepsLfootIndexes)>0:
                if abs(index - find_nearest(np.array(stepsLfootIndexes), index)) > X_THREADHOLD: #avoid max near an existing point
                    stepsLfootIndexes.append(index)
                    print "append L"
                else:
                    if y1[find_nearest(np.array(stepsLfootIndexes), index)] < y1[index]: #check if the existing near max is a local maximum
                        print "remove L", find_nearest(np.array(stepsLfootIndexes), index), "from: ", stepsLfootIndexes
                        stepsLfootIndexes.remove(find_nearest(np.array(stepsLfootIndexes), index))
                        print "remove L"
                        stepsLfootIndexes.append(index)
                        print "append L"
            else:
                stepsLfootIndexes.append(index)
                print "append L"
stepsRfootIndexes = []
for i in range(len(maxRfootIndexes)):
index = maxRfootIndexes[i]
        if y2[index] - y1[index] > Y_THREADHOLD: #one foot is up and the other is on the floor
            if len(stepsRfootIndexes)>0:
                if abs(index - find_nearest(np.array(stepsRfootIndexes), index)) > X_THREADHOLD: #avoid max near an existing point
                    stepsRfootIndexes.append(index)
                    print "append R"
                else:
                    if y2[find_nearest(np.array(stepsRfootIndexes), index)] < y2[index]: #check if the existing near max is a local maximum
                        print "remove R", find_nearest(np.array(stepsRfootIndexes), index), "from: ", stepsRfootIndexes, "index: ", index
                        stepsRfootIndexes.remove(find_nearest(np.array(stepsRfootIndexes), index))
                        print "remove R"
                        stepsRfootIndexes.append(index)
                        print "append R"
            else:
                stepsRfootIndexes.append(index)
                print "append R"
if stepsLfootIndexes[0] < stepsRfootIndexes[0]:
if len(stepsLfootIndexes) > 2:
testPoint = stepsLfootIndexes[1]
while(y1[testPoint]>y2[testPoint]):
testPoint = testPoint + 1
end = testPoint + 5
print "red over green| ", "red: ", stepsLfootIndexes[0], "green: ", stepsRfootIndexes[0], "second red: ", stepsLfootIndexes[1], "end: ", end
else:
end = len(y1)
print "red over green| ", "red: ", stepsLfootIndexes[0], "green: ", stepsRfootIndexes[0], "second red: -----", "end: ", end
else:
if len(stepsRfootIndexes) > 2:
testPoint = stepsRfootIndexes[1]
while(y2[testPoint]>y1[testPoint]):
testPoint = testPoint + 1
end = testPoint + 5
print "green over red| ", "green: ", stepsRfootIndexes[0], "red: ", stepsLfootIndexes[0], "second green: ", stepsRfootIndexes[1], "end: ", end
else:
end = len(y2)
print "green over red| ", "green: ", stepsRfootIndexes[0], "red: ", stepsLfootIndexes[0], "second green: -----", "end: ", end
plt.plot(x1, y1,'ro')
plt.plot(x1, y2,'g')
plt.show()
| gpl-3.0 |
kylerbrown/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
nasseralkmim/SaPy | sapy/plotter.py | 1 | 4743 | import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Line3D
from matplotlib.lines import Line2D
import numpy as np
def window(name):
return plt.figure(name)
def show():
plt.show()
return None
def undeformed(model):
"""Plot the undeformed structure according to the dimension
"""
if model.ndm == 2:
undeformed = window('Undeformed')
axes = undeformed.add_subplot(111, aspect='equal')
geo2d(model.XYZ, model.CON, axes, color='black')
label2d(model.XYZ, model.CON, axes)
undeformed.tight_layout()
if model.ndm == 3:
undeformed = window('Undeformed')
axes = undeformed.add_subplot(111, projection='3d', aspect='equal')
geo3d(model.XYZ, model.CON, axes, 'black')
label3d(model.XYZ, model.CON, axes)
undeformed.tight_layout()
def deformed(model, U):
"""Plot the deformed structure according to the dimension
"""
CON = model.CON
XYZ = np.copy(model.XYZ)
for n in range(model.nn):
for d in range(model.ndf[n]):
dof = model.DOF[n, d]
XYZ[n, d] += U[dof]
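    # (Added note) XYZ now holds the deformed nodal coordinates: the solved
    # displacement in U has been added to each degree of freedom of every node.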
if model.ndm == 2:
deformed = window('Deformed')
axes = deformed.add_subplot(111, aspect='equal')
geo2d(XYZ, CON, axes, 'tomato')
geo2d(model.XYZ, model.CON, axes, 'black')
label2d(XYZ, CON, axes)
deformed.tight_layout()
if model.ndm == 3:
deformed = window('Deformed')
axes = deformed.add_subplot(111, projection='3d', aspect='equal')
geo3d(model.XYZ, model.CON, axes, 'black')
geo3d(XYZ, CON, axes, 'tomato')
label3d(XYZ, CON, axes)
deformed.tight_layout()
def geo3d(XYZ, CON, axes, color):
"""Plot the 3d model
"""
axes.set_xlabel('x')
axes.set_ylabel('y')
axes.set_zlabel('z')
# draw nodes
for node, xyz in enumerate(XYZ):
axes.scatter(xyz[0], xyz[1], xyz[2], c='k', alpha=1, marker='s')
# draw edges
for ele, con in enumerate(CON):
xs = [XYZ[con[0]][0], XYZ[con[1]][0]]
ys = [XYZ[con[0]][1], XYZ[con[1]][1]]
zs = [XYZ[con[0]][2], XYZ[con[1]][2]]
line = Line3D(xs, ys, zs, linewidth=1.0, color=color)
axes.add_line(line)
def label3d(XYZ, CON, axes):
"""Plot the nodes and element label
"""
for node, xyz in enumerate(XYZ):
axes.text(xyz[0], xyz[1], xyz[2], str(node), color='b', size=10)
for ele, con in enumerate(CON):
xm = (XYZ[con[0]][0] + XYZ[con[1]][0])/2
ym = (XYZ[con[0]][1] + XYZ[con[1]][1])/2
zm = (XYZ[con[0]][2] + XYZ[con[1]][2])/2
axes.text(xm, ym, zm, str(ele), color='g', size=10)
def geo2d(XYZ, CON, axes, color):
"""Plot the 2d model
"""
axes.set_xlabel('x')
axes.set_ylabel('y')
# draw nodes
for xyz in XYZ:
axes.scatter(xyz[0], xyz[1], c='k', alpha=1, marker='s')
# draw edges
for con in CON:
xs = [XYZ[con[0]][0], XYZ[con[1]][0]]
ys = [XYZ[con[0]][1], XYZ[con[1]][1]]
line = Line2D(xs, ys, linewidth=1.0, color=color)
axes.add_line(line)
def label2d(XYZ, CON, axes):
"""Plot the nodes and element label
"""
for node, xyz in enumerate(XYZ):
axes.text(xyz[0], xyz[1], str(node), color='b', size=10)
for ele, con in enumerate(CON):
xm = (XYZ[con[0]][0] + XYZ[con[1]][0])/2
ym = (XYZ[con[0]][1] + XYZ[con[1]][1])/2
axes.text(xm, ym, str(ele), color='g', size=10)
def axialforce(model, Q):
"""Plot axial force
"""
if model.ndm == 2:
axial = window('Axial')
axes = axial.add_subplot(111, aspect='equal')
geo2d(model.XYZ, model.CON, axes, color='black')
axial2d(model.XYZ, model.CON, Q, axes)
axial.tight_layout()
if model.ndm == 3:
axial = window('Axial')
axes = axial.add_subplot(111, projection='3d', aspect='equal')
geo3d(model.XYZ, model.CON, axes, 'black')
axial3d(model.XYZ, model.CON, Q, axes)
axial.tight_layout()
def axial2d(XYZ, CON, Q, axes):
"""Plot text with axial force value
"""
for ele, con in enumerate(CON):
xm = (XYZ[con[0]][0] + XYZ[con[1]][0])/2
ym = (XYZ[con[0]][1] + XYZ[con[1]][1])/2
axes.text(xm, ym, str(np.round_(Q[ele], 1)), color='g', size=10)
def axial3d(XYZ, CON, Q, axes):
"""Plot text with axial force value for 3d plot
"""
for ele, con in enumerate(CON):
xm = (XYZ[con[0]][0] + XYZ[con[1]][0])/2
ym = (XYZ[con[0]][1] + XYZ[con[1]][1])/2
zm = (XYZ[con[0]][2] + XYZ[con[1]][2])/2
axes.text(xm, ym, zm, str(np.round_(Q[ele], 1)), color='g', size=10)
| gpl-3.0 |
hennersz/pySpace | basemap/doc/users/figures/omerc.py | 6 | 1065 | from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
# setup oblique mercator basemap.
# width is width of map projection region in km (xmax-xmin)
# height is height of map projection region in km (ymax-ymin)
# lon_0, lat_0 are the central longitude and latitude of the projection.
# lat_1,lon_1 and lat_2,lon_2 are two pairs of points that define
# the projection centerline.
# Map projection coordinates are automatically rotated to true north.
# To avoid this, set no_rot=True.
# area_thresh=1000 means don't plot coastline features less
# than 1000 km^2 in area.
m = Basemap(height=16700000,width=12000000,
resolution='l',area_thresh=1000.,projection='omerc',\
lon_0=-100,lat_0=15,lon_2=-120,lat_2=65,lon_1=-50,lat_1=-55)
m.drawcoastlines()
m.fillcontinents(color='coral',lake_color='aqua')
# draw parallels and meridians.
m.drawparallels(np.arange(-80.,81.,20.))
m.drawmeridians(np.arange(-180.,181.,20.))
m.drawmapboundary(fill_color='aqua')
plt.title("Oblique Mercator Projection")
plt.show()
| gpl-3.0 |
jlegendary/scikit-learn | examples/plot_multilabel.py | 87 | 4279 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
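# A minimal sketch of the generative process described in the module docstring,
# simplified to one class per document for brevity (this is an illustrative
# assumption, not the actual implementation of make_multilabel_classification):
def _sample_single_label_doc(rng, class_priors, word_dists, mean_length=50):
    c = rng.choice(len(class_priors), p=class_priors)  # c ~ Multinomial(theta)
    k = 0
    while k == 0:                                      # reject empty documents
        k = rng.poisson(mean_length)                   # k ~ Poisson(length)
    return rng.multinomial(k, word_dists[c]), c        # word counts and label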
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
return_indicator=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
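# Positional arguments to subplots_adjust are the left, bottom, right and top
# margins (as figure fractions), then the wspace/hspace padding between subplots.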
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/matplotlib/tri/triplot.py | 8 | 3150 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import numpy as np
from matplotlib.tri.triangulation import Triangulation
def triplot(ax, *args, **kwargs):
"""
Draw a unstructured triangular grid as lines and/or markers.
The triangulation to plot can be specified in one of two ways;
either::
triplot(triangulation, ...)
where triangulation is a :class:`matplotlib.tri.Triangulation`
object, or
::
triplot(x, y, ...)
triplot(x, y, triangles, ...)
triplot(x, y, triangles=triangles, ...)
triplot(x, y, mask=mask, ...)
triplot(x, y, triangles, mask=mask, ...)
in which case a Triangulation object will be created. See
:class:`~matplotlib.tri.Triangulation` for a explanation of these
possibilities.
The remaining args and kwargs are the same as for
:meth:`~matplotlib.axes.Axes.plot`.
Return a list of 2 :class:`~matplotlib.lines.Line2D` containing
respectively:
- the lines plotted for triangles edges
- the markers plotted for triangles nodes
**Example:**
.. plot:: mpl_examples/pylab_examples/triplot_demo.py
"""
import matplotlib.axes
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)
x, y, edges = (tri.x, tri.y, tri.edges)
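    # tri.edges is an (n_edges, 2) array of point indices, one row per unique
    # edge of the triangulation.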
# Decode plot format string, e.g., 'ro-'
fmt = ""
if len(args) > 0:
fmt = args[0]
linestyle, marker, color = matplotlib.axes._base._process_plot_format(fmt)
# Insert plot format string into a copy of kwargs (kwargs values prevail).
kw = kwargs.copy()
for key, val in zip(('linestyle', 'marker', 'color'),
(linestyle, marker, color)):
if val is not None:
kw[key] = kwargs.get(key, val)
# Draw lines without markers.
# Note 1: If we drew markers here, most markers would be drawn more than
# once as they belong to several edges.
# Note 2: We insert nan values in the flattened edges arrays rather than
# plotting directly (triang.x[edges].T, triang.y[edges].T)
# as it considerably speeds-up code execution.
linestyle = kw['linestyle']
kw_lines = kw.copy()
kw_lines['marker'] = 'None' # No marker to draw.
kw_lines['zorder'] = kw.get('zorder', 1) # Path default zorder is used.
if (linestyle is not None) and (linestyle not in ['None', '', ' ']):
tri_lines_x = np.insert(x[edges], 2, np.nan, axis=1)
tri_lines_y = np.insert(y[edges], 2, np.nan, axis=1)
tri_lines = ax.plot(tri_lines_x.ravel(), tri_lines_y.ravel(),
**kw_lines)
else:
tri_lines = ax.plot([], [], **kw_lines)
# Draw markers separately.
marker = kw['marker']
kw_markers = kw.copy()
kw_markers['linestyle'] = 'None' # No line to draw.
if (marker is not None) and (marker not in ['None', '', ' ']):
tri_markers = ax.plot(x, y, **kw_markers)
else:
tri_markers = ax.plot([], [], **kw_markers)
return tri_lines + tri_markers
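

# Illustrative usage sketch (an addition, not part of the original module): the
# two call forms from the docstring, demonstrated with small hypothetical data.
# The __main__ guard keeps normal imports of this module unaffected.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    x = np.asarray([0.0, 1.0, 0.0, 1.0])
    y = np.asarray([0.0, 0.0, 1.0, 1.0])
    triangles = [[0, 1, 2], [1, 3, 2]]
    fig, ax = plt.subplots()
    # Form 1: pass x, y and the triangle indices directly.
    triplot(ax, x, y, triangles, 'go-')
    # Form 2: build a Triangulation explicitly and pass it in.
    triplot(ax, Triangulation(x, y, triangles), 'r--')
    plt.show()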
| apache-2.0 |